/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include 
#include "svga3d_surfacedefs.h"

/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @base:           The TTM base object handling user-space visibility.
 * @srf:            The surface metadata.
 * @size:           TTM accounting size for the surface.
 */
struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
	uint32_t size;
	uint32_t backup_handle;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:           Surface face.
 * @mip:            Mip level.
 * @bo_offset:      Offset into backing store of this mip level.
 *
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;

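/* TTM accounting size for a user surface; set up lazily on first use. */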
static uint64_t vmw_user_surface_size;

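/*
 * Resource function table for legacy surfaces. The generic resource code
 * uses these entry points to create and bind a device surface when it is
 * validated, and to unbind (read back) and destroy it when it is evicted,
 * with backup storage placed according to &vmw_srf_placement.
 */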
static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

/**
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};


/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}


/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

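	/*
	 * The per mip-level SVGA3dSize array follows the fixed-size command
	 * body directly in the reserved command space, so step past the
	 * define command and continue writing the sizes there.
	 */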
	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(srf->format);

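	/*
	 * Emit one complete SURFACE_DMA command (header, body, copy box and
	 * suffix) per mip-level image, copying the whole image between its
	 * offset in the backup buffer and the corresponding face/mip level
	 * of the host surface.
	 */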
	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
								  cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			svga3dsurface_get_image_buffer_size(desc, cur_size,
							    body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
};


/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res:        Pointer to a struct vmw_resource embedded in a struct
 *              vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	void *cmd;

	if (res->id != -1) {

		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
		if (unlikely(cmd == NULL)) {
			DRM_ERROR("Failed reserving FIFO space for surface "
				  "destruction.\n");
			return;
		}

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * used_memory_size_atomic, or separate lock
		 * to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		srf = vmw_res_to_srf(res);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
	vmw_3d_resource_dec(dev_priv, false);
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, allocates one and defines the
 * surface on the device.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
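	/*
	 * Refuse to define the surface if doing so would exceed the device
	 * memory limit; return -EBUSY so the caller can retry after freeing
	 * up resources.
	 */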
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode the surface define command.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);
	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 * @bind:           Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(val_buf->bo == NULL);

	submit_size = vmw_surface_dma_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "DMA.\n");
		return -ENOMEM;
	}
	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 *                       surface validation process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->backup_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}


/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 *                         surface eviction process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 *                          resource eviction process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the surface destroy command.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "eviction.\n");
		return -ENOMEM;
	}

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);

	return 0;
}


/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv:       Pointer to a device private struct.
 * @srf:            Pointer to the struct vmw_surface to initialize.
 * @res_free:       Pointer to a resource destructor used to free
 *                  the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(res_free == NULL);
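	/*
	 * Take a 3D resource reference for the lifetime of the surface.
	 * It is dropped again in vmw_hw_surface_destroy() and in the error
	 * path below.
	 */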
	(void) vmw_3d_resource_inc(dev_priv, false);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		vmw_3d_resource_dec(dev_priv, false);
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 *                                user visible surfaces
 *
 * @base:           Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface, base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
//   ttm_base_object_kfree(user_srf, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object destructor
 *
 * @p_base:         Pointer to a pointer to a TTM base object
 *                  embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

#if 0
/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 *                                  the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	uint32_t size;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	const struct svga3d_surface_desc *desc;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		num_sizes += req->mip_levels[i];

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS)
		return -EINVAL;

	size = vmw_user_surface_size + 128 +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));


	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		DRM_ERROR("Invalid surface format for surface creation.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_no_sizes;
	}
	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
			       GFP_KERNEL);
	if (unlikely(srf->offsets == NULL)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_no_copy;
	}

	srf->base_size = *srf->sizes;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = 1;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

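	/*
	 * Lay out the backing store: walk the faces and their mip levels in
	 * order, recording each image's offset and accumulating its buffer
	 * size. The running total becomes the required backup buffer size.
	 */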
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride = svga3dsurface_calculate_pitch
				(desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += svga3dsurface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
	res->backup_size = cur_bo_offset;
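	/*
	 * A 64x64 A8R8G8B8 scanout surface is treated as a cursor: allocate
	 * a snooper image so the driver can keep a copy of the cursor
	 * contents.
	 */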
	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->base.hash.key;
	vmw_resource_unreference(&res);

	ttm_read_unlock(&vmaster->lock);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_no_sizes:
	ttm_base_object_kfree(user_srf, base);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 *                                  the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

#endif