Kolibri OS Subversion repository: diff between Rev 4569 and Rev 5078.

/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include <ttm/ttm_placement.h>
#include "svga3d_surfacedefs.h"

/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @base:           The TTM base object handling user-space visibility.
 * @srf:            The surface metadata.
 * @size:           TTM accounting size for the surface.
 * @master:         master of the creating client. Used for security check.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	uint32_t size;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:           Surface face.
 * @mip:            Mip level.
 * @bo_offset:      Offset into backing store of this mip level.
 *
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);


static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;


static uint64_t vmw_user_surface_size;

static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind
};

/**
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};


/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}


/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(srf->format);

	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
								  cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			svga3dsurface_get_image_buffer_size(desc, cur_size,
							    body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
};


/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res:        Pointer to a struct vmw_resource embedded in a struct
 *              vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	void *cmd;

	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {

		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
		if (unlikely(cmd == NULL)) {
			DRM_ERROR("Failed reserving FIFO space for surface "
				  "destruction.\n");
			return;
		}

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * used_memory_size_atomic, or separate lock
		 * to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		srf = vmw_res_to_srf(res);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
	vmw_3d_resource_dec(dev_priv, false);
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, one is allocated and the surface
 * is defined in the device.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define- commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);
	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 * @bind:           Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(val_buf->bo == NULL);

	submit_size = vmw_surface_dma_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "DMA.\n");
		return -ENOMEM;
	}
	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 *                       surface validation process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->backup_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}


/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 *                         surface eviction process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 *                          resource eviction process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the dma- and surface destroy commands.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "eviction.\n");
		return -ENOMEM;
	}

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);

	return 0;
}


/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv:       Pointer to a device private struct.
 * @srf:            Pointer to the struct vmw_surface to initialize.
 * @res_free:       Pointer to a resource destructor used to free
 *                  the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(res_free == NULL);
	if (!dev_priv->has_mob)
		(void) vmw_3d_resource_inc(dev_priv, false);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		if (!dev_priv->has_mob)
			vmw_3d_resource_dec(dev_priv, false);
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 *                                user visible surfaces
 *
 * @base:           Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
//   ttm_base_object_kfree(user_srf, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object destructor
 *
 * @p_base:         Pointer to a pointer to a TTM base object
 *                  embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

#if 0
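/*
 * Note: the user-space surface define/reference ioctl implementations below
 * are compiled out ("#if 0") in this port.
 */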
/**
 * vmw_user_surface_define_ioctl - Ioctl function implementing
 *                                  the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	uint32_t size;
	/* Rev 4569 also declared here:
	 * struct vmw_master *vmaster = vmw_master(file_priv->master);
	 */
	const struct svga3d_surface_desc *desc;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		num_sizes += req->mip_levels[i];

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS)
		return -EINVAL;

	size = vmw_user_surface_size + 128 +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));


	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		DRM_ERROR("Invalid surface format for surface creation.\n");
		return -EINVAL;
	}

	/* Rev 4569: ret = ttm_read_lock(&vmaster->lock, true); */
	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_no_sizes;
	}
	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
			       GFP_KERNEL);
	if (unlikely(srf->offsets == NULL)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_no_copy;
	}

	srf->base_size = *srf->sizes;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = 0;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride = svga3dsurface_calculate_pitch
				(desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += svga3dsurface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
	res->backup_size = cur_bo_offset;
	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->prime.base.hash.key;
	vmw_resource_unreference(&res);

	/* Rev 4569: ttm_read_unlock(&vmaster->lock); */
	ttm_read_unlock(&dev_priv->reservation_sem);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_no_sizes:
	ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	/* Rev 4569: ttm_read_unlock(&vmaster->lock); */
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_user_surface_reference_ioctl - Ioctl function implementing
 *                                  the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	/*
	 * Rev 4569 instead initialized ret to -EINVAL, looked the base object
	 * up with ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid),
	 * checked its type against VMW_RES_SURFACE (goto out_bad_resource on
	 * mismatch) and added a TTM_REF_USAGE reference with
	 * ttm_ref_object_add() (goto out_no_reference on failure).
	 */
	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	/* Rev 4569 copied the whole srf->sizes array here. */
	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->base_size,
				   sizeof(srf->base_size));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

#endif