KolibriOS Subversion repository — vmwgfx_drv.h

--- vmwgfx_drv.h	(Rev 5078)
+++ vmwgfx_drv.h	(Rev 6296)
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
  * "Software"), to deal in the Software without restriction, including
  * without limitation the rights to use, copy, modify, merge, publish,
  * distribute, sub license, and/or sell copies of the Software, and to
  * permit persons to whom the Software is furnished to do so, subject to
  * the following conditions:
  *
  * The above copyright notice and this permission notice (including the
  * next paragraph) shall be included in all copies or substantial portions
  * of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  **************************************************************************/
 
 #ifndef _VMWGFX_DRV_H_
 #define _VMWGFX_DRV_H_
 
 #include "vmwgfx_reg.h"
 #include 
 #include 
 #include 
-#include 
 //#include 
 #include 
 #include 
 #include 
 #include 
 //#include 
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20140704"
+#define VMWGFX_DRIVER_DATE "20150810"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 6
-#define VMWGFX_DRIVER_PATCHLEVEL 1
+#define VMWGFX_DRIVER_MINOR 9
+#define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
 #define VMWGFX_MAX_VALIDATIONS 2048
 #define VMWGFX_MAX_DISPLAYS 16
 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
-#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
+#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
 
 /*
  * Perhaps we should have sysfs entries for these.
  */
 #define VMWGFX_NUM_GB_CONTEXT 256
 #define VMWGFX_NUM_GB_SHADER 20000
 #define VMWGFX_NUM_GB_SURFACE 32768
 #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_DXCONTEXT 256
+#define VMWGFX_NUM_DXQUERY 512
 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
 			VMWGFX_NUM_GB_SHADER +\
 			VMWGFX_NUM_GB_SURFACE +\
 			VMWGFX_NUM_GB_SCREEN_TARGET)
 
 #define VMW_PL_GMR TTM_PL_PRIV0
 #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
 #define VMW_PL_MOB TTM_PL_PRIV1
 #define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1
 
 #define VMW_RES_CONTEXT ttm_driver_type0
 #define VMW_RES_SURFACE ttm_driver_type1
 #define VMW_RES_STREAM ttm_driver_type2
 #define VMW_RES_FENCE ttm_driver_type3
 #define VMW_RES_SHADER ttm_driver_type4
-
-#define ioread32(addr)          readl(addr)
 
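For reference, the memory-object budget these constants imply works out as follows (annotation, not part of the header):

	/* VMWGFX_NUM_MOB = VMWGFX_NUM_GB_CONTEXT + VMWGFX_NUM_GB_SHADER +
	 *                  VMWGFX_NUM_GB_SURFACE + VMWGFX_NUM_GB_SCREEN_TARGET
	 *                = 256 + 20000 + 32768 + 16 = 53040,
	 * since VMWGFX_NUM_GB_SCREEN_TARGET expands to VMWGFX_MAX_DISPLAYS (16).
	 */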
 static inline void outl(u32 v, u16 port)
 {
     asm volatile("outl %0,%1" : : "a" (v), "dN" (port));
 }
 static inline u32 inl(u16 port)
 {
     u32 v;
     asm volatile("inl %1,%0" : "=a" (v) : "dN" (port));
     return v;
 }
-
-
 struct vmw_fpriv {
-//   struct drm_master *locked_master;
-   struct ttm_object_file *tfile;
-   struct list_head fence_events;
+	struct drm_master *locked_master;
+	struct ttm_object_file *tfile;
+	struct list_head fence_events;
 	bool gb_aware;
 };
 
 struct vmw_dma_buffer {
-   struct ttm_buffer_object base;
-   struct list_head res_list;
+	struct ttm_buffer_object base;
+	struct list_head res_list;
+	s32 pin_count;
+	/* Not ref-counted.  Protected by binding_mutex */
+	struct vmw_resource *dx_query_ctx;
 };
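Since vmw_dma_buffer embeds its TTM buffer object, driver code recovers the wrapper with container_of(). A minimal sketch under that assumption (the helper name here is illustrative, not a declaration from this header):

	static inline struct vmw_dma_buffer *
	vmw_dma_buffer_sketch(struct ttm_buffer_object *bo)
	{
		/* Only valid when bo is known to be embedded in a vmw_dma_buffer. */
		return container_of(bo, struct vmw_dma_buffer, base);
	}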
 
 /**
  * struct vmw_validate_buffer - Carries validation info about buffers.
  *
  * @base: Validation info for TTM.
  * @hash: Hash entry for quick lookup of the TTM buffer object.
  *
  * This structure contains also driver private validation info
  * on top of the info needed by TTM.
  */
 struct vmw_validate_buffer {
-   struct ttm_validate_buffer base;
-   struct drm_hash_item hash;
+	struct ttm_validate_buffer base;
+	struct drm_hash_item hash;
 	bool validate_as_mob;
 };
 
 struct vmw_res_func;
 struct vmw_resource {
 	struct kref kref;
 	struct vmw_private *dev_priv;
 	int id;
 	bool avail;
 	unsigned long backup_size;
 	bool res_dirty; /* Protected by backup buffer reserved */
 	bool backup_dirty; /* Protected by backup buffer reserved */
-    struct vmw_dma_buffer *backup;
+	struct vmw_dma_buffer *backup;
 	unsigned long backup_offset;
+	unsigned long pin_count; /* Protected by resource reserved */
 	const struct vmw_res_func *func;
 	struct list_head lru_head; /* Protected by the resource lock */
 	struct list_head mob_head; /* Protected by @backup reserved */
 	struct list_head binding_head; /* Protected by binding_mutex */
 	void (*res_free) (struct vmw_resource *res);
 	void (*hw_destroy) (struct vmw_resource *res);
 };
 
 
 /*
  * Resources that are managed using ioctls.
  */
 enum vmw_res_type {
 	vmw_res_context,
 	vmw_res_surface,
 	vmw_res_stream,
 	vmw_res_shader,
+	vmw_res_dx_context,
+	vmw_res_cotable,
+	vmw_res_view,
 	vmw_res_max
 };
 
 /*
  * Resources that are managed using command streams.
  */
 enum vmw_cmdbuf_res_type {
-	vmw_cmdbuf_res_compat_shader
+	vmw_cmdbuf_res_shader,
+	vmw_cmdbuf_res_view
 };
 
 struct vmw_cmdbuf_res_manager;
 
 struct vmw_cursor_snooper {
 	struct drm_crtc *crtc;
 	size_t age;
 	uint32_t *image;
 };
 
 struct vmw_framebuffer;
 struct vmw_surface_offset;
 
 struct vmw_surface {
-    struct vmw_resource res;
+	struct vmw_resource res;
 	uint32_t flags;
 	uint32_t format;
 	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
 	struct drm_vmw_size base_size;
 	struct drm_vmw_size *sizes;
 	uint32_t num_sizes;
 	bool scanout;
+	uint32_t array_size;
 	/* TODO so far just a extra pointer */
 	struct vmw_cursor_snooper snooper;
 	struct vmw_surface_offset *offsets;
 	SVGA3dTextureFilter autogen_filter;
 	uint32_t multisample_count;
+	struct list_head view_list;
 };
 
 struct vmw_marker_queue {
 	struct list_head head;
 	u64 lag;
 	u64 lag_time;
 	spinlock_t lock;
 };
 
 struct vmw_fifo_state {
 	unsigned long reserved_size;
-	__le32 *dynamic_buffer;
-	__le32 *static_buffer;
+	u32 *dynamic_buffer;
+	u32 *static_buffer;
 	unsigned long static_buffer_size;
 	bool using_bounce_buffer;
 	uint32_t capabilities;
 	struct mutex fifo_mutex;
 	struct rw_semaphore rwsem;
 	struct vmw_marker_queue marker_queue;
+	bool dx;
 };
 
 struct vmw_relocation {
 	SVGAMobId *mob_loc;
 	SVGAGuestPtr *location;
 	uint32_t index;
 };
 
 /**
  * struct vmw_res_cache_entry - resource information cache entry
  *
  * @valid: Whether the entry is valid, which also implies that the execbuf
  * code holds a reference to the resource, and it's placed on the
  * validation list.
  * @handle: User-space handle of a resource.
  * @res: Non-ref-counted pointer to the resource.
  *
  * Used to avoid frequent repeated user-space handle lookups of the
  * same resource.
  */
 struct vmw_res_cache_entry {
 	bool valid;
 	uint32_t handle;
 	struct vmw_resource *res;
 	struct vmw_resource_val_node *node;
 };
 
 /**
  * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
  */
 enum vmw_dma_map_mode {
 	vmw_dma_phys,           /* Use physical page addresses */
 	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
 	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
 	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
 	vmw_dma_map_max
 };
 
 /**
  * struct vmw_sg_table - Scatter/gather table for binding, with additional
  * device-specific information.
  *
  * @sgt: Pointer to a struct sg_table with binding information
- * @num_regions: Number of regions with device-address contigous pages
+ * @num_regions: Number of regions with device-address contiguous pages
  */
 struct vmw_sg_table {
 	enum vmw_dma_map_mode mode;
 	struct page **pages;
 	const dma_addr_t *addrs;
 	struct sg_table *sgt;
 	unsigned long num_regions;
 	unsigned long num_pages;
 };
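The map mode stored in a vmw_sg_table selects where the DMA addresses come from. A sketch of branching on it; the strings merely restate the enum comments above, and the helper itself is not part of the driver:

	static inline const char *vmw_dma_map_mode_name(enum vmw_dma_map_mode mode)
	{
		switch (mode) {
		case vmw_dma_phys:           return "physical page addresses";
		case vmw_dma_alloc_coherent: return "TTM coherent pages";
		case vmw_dma_map_populate:   return "unmap from DMA just after unpopulate";
		case vmw_dma_map_bind:       return "unmap from DMA just before unbind";
		default:                     return "invalid";
		}
	}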
 
 /**
  * struct vmw_piter - Page iterator that iterates over a list of pages
  * and DMA addresses that could be either a scatter-gather list or
  * arrays
  *
  * @pages: Array of page pointers to the pages.
  * @addrs: DMA addresses to the pages if coherent pages are used.
  * @iter: Scatter-gather page iterator. Current position in SG list.
  * @i: Current position in arrays.
  * @num_pages: Number of pages total.
  * @next: Function to advance the iterator. Returns false if past the list
  * of pages, true otherwise.
  * @dma_address: Function to return the DMA address of the current page.
  */
 struct vmw_piter {
 	struct page **pages;
 	const dma_addr_t *addrs;
 	struct sg_page_iter iter;
 	unsigned long i;
 	unsigned long num_pages;
 	bool (*next)(struct vmw_piter *);
 	dma_addr_t (*dma_address)(struct vmw_piter *);
 	struct page *(*page)(struct vmw_piter *);
 };
 
 /*
- * enum vmw_ctx_binding_type - abstract resource to context binding types
+ * enum vmw_display_unit_type - Describes the display unit
  */
-enum vmw_ctx_binding_type {
-	vmw_ctx_binding_shader,
-	vmw_ctx_binding_rt,
-	vmw_ctx_binding_tex,
-	vmw_ctx_binding_max
-};
-
-/**
- * struct vmw_ctx_bindinfo - structure representing a single context binding
- *
- * @ctx: Pointer to the context structure. NULL means the binding is not
- * active.
- * @res: Non ref-counted pointer to the bound resource.
- * @bt: The binding type.
- * @i1: Union of information needed to unbind.
- */
-struct vmw_ctx_bindinfo {
-	struct vmw_resource *ctx;
-	struct vmw_resource *res;
-	enum vmw_ctx_binding_type bt;
-	bool scrubbed;
-	union {
-		SVGA3dShaderType shader_type;
-		SVGA3dRenderTargetType rt_type;
-		uint32 texture_stage;
-	} i1;
-};
-
-/**
- * struct vmw_ctx_binding - structure representing a single context binding
- *                        - suitable for tracking in a context
- *
- * @ctx_list: List head for context.
- * @res_list: List head for bound resource.
- * @bi: Binding info
- */
-struct vmw_ctx_binding {
-	struct list_head ctx_list;
-	struct list_head res_list;
-	struct vmw_ctx_bindinfo bi;
-};
-
-
-/**
- * struct vmw_ctx_binding_state - context binding state
- *
- * @list: linked list of individual bindings.
- * @render_targets: Render target bindings.
- * @texture_units: Texture units/samplers bindings.
- * @shaders: Shader bindings.
- *
- * Note that this structure also provides storage space for the individual
- * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
- * for individual bindings.
- *
- */
-struct vmw_ctx_binding_state {
-	struct list_head list;
-	struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
-	struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
-	struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
-};
+enum vmw_display_unit_type {
+	vmw_du_invalid = 0,
+	vmw_du_legacy,
+	vmw_du_screen_object,
+	vmw_du_screen_target
+};
+
 
 struct vmw_sw_context{
 	struct drm_open_hash res_ht;
 	bool res_ht_initialized;
 	bool kernel; /**< is the called made from the kernel */
 	struct vmw_fpriv *fp;
 	struct list_head validate_nodes;
 	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
 	uint32_t cur_reloc;
-    struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
+	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
 	uint32_t cur_val_buf;
 	uint32_t *cmd_bounce;
 	uint32_t cmd_bounce_size;
 	struct list_head resource_list;
-	uint32_t fence_flags;
-	struct ttm_buffer_object *cur_query_bo;
+	struct list_head ctx_resource_list; /* For contexts and cotables */
+	struct vmw_dma_buffer *cur_query_bo;
 	struct list_head res_relocations;
 	uint32_t *buf_start;
 	struct vmw_res_cache_entry res_cache[vmw_res_max];
 	struct vmw_resource *last_query_ctx;
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
-	struct vmw_ctx_binding_state staged_bindings;
+	struct vmw_ctx_binding_state *staged_bindings;
+	bool staged_bindings_inuse;
 	struct list_head staged_cmd_res;
+	struct vmw_resource_val_node *dx_ctx_node;
+	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_resource *dx_query_ctx;
+	struct vmw_cmdbuf_res_manager *man;
 };
 
 struct vmw_legacy_display;
 struct vmw_overlay;
 
 struct vmw_master {
-    struct ttm_lock lock;
-	struct mutex fb_surf_mutex;
-	struct list_head fb_surf;
+	struct ttm_lock lock;
 };
 
 struct vmw_vga_topology_state {
 	uint32_t width;
 	uint32_t height;
 	uint32_t primary;
 	uint32_t pos_x;
 	uint32_t pos_y;
 };
+
+
+/*
+ * struct vmw_otable - Guest Memory OBject table metadata
+ *
+ * @size:           Size of the table (page-aligned).
+ * @page_table:     Pointer to a struct vmw_mob holding the page table.
+ */
+struct vmw_otable {
+	unsigned long size;
+	struct vmw_mob *page_table;
+	bool enabled;
+};
+
+struct vmw_otable_batch {
+	unsigned num_otables;
+	struct vmw_otable *otables;
+	struct vmw_resource *context;
+	struct ttm_buffer_object *otable_bo;
+};
 
 struct vmw_private {
-    struct ttm_bo_device bdev;
-    struct ttm_bo_global_ref bo_global_ref;
-    struct drm_global_reference mem_global_ref;
+	struct ttm_bo_device bdev;
+	struct ttm_bo_global_ref bo_global_ref;
+	struct drm_global_reference mem_global_ref;
 
 	struct vmw_fifo_state fifo;
 
 	struct drm_device *dev;
 	unsigned long vmw_chipset;
 	unsigned int io_start;
 	uint32_t vram_start;
 	uint32_t vram_size;
 	uint32_t prim_bb_mem;
 	uint32_t mmio_start;
 	uint32_t mmio_size;
 	uint32_t fb_max_width;
 	uint32_t fb_max_height;
+	uint32_t texture_max_width;
+	uint32_t texture_max_height;
+	uint32_t stdu_max_width;
+	uint32_t stdu_max_height;
 	uint32_t initial_width;
 	uint32_t initial_height;
-	__le32 __iomem *mmio_virt;
-	int mmio_mtrr;
+	u32 *mmio_virt;
 	uint32_t capabilities;
 	uint32_t max_gmr_ids;
 	uint32_t max_gmr_pages;
 	uint32_t max_mob_pages;
 	uint32_t max_mob_size;
 	uint32_t memory_size;
 	bool has_gmr;
 	bool has_mob;
-	struct mutex hw_mutex;
+	spinlock_t hw_lock;
+	spinlock_t cap_lock;
+	bool has_dx;
 
 	/*
 	 * VGA registers.
 	 */
 
 	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
 	uint32_t vga_width;
 	uint32_t vga_height;
 	uint32_t vga_bpp;
 	uint32_t vga_bpl;
 	uint32_t vga_pitchlock;
 
 	uint32_t num_displays;
 
 	/*
 	 * Framebuffer info.
 	 */
 
 	void *fb_info;
+	enum vmw_display_unit_type active_display_unit;
 	struct vmw_legacy_display *ldu_priv;
 	struct vmw_screen_object_display *sou_priv;
 	struct vmw_overlay *overlay_priv;
 
 	/*
 	 * Context and surface management.
 	 */
 
 	rwlock_t resource_lock;
 	struct idr res_idr[vmw_res_max];
 	/*
 	 * Block lastclose from racing with firstopen.
 	 */
 
 	struct mutex init_mutex;
 
 	/*
 	 * A resource manager for kernel-only surfaces and
 	 * contexts.
 	 */
 
 	struct ttm_object_device *tdev;
 
 	/*
 	 * Fencing and IRQs.
 	 */
 
 	atomic_t marker_seq;
 	wait_queue_head_t fence_queue;
 	wait_queue_head_t fifo_queue;
-	int fence_queue_waiters; /* Protected by hw_mutex */
-	int goal_queue_waiters; /* Protected by hw_mutex */
-	atomic_t fifo_queue_waiters;
+	spinlock_t waiter_lock;
+	int fence_queue_waiters; /* Protected by waiter_lock */
+	int goal_queue_waiters; /* Protected by waiter_lock */
+	int cmdbuf_waiters; /* Protected by waiter_lock */
+	int error_waiters; /* Protected by waiter_lock */
+	int fifo_queue_waiters; /* Protected by waiter_lock */
 	uint32_t last_read_seqno;
-	spinlock_t irq_lock;
 	struct vmw_fence_manager *fman;
-	uint32_t irq_mask;
+	uint32_t irq_mask; /* Updates protected by waiter_lock */
 
 	/*
 	 * Device state
 	 */
 
 	uint32_t traces_state;
 	uint32_t enable_state;
 	uint32_t config_done_state;
 
 	/**
 	 * Execbuf
 	 */
 	/**
 	 * Protected by the cmdbuf mutex.
 	 */
 
 	struct vmw_sw_context ctx;
 	struct mutex cmdbuf_mutex;
 	struct mutex binding_mutex;
 
 	/**
 	 * Operating mode.
 	 */
 
 	bool stealth;
 	bool enable_fb;
+	spinlock_t svga_lock;
 
 	/**
 	 * Master management.
 	 */
 
 	struct vmw_master *active_master;
 	struct vmw_master fbdev_master;
-//	struct notifier_block pm_nb;
 	bool suspended;
+	bool refuse_hibernation;
 
 	struct mutex release_mutex;
-	uint32_t num_3d_resources;
+	atomic_t num_fifo_resources;
 
 	/*
 	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
 	 */
 	struct ttm_lock reservation_sem;
 
 	/*
 	 * Query processing. These members
 	 * are protected by the cmdbuf mutex.
 	 */
 
-    struct ttm_buffer_object *dummy_query_bo;
-    struct ttm_buffer_object *pinned_bo;
+	struct vmw_dma_buffer *dummy_query_bo;
+	struct vmw_dma_buffer *pinned_bo;
 	uint32_t query_cid;
 	uint32_t query_cid_valid;
 	bool dummy_query_bo_pinned;
 
 	/*
 	 * Surface swapping. The "surface_lru" list is protected by the
 	 * resource lock in order to be able to destroy a surface and take
 	 * it off the lru atomically. "used_memory_size" is currently
 	 * protected by the cmdbuf mutex for simplicity.
 	 */
 
 	struct list_head res_lru[vmw_res_max];
 	uint32_t used_memory_size;
 
 	/*
 	 * DMA mapping stuff.
 	 */
 	enum vmw_dma_map_mode map_mode;
 
 	/*
 	 * Guest Backed stuff
 	 */
-	struct ttm_buffer_object *otable_bo;
-	struct vmw_otable *otables;
+	struct vmw_otable_batch otable_batch;
+
+	struct vmw_cmdbuf_man *cman;
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
 {
 	return container_of(res, struct vmw_surface, res);
 }
 
 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
 {
 	return (struct vmw_private *)dev->dev_private;
 }
 
 static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
 {
 	return (struct vmw_fpriv *)file_priv->driver_priv;
 }
 
 static inline struct vmw_master *vmw_master(struct drm_master *master)
 {
 	return (struct vmw_master *) master->driver_priv;
 }
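These accessors chain together whenever an ioctl handler needs the per-file TTM state. A minimal sketch built only from the declarations above (the helper name is illustrative):

	static inline struct ttm_object_file *
	vmw_tfile_sketch(struct drm_file *file_priv)
	{
		/* drm_file -> vmw_fpriv -> ttm_object_file used for handle lookups. */
		return vmw_fpriv(file_priv)->tfile;
	}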
 
+/*
+ * The locking here is fine-grained, so that it is performed once
+ * for every read- and write operation. This is of course costly, but we
+ * don't perform much register access in the timing critical paths anyway.
+ * Instead we have the extra benefit of being sure that we don't forget
+ * the hw lock around register accesses.
+ */
 static inline void vmw_write(struct vmw_private *dev_priv,
 			     unsigned int offset, uint32_t value)
 {
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
 	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
 	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
 }
 
 static inline uint32_t vmw_read(struct vmw_private *dev_priv,
 				unsigned int offset)
 {
-	uint32_t val;
+	unsigned long irq_flags;
+	u32 val;
 
+	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
 	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
 	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
 
 	return val;
 }
 
-int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
-void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
+extern void vmw_svga_enable(struct vmw_private *dev_priv);
+extern void vmw_svga_disable(struct vmw_private *dev_priv);
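Rev 6296 serializes each index/value port pair with hw_lock, so a single vmw_read() or vmw_write() is safe against concurrent register users; a read-modify-write spanning two calls still is not, because each call takes and drops the lock independently. Illustrative sketch only; SVGA_REG_ENABLE is assumed to come from the SVGA headers included above, not from this file:

	static inline void vmw_set_enable_bit_sketch(struct vmw_private *dev_priv)
	{
		u32 v = vmw_read(dev_priv, SVGA_REG_ENABLE);	/* locked read */
		vmw_write(dev_priv, SVGA_REG_ENABLE, v | 1);	/* separate locked write */
	}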
 
 
 /**
  * GMR utilities - vmwgfx_gmr.c
  */
 
 extern int vmw_gmr_bind(struct vmw_private *dev_priv,
 			const struct vmw_sg_table *vsgt,
 			unsigned long num_pages,
 			int gmr_id);
 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
 
 /**
  * Resource utilities - vmwgfx_resource.c
  */
 struct vmw_user_resource_conv;
 
 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
 extern struct vmw_resource *
 vmw_resource_reference_unless_doomed(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
-extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+				bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
 extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 				  struct ttm_object_file *tfile,
 				  uint32_t handle,
 				  struct vmw_surface **out_surf,
 				  struct vmw_dma_buffer **out_buf);
 extern int vmw_user_resource_lookup_handle(
 	struct vmw_private *dev_priv,
 	struct ttm_object_file *tfile,
 	uint32_t handle,
 	const struct vmw_user_resource_conv *converter,
 	struct vmw_resource **p_res);
 extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
 extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
 			   struct vmw_dma_buffer *vmw_bo,
 			   size_t size, struct ttm_placement *placement,
 			   bool interuptable,
 			   void (*bo_free) (struct ttm_buffer_object *bo));
 extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 				  struct ttm_object_file *tfile);
 extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 				 struct ttm_object_file *tfile,
 				 uint32_t size,
 				 bool shareable,
 				 uint32_t *handle,
-				 struct vmw_dma_buffer **p_dma_buf);
+				 struct vmw_dma_buffer **p_dma_buf,
+				 struct ttm_base_object **p_base);
 extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 				     struct vmw_dma_buffer *dma_buf,
 				     uint32_t *handle);
 extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 					 struct drm_file *file_priv);
 extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
 					 uint32_t cur_validate_node);
 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
 extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-				  uint32_t id, struct vmw_dma_buffer **out);
+				  uint32_t id, struct vmw_dma_buffer **out,
+				  struct ttm_base_object **base);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 				  struct ttm_object_file *tfile,
 				  uint32_t *inout_id,
 				  struct vmw_resource **out);
 extern void vmw_resource_unreserve(struct vmw_resource *res,
+				   bool switch_backup,
 				   struct vmw_dma_buffer *new_backup,
 				   unsigned long new_backup_offset);
 extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 				     struct ttm_mem_reg *mem);
+extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
+				  struct ttm_mem_reg *mem);
+extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
 extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 				struct vmw_fence_obj *fence);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
 
 /**
  * DMA buffer helper routines - vmwgfx_dmabuf.c
  */
-extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
-				   struct vmw_dma_buffer *bo,
-				   struct ttm_placement *placement,
-				   bool interruptible);
-extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
-			      struct vmw_dma_buffer *buf,
-			      bool pin, bool interruptible);
-extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
-				     struct vmw_dma_buffer *buf,
-				     bool pin, bool interruptible);
-extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
-				       struct vmw_dma_buffer *bo,
-				       bool pin, bool interruptible);
+extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
+				       struct vmw_dma_buffer *bo,
+				       struct ttm_placement *placement,
+				       bool interruptible);
+extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+				  struct vmw_dma_buffer *buf,
+				  bool interruptible);
+extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+					 struct vmw_dma_buffer *buf,
+					 bool interruptible);
+extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+					   struct vmw_dma_buffer *bo,
+					   bool interruptible);
 extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
 			    struct vmw_dma_buffer *bo,
 			    bool interruptible);
 extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
 				 SVGAGuestPtr *ptr);
-extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
+extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
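The Rev 6296 declarations fold the old "bool pin" parameter into dedicated pin calls, each paired with vmw_dmabuf_unpin(). A sketch of that pairing, with error handling trimmed and the function name illustrative:

	static int vmw_pin_for_readback_sketch(struct vmw_private *dev_priv,
					       struct vmw_dma_buffer *buf)
	{
		int ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true);

		if (ret != 0)
			return ret;
		/* ... issue commands that require the buffer in VRAM ... */
		return vmw_dmabuf_unpin(dev_priv, buf, true);
	}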
 
 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
  */
 
 extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 extern int vmw_present_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
 extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
 				      struct drm_file *file_priv);
+extern unsigned int vmw_fops_poll(struct file *filp,
+				  struct poll_table_struct *wait);
 extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
 			     size_t count, loff_t *offset);
 
 /**
  * Fifo utilities - vmwgfx_fifo.c
  */
 
 extern int vmw_fifo_init(struct vmw_private *dev_priv,
 			 struct vmw_fifo_state *fifo);
 extern void vmw_fifo_release(struct vmw_private *dev_priv,
 			     struct vmw_fifo_state *fifo);
 extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
+extern void *
+vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
 extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
 extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
 			       uint32_t *seqno);
+extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
 extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
 extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
 				     uint32_t cid);
+extern int vmw_fifo_flush(struct vmw_private *dev_priv,
+			  bool interruptible);
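FIFO submission follows a reserve/commit protocol: vmw_fifo_reserve() hands back command-buffer space, the caller fills it in place, and vmw_fifo_commit() publishes exactly the bytes reserved. A sketch assuming a hypothetical fixed-size command; real command ids and layouts come from the SVGA headers, not from this file:

	static int vmw_send_cmd_sketch(struct vmw_private *dev_priv,
				       uint32 cmd_id, uint32 arg)
	{
		struct {
			uint32 id;
			uint32 arg;
		} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

		if (cmd == NULL)
			return -ENOMEM;	/* reservation failed */
		cmd->id = cmd_id;
		cmd->arg = arg;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
		return 0;
	}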
 
 /**
  * TTM glue - vmwgfx_ttm_glue.c
  */
 
 extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
 extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 
 /**
  * TTM buffer object driver - vmwgfx_buffer.c
  */
 
 extern const size_t vmw_tt_size;
 extern struct ttm_placement vmw_vram_placement;
 extern struct ttm_placement vmw_vram_ne_placement;
 extern struct ttm_placement vmw_vram_sys_placement;
 extern struct ttm_placement vmw_vram_gmr_placement;
 extern struct ttm_placement vmw_vram_gmr_ne_placement;
 extern struct ttm_placement vmw_sys_placement;
 extern struct ttm_placement vmw_sys_ne_placement;
 extern struct ttm_placement vmw_evictable_placement;
 extern struct ttm_placement vmw_srf_placement;
 extern struct ttm_placement vmw_mob_placement;
+extern struct ttm_placement vmw_mob_ne_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
 extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
 extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
 extern const struct vmw_sg_table *
 vmw_bo_sg_table(struct ttm_buffer_object *bo);
 extern void vmw_piter_start(struct vmw_piter *viter,
 			    const struct vmw_sg_table *vsgt,
 			    unsigned long p_offs);
 
 /**
  * vmw_piter_next - Advance the iterator one page.
  *
  * @viter: Pointer to the iterator to advance.
  *
  * Returns false if past the list of pages, true otherwise.
  */
 static inline bool vmw_piter_next(struct vmw_piter *viter)
 {
 	return viter->next(viter);
 }
 
 /**
  * vmw_piter_dma_addr - Return the DMA address of the current page.
  *
  * @viter: Pointer to the iterator
  *
  * Returns the DMA address of the page pointed to by @viter.
  */
 static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
 {
 	return viter->dma_address(viter);
 }
 
 /**
  * vmw_piter_page - Return a pointer to the current page.
  *
  * @viter: Pointer to the iterator
  *
  * Returns the DMA address of the page pointed to by @viter.
  */
 static inline struct page *vmw_piter_page(struct vmw_piter *viter)
 {
 	return viter->page(viter);
 }
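Putting the iterator pieces together: start at page 0 of a buffer's scatter/gather table and visit every page. A sketch that assumes vmw_piter_start() leaves the iterator positioned on the first page, with vmw_piter_next() advancing it as documented above:

	static void vmw_walk_pages_sketch(struct ttm_buffer_object *bo)
	{
		const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
		struct vmw_piter viter;
		unsigned long i;

		vmw_piter_start(&viter, vsgt, 0);
		for (i = 0; i < vsgt->num_pages; ++i) {
			dma_addr_t addr = vmw_piter_dma_addr(&viter);

			(void)addr;	/* ... program addr into the device ... */
			if (!vmw_piter_next(&viter))
				break;
		}
	}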
 
 /**
  * Command submission - vmwgfx_execbuf.c
  */
 
-extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv);
+extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
+			     struct drm_file *file_priv, size_t size);
 extern int vmw_execbuf_process(struct drm_file *file_priv,
 			       struct vmw_private *dev_priv,
 			       void __user *user_commands,
 			       void *kernel_commands,
 			       uint32_t command_size,
 			       uint64_t throttle_us,
+			       uint32_t dx_context_handle,
 			       struct drm_vmw_fence_rep __user
 			       *user_fence_rep,
 			       struct vmw_fence_obj **out_fence);
 extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 					    struct vmw_fence_obj *fence);
 extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
 
 extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
 				      struct vmw_private *dev_priv,
 				      struct vmw_fence_obj **p_fence,
 				      uint32_t *p_handle);
 extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 					struct vmw_fpriv *vmw_fp,
 					int ret,
 					struct drm_vmw_fence_rep __user
 					*user_fence_rep,
 					struct vmw_fence_obj *fence,
 					uint32_t fence_handle);
+extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+				      struct ttm_buffer_object *bo,
+				      bool interruptible,
+				      bool validate_as_mob);
+
 
 /**
  * IRQs and wating - vmwgfx_irq.c
  */
 
 extern irqreturn_t vmw_irq_handler(int irq, void *arg);
 extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
-			     uint32_t seqno, bool interruptible,
-			     unsigned long timeout);
+			  uint32_t seqno, bool interruptible,
+			  unsigned long timeout);
 extern void vmw_irq_preinstall(struct drm_device *dev);
 extern int vmw_irq_postinstall(struct drm_device *dev);
 extern void vmw_irq_uninstall(struct drm_device *dev);
 extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
 				uint32_t seqno);
 extern int vmw_fallback_wait(struct vmw_private *dev_priv,
 			     bool lazy,
 			     bool fifo_idle,
 			     uint32_t seqno,
 			     bool interruptible,
 			     unsigned long timeout);
 extern void vmw_update_seqno(struct vmw_private *dev_priv,
 				struct vmw_fifo_state *fifo_state);
 extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
 extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
 extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
 extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
+				   int *waiter_count);
+extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+				      u32 flag, int *waiter_count);
851
 
879
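
The paired waiter_add()/waiter_remove() calls keep per-cause waiter counts so an interrupt source only stays unmasked while at least one thread is waiting on it. A minimal sketch of that bracketing, with the flag and counter as placeholders rather than real driver fields:

static void example_waiter_scope(struct vmw_private *dev_priv,
				 u32 irq_flag, int *waiter_count)
{
	vmw_generic_waiter_add(dev_priv, irq_flag, waiter_count);
	/* ... sleep on a condition that this interrupt signals ... */
	vmw_generic_waiter_remove(dev_priv, irq_flag, waiter_count);
}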
 
/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
			   uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
			   uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_marker_queue *queue, uint32_t us);
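
The marker queue implements throttling: each submission pushes a marker carrying its seqno, and vmw_wait_lag() blocks until the device has caught up to within the given number of microseconds of the queue head. A hedged sketch of that flow:

static int example_throttled_submit(struct vmw_private *dev_priv,
				    struct vmw_marker_queue *queue,
				    uint32_t seqno, uint32_t lag_us)
{
	int ret = vmw_marker_push(queue, seqno);

	if (ret)
		return ret;
	/* Block until the device lag drops below lag_us. */
	return vmw_wait_lag(dev_priv, queue, lag_us);
}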
 
/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);
 
/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
-u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
-int vmw_enable_vblank(struct drm_device *dev, int crtc);
-void vmw_disable_vblank(struct drm_device *dev, int crtc);
+u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
+int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
+void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
-int vmw_kms_readback(struct vmw_private *dev_priv,
-		     struct drm_file *file_priv,
-		     struct vmw_framebuffer *vfb,
-		     struct drm_vmw_fence_rep __user *user_fence_rep,
-		     struct drm_vmw_rect *clips,
-		     uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
+void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
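
vmw_kms_validate_mode_vram() takes a pitch in bytes plus a height and, as the name suggests, checks whether the resulting framebuffer fits in device memory. A sketch for a packed 32bpp mode; the 4-byte pixel size and the absence of pitch alignment are this example's assumptions:

static bool example_mode_fits_vram(struct vmw_private *dev_priv,
				   uint32_t width, uint32_t height)
{
	uint32_t pitch = width * 4;	/* assumed 32bpp, unaligned pitch */

	return vmw_kms_validate_mode_vram(dev_priv, pitch, height);
}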
 
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
+extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
+extern void vmw_resource_unpin(struct vmw_resource *res);
+extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
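
The new pin/unpin pair brackets work that needs a resource to stay resident for the duration of an operation. A minimal sketch:

static int example_with_pinned_resource(struct vmw_resource *res)
{
	int ret = vmw_resource_pin(res, true);	/* interruptible pin */

	if (ret)
		return ret;
	/* ... emit device commands that reference 'res' ... */
	vmw_resource_unpin(res);
	return 0;
}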
 
/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
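
The stop/pause/resume entry points exist so overlays can be quiesced around events that invalidate their backing, such as a suspend or master switch. A sketch of the pause/resume pattern:

static int example_overlay_quiesce(struct vmw_private *dev_priv)
{
	int ret = vmw_overlay_pause_all(dev_priv);

	if (ret)
		return ret;
	/* ... device state change that invalidates overlay backing ... */
	return vmw_overlay_resume_all(dev_priv);
}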
 
/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
 
/**
+ * Prime - vmwgfx_prime.c
+ */
+
+extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
+extern int vmw_prime_fd_to_handle(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  int fd, u32 *handle);
+extern int vmw_prime_handle_to_fd(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  uint32_t handle, uint32_t flags,
+				  int *prime_fd);
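
These are the driver's PRIME import/export hooks; they are wired into the drm_driver descriptor rather than called directly. A sketch of that wiring, where the field names are assumed from the DRM core of this era rather than stated by this header:

static struct drm_driver example_driver = {
	/* ... other fields elided ... */
	.prime_fd_to_handle = vmw_prime_fd_to_handle,	/* assumed field */
	.prime_handle_to_fd = vmw_prime_handle_to_fd,	/* assumed field */
};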
 
/*
 * Memory object management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);
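
The declarations above imply a create, bind, unbind, destroy lifecycle for a memory object, with 'vsgt' describing the backing pages and 'mob_id' the device-visible identifier. A hedged sketch of that lifecycle:

static int example_mob_lifecycle(struct vmw_private *dev_priv,
				 const struct vmw_sg_table *vsgt,
				 unsigned long num_pages, int32_t mob_id)
{
	struct vmw_mob *mob = vmw_mob_create(num_pages);
	int ret;

	if (!mob)
		return -ENOMEM;
	ret = vmw_mob_bind(dev_priv, mob, vsgt, num_pages, mob_id);
	if (ret) {
		vmw_mob_destroy(mob);
		return ret;
	}
	/* ... the device may now reference the MOB by mob_id ... */
	vmw_mob_unbind(dev_priv, mob);
	vmw_mob_destroy(mob);
	return 0;
}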
 
/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

-extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);

extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
+extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
-extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
-				   const struct vmw_ctx_bindinfo *ci);
-extern void
-vmw_context_binding_state_transfer(struct vmw_resource *res,
-				   struct vmw_ctx_binding_state *cbs);
-extern void vmw_context_binding_res_list_kill(struct list_head *head);
-extern void vmw_context_binding_res_list_scrub(struct list_head *head);
-extern int vmw_context_rebind_all(struct vmw_resource *ctx);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
+extern struct vmw_cmdbuf_res_manager *
+vmw_context_res_man(struct vmw_resource *ctx);
+extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+						SVGACOTableType cotable_type);
+struct vmw_ctx_binding_state;
+extern struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx);
+extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+					  bool readback);
+extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+				     struct vmw_dma_buffer *mob);
+extern struct vmw_dma_buffer *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
 
/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
+extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+				       struct drm_file *file_priv);
+extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+				       struct drm_file *file_priv);
+extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+					  struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+			       uint32_t user_accounting_size,
+			       uint32_t svga3d_flags,
+			       SVGA3dSurfaceFormat format,
+			       bool for_scanout,
+			       uint32_t num_mip_levels,
+			       uint32_t multisample_count,
+			       uint32_t array_size,
+			       struct drm_vmw_size size,
+			       struct vmw_surface **srf_out);
 
/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;
+
+extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv);
+extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
+				 struct vmw_cmdbuf_res_manager *man,
+				 u32 user_key, const void *bytecode,
+				 SVGA3dShaderType shader_type,
+				 size_t size,
+				 struct list_head *list);
+extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+			     u32 user_key, SVGA3dShaderType shader_type,
+			     struct list_head *list);
+extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+			     struct vmw_resource *ctx,
+			     u32 user_key,
+			     SVGA3dShaderType shader_type,
+			     struct list_head *list);
+extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+					     struct list_head *list,
+					     bool readback);
+
+extern struct vmw_resource *
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+		  u32 user_key, SVGA3dShaderType shader_type);
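
vmw_shader_lookup() resolves a user key to the backing shader resource through a command-buffer resource manager. A one-line sketch; SVGA3D_SHADERTYPE_VS is an SVGA device constant assumed from the device headers:

static struct vmw_resource *
example_find_vertex_shader(struct vmw_cmdbuf_res_manager *man, u32 user_key)
{
	return vmw_shader_lookup(man, user_key, SVGA3D_SHADERTYPE_VS);
}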
 
+/*
+ * Command buffer managed resources - vmwgfx_cmdbuf_res.c
+ */
+
extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
-				 struct list_head *list);
+				 struct list_head *list,
+				 struct vmw_resource **res);
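
The list argument on add/remove implements staged updates: changes accumulate on a caller-owned list and only become visible on vmw_cmdbuf_res_commit(), or are undone by vmw_cmdbuf_res_revert(). A sketch of that transaction; vmw_cmdbuf_res_shader as a resource type is an assumption about the enum's values:

static int example_staged_add(struct vmw_cmdbuf_res_manager *man,
			      u32 user_key, struct vmw_resource *res)
{
	struct list_head staging;
	int ret;

	INIT_LIST_HEAD(&staging);
	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader, /* assumed type */
				 user_key, res, &staging);
	if (ret) {
		vmw_cmdbuf_res_revert(&staging);	/* undo staged entries */
		return ret;
	}
	vmw_cmdbuf_res_commit(&staging);	/* make them visible */
	return 0;
}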
 
+/*
+ * COTable management - vmwgfx_cotable.c
+ */
+extern const SVGACOTableType vmw_cotable_scrub_order[];
+extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+					      struct vmw_resource *ctx,
+					      u32 type);
+extern int vmw_cotable_notify(struct vmw_resource *res, int id);
+extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
+extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
+				     struct list_head *head);
 
+/*
+ * Command buffer management - vmwgfx_cmdbuf.c
+ */
+struct vmw_cmdbuf_man;
+struct vmw_cmdbuf_header;
+
+extern struct vmw_cmdbuf_man *
+vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
+extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
+				    size_t size, size_t default_size);
+extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
+extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
+extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
+			   unsigned long timeout);
+extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
+				int ctx_id, bool interruptible,
+				struct vmw_cmdbuf_header *header);
+extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
+			      struct vmw_cmdbuf_header *header,
+			      bool flush);
+extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
+extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
+			      size_t size, bool interruptible,
+			      struct vmw_cmdbuf_header **p_header);
+extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
+extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
+				bool interruptible);
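
vmw_cmdbuf_reserve() and vmw_cmdbuf_commit() form the fast path for command space: reserve from the pool, fill in commands, commit (optionally flushing). A sketch; treating the reserve return value as an ERR_PTR and using SVGA3D_INVALID_ID for "no context" are assumptions here:

static int example_cmdbuf_submit(struct vmw_cmdbuf_man *man, size_t size)
{
	void *cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID,
				       true, NULL);

	if (IS_ERR(cmd))	/* assumed error convention */
		return PTR_ERR(cmd);
	/* ... write 'size' bytes of device commands at 'cmd' ... */
	vmw_cmdbuf_commit(man, size, NULL, true /* flush */);
	return 0;
}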
 
/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}
 
-extern struct drm_device *main_device;
-extern struct drm_file   *drm_file_handlers[256];
-
-typedef struct
-{
-  int width;
-  int height;
-  int bpp;
-  int freq;
-}videomode_t;
+static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
+{
+	atomic_inc(&dev_priv->num_fifo_resources);
+}
+
+static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
+{
+	atomic_dec(&dev_priv->num_fifo_resources);
+}
+
+/**
+ * vmw_mmio_read - Perform a MMIO read from volatile memory
+ *
+ * @addr: The address to read from
+ *
+ * This function is intended to be equivalent to ioread32() on
+ * memremap'd memory, but without byteswapping.
+ */
+static inline u32 vmw_mmio_read(u32 *addr)
+{
+	return READ_ONCE(*addr);
+}
+
+/**
+ * vmw_mmio_write - Perform a MMIO write to volatile memory
+ *
+ * @addr: The address to write to
+ *
+ * This function is intended to be equivalent to iowrite32 on
+ * memremap'd memory, but without byteswapping.
+ */
+static inline void vmw_mmio_write(u32 value, u32 *addr)
+{
+	WRITE_ONCE(*addr, value);
+}
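
vmw_mmio_read() and vmw_mmio_write() stand in for ioread32()/iowrite32() on the memremap'd FIFO so accesses stay single, untorn loads and stores without byteswapping. A usage sketch; 'fifo_mem' as the mapped base and SVGA_FIFO_FENCE as a register index are assumptions taken from the device headers:

static u32 example_read_fifo_fence(u32 *fifo_mem)
{
	return vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);	/* assumed index */
}
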
#endif