Subversion Repositories Kolibri OS

Rev 4111 → Rev 4569
Unified diff: lines prefixed "-" appear only in Rev 4111, lines prefixed "+" only in Rev 4569; other lines are unchanged context.
Line 30... Line 30...
 
 #include "vmwgfx_reg.h"
 #include 
 #include 
+#include 
 #include 
 //#include 
 #include 
 #include 
 //#include 
 #include 
 //#include 
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20120209"
+#define VMWGFX_DRIVER_DATE "20121114"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 4
+#define VMWGFX_DRIVER_MINOR 5
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
 #define VMWGFX_MAX_VALIDATIONS 2048
 #define VMWGFX_MAX_DISPLAYS 16
 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
+#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
+
+/*
+ * Perhaps we should have sysfs entries for these.
+ */
+#define VMWGFX_NUM_GB_CONTEXT 256
+#define VMWGFX_NUM_GB_SHADER 20000
+#define VMWGFX_NUM_GB_SURFACE 32768
+#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
+			VMWGFX_NUM_GB_SHADER +\
+			VMWGFX_NUM_GB_SURFACE +\
+			VMWGFX_NUM_GB_SCREEN_TARGET)
 
 #define VMW_PL_GMR TTM_PL_PRIV0
 #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
+#define VMW_PL_MOB TTM_PL_PRIV1
+#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1
 
 #define VMW_RES_CONTEXT ttm_driver_type0
 #define VMW_RES_SURFACE ttm_driver_type1
 #define VMW_RES_STREAM ttm_driver_type2
+#define VMW_RES_FENCE ttm_driver_type3
Line 96... Line 113...
  * on top of the info needed by TTM.
  */
 struct vmw_validate_buffer {
    struct ttm_validate_buffer base;
    struct drm_hash_item hash;
+	bool validate_as_mob;
 };
 
 struct vmw_res_func;
 struct vmw_resource {
Line 112... Line 130...
     struct vmw_dma_buffer *backup;
 	unsigned long backup_offset;
 	const struct vmw_res_func *func;
 	struct list_head lru_head; /* Protected by the resource lock */
 	struct list_head mob_head; /* Protected by @backup reserved */
+	struct list_head binding_head; /* Protected by binding_mutex */
 	void (*res_free) (struct vmw_resource *res);
 	void (*hw_destroy) (struct vmw_resource *res);
 };
 
 enum vmw_res_type {
 	vmw_res_context,
 	vmw_res_surface,
 	vmw_res_stream,
+	vmw_res_shader,
 	vmw_res_max
 };
 
Line 168... Line 188...
 	struct rw_semaphore rwsem;
 	struct vmw_marker_queue marker_queue;
 };
 
 struct vmw_relocation {
+	SVGAMobId *mob_loc;
 	SVGAGuestPtr *location;
 	uint32_t index;
 };
Line 191... Line 212...
 	uint32_t handle;
 	struct vmw_resource *res;
 	struct vmw_resource_val_node *node;
 };
+
+/**
+ * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
+ */
+enum vmw_dma_map_mode {
+	vmw_dma_phys,           /* Use physical page addresses */
+	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
+	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
+	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
+	vmw_dma_map_max
+};
+
+/**
+ * struct vmw_sg_table - Scatter/gather table for binding, with additional
+ * device-specific information.
+ *
+ * @sgt: Pointer to a struct sg_table with binding information
+ * @num_regions: Number of regions with device-address contigous pages
+ */
+struct vmw_sg_table {
+	enum vmw_dma_map_mode mode;
+	struct page **pages;
+	const dma_addr_t *addrs;
+	struct sg_table *sgt;
+	unsigned long num_regions;
+	unsigned long num_pages;
+};
+
+/**
+ * struct vmw_piter - Page iterator that iterates over a list of pages
+ * and DMA addresses that could be either a scatter-gather list or
+ * arrays
+ *
+ * @pages: Array of page pointers to the pages.
+ * @addrs: DMA addresses to the pages if coherent pages are used.
+ * @iter: Scatter-gather page iterator. Current position in SG list.
+ * @i: Current position in arrays.
+ * @num_pages: Number of pages total.
+ * @next: Function to advance the iterator. Returns false if past the list
+ * of pages, true otherwise.
+ * @dma_address: Function to return the DMA address of the current page.
+ */
+struct vmw_piter {
+	struct page **pages;
+	const dma_addr_t *addrs;
+	struct sg_page_iter iter;
+	unsigned long i;
+	unsigned long num_pages;
+	bool (*next)(struct vmw_piter *);
+	dma_addr_t (*dma_address)(struct vmw_piter *);
+	struct page *(*page)(struct vmw_piter *);
+};
+
+/*
+ * enum vmw_ctx_binding_type - abstract resource to context binding types
+ */
+enum vmw_ctx_binding_type {
+	vmw_ctx_binding_shader,
+	vmw_ctx_binding_rt,
+	vmw_ctx_binding_tex,
+	vmw_ctx_binding_max
+};
+
+/**
+ * struct vmw_ctx_bindinfo - structure representing a single context binding
+ *
+ * @ctx: Pointer to the context structure. NULL means the binding is not
+ * active.
+ * @res: Non ref-counted pointer to the bound resource.
+ * @bt: The binding type.
+ * @i1: Union of information needed to unbind.
+ */
+struct vmw_ctx_bindinfo {
+	struct vmw_resource *ctx;
+	struct vmw_resource *res;
+	enum vmw_ctx_binding_type bt;
+	union {
+		SVGA3dShaderType shader_type;
+		SVGA3dRenderTargetType rt_type;
+		uint32 texture_stage;
+	} i1;
+};
+
+/**
+ * struct vmw_ctx_binding - structure representing a single context binding
+ *                        - suitable for tracking in a context
+ *
+ * @ctx_list: List head for context.
+ * @res_list: List head for bound resource.
+ * @bi: Binding info
+ */
+struct vmw_ctx_binding {
+	struct list_head ctx_list;
+	struct list_head res_list;
+	struct vmw_ctx_bindinfo bi;
+};
+
+
+/**
+ * struct vmw_ctx_binding_state - context binding state
+ *
+ * @list: linked list of individual bindings.
+ * @render_targets: Render target bindings.
+ * @texture_units: Texture units/samplers bindings.
+ * @shaders: Shader bindings.
+ *
+ * Note that this structure also provides storage space for the individual
+ * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
+ * for individual bindings.
+ *
+ */
+struct vmw_ctx_binding_state {
+	struct list_head list;
+	struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
+	struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
+	struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
+};
 
 struct vmw_sw_context{
 	struct drm_open_hash res_ht;
 	bool res_ht_initialized;
 	bool kernel; /**< is the called made from the kernel */
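The binding structures added in this hunk are consumed by vmw_context_binding_add(), which Rev 4569 declares further down in this header. As a hedged illustration only (the helper name, the caller and the error handling below are not from the file, and SVGA3D_RT_COLOR0 is just one example render-target type), a render-target binding would be staged roughly like this:

/*
 * Hypothetical helper, not part of the driver: fills a struct
 * vmw_ctx_bindinfo as laid out above and records it in a context's
 * staged binding state.
 */
static int example_stage_rt_binding(struct vmw_ctx_binding_state *cbs,
				    struct vmw_resource *ctx,
				    struct vmw_resource *surf)
{
	struct vmw_ctx_bindinfo bi;

	bi.ctx = ctx;                     /* owning context resource */
	bi.res = surf;                    /* non ref-counted bound resource */
	bi.bt = vmw_ctx_binding_rt;       /* binding type selects the union member */
	bi.i1.rt_type = SVGA3D_RT_COLOR0; /* info needed later to unbind */

	/* vmw_context_binding_add() is declared later in this header (Rev 4569). */
	return vmw_context_binding_add(cbs, &bi);
}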
Line 212... Line 350...
 	uint32_t *buf_start;
 	struct vmw_res_cache_entry res_cache[vmw_res_max];
 	struct vmw_resource *last_query_ctx;
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
+	struct vmw_ctx_binding_state staged_bindings;
 };
 
 struct vmw_legacy_display;
Line 243... Line 382...
 	struct drm_device *dev;
 	unsigned long vmw_chipset;
 	unsigned int io_start;
 	uint32_t vram_start;
 	uint32_t vram_size;
+	uint32_t prim_bb_mem;
 	uint32_t mmio_start;
 	uint32_t mmio_size;
 	uint32_t fb_max_width;
 	uint32_t fb_max_height;
 	uint32_t initial_width;
 	uint32_t initial_height;
 	__le32 __iomem *mmio_virt;
 	int mmio_mtrr;
 	uint32_t capabilities;
-	uint32_t max_gmr_descriptors;
 	uint32_t max_gmr_ids;
 	uint32_t max_gmr_pages;
+	uint32_t max_mob_pages;
 	uint32_t memory_size;
 	bool has_gmr;
+	bool has_mob;
 	struct mutex hw_mutex;
 
 	/*
 	 * VGA registers.
Line 332... Line 473...
 	 * Protected by the cmdbuf mutex.
 	 */
 
 	struct vmw_sw_context ctx;
 	struct mutex cmdbuf_mutex;
+	struct mutex binding_mutex;
 
 	/**
Line 344... Line 486...
 
 	/**
 	 * Master management.
 	 */
 
-//   struct vmw_master *active_master;
-//   struct vmw_master fbdev_master;
-//   struct notifier_block pm_nb;
+	struct vmw_master *active_master;
+	struct vmw_master fbdev_master;
+//	struct notifier_block pm_nb;
 	bool suspended;
Line 372... Line 514...
 	 * protected by the cmdbuf mutex for simplicity.
 	 */
 
 	struct list_head res_lru[vmw_res_max];
 	uint32_t used_memory_size;
+
+	/*
+	 * DMA mapping stuff.
+	 */
+	enum vmw_dma_map_mode map_mode;
+
+	/*
+	 * Guest Backed stuff
+	 */
+	struct ttm_buffer_object *otable_bo;
+	struct vmw_otable *otables;
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
Line 419... Line 572...
 /**
  * GMR utilities - vmwgfx_gmr.c
  */
 
 extern int vmw_gmr_bind(struct vmw_private *dev_priv,
-			struct page *pages[],
+			const struct vmw_sg_table *vsgt,
 			unsigned long num_pages,
 			int gmr_id);
 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
 
 /**
  * Resource utilities - vmwgfx_resource.c
  */
 struct vmw_user_resource_conv;
-extern const struct vmw_user_resource_conv *user_surface_converter;
-extern const struct vmw_user_resource_conv *user_context_converter;
 
-extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
 extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
-extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
-				     struct drm_file *file_priv);
-extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
-				    struct drm_file *file_priv);
-extern int vmw_context_check(struct vmw_private *dev_priv,
-			     struct ttm_object_file *tfile,
-			     int id,
-			     struct vmw_resource **p_res);
 extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 				  struct ttm_object_file *tfile,
Line 456... Line 598...
 	struct vmw_private *dev_priv,
 	struct ttm_object_file *tfile,
 	uint32_t handle,
 	const struct vmw_user_resource_conv *converter,
 	struct vmw_resource **p_res);
-extern void vmw_surface_res_free(struct vmw_resource *res);
-extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
-				     struct drm_file *file_priv);
-extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
-				    struct drm_file *file_priv);
-extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
-				       struct drm_file *file_priv);
-extern int vmw_surface_check(struct vmw_private *dev_priv,
-			     struct ttm_object_file *tfile,
-			     uint32_t handle, int *id);
-extern int vmw_surface_validate(struct vmw_private *dev_priv,
-				struct vmw_surface *srf);
 extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
 extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
 			   struct vmw_dma_buffer *vmw_bo,
 			   size_t size, struct ttm_placement *placement,
 			   bool interuptable,
 			   void (*bo_free) (struct ttm_buffer_object *bo));
 extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 				  struct ttm_object_file *tfile);
+extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+				 struct ttm_object_file *tfile,
+				 uint32_t size,
+				 bool shareable,
+				 uint32_t *handle,
+				 struct vmw_dma_buffer **p_dma_buf);
+extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+				     struct vmw_dma_buffer *dma_buf,
+				     uint32_t *handle);
 extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
+extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+					 struct drm_file *file_priv);
 extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
 					 uint32_t cur_validate_node);
 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
 extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 				  uint32_t id, struct vmw_dma_buffer **out);
Line 537... Line 678...
 				struct drm_file *file_priv);
 extern int vmw_present_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
 extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
 				      struct drm_file *file_priv);
-//extern unsigned int vmw_fops_poll(struct file *filp,
-//                 struct poll_table_struct *wait);
 extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
 			     size_t count, loff_t *offset);
 
 /**
Line 572... Line 711...
 
 /**
  * TTM buffer object driver - vmwgfx_buffer.c
  */
 
+extern const size_t vmw_tt_size;
 extern struct ttm_placement vmw_vram_placement;
 extern struct ttm_placement vmw_vram_ne_placement;
 extern struct ttm_placement vmw_vram_sys_placement;
 extern struct ttm_placement vmw_vram_gmr_placement;
 extern struct ttm_placement vmw_vram_gmr_ne_placement;
 extern struct ttm_placement vmw_sys_placement;
+extern struct ttm_placement vmw_sys_ne_placement;
 extern struct ttm_placement vmw_evictable_placement;
 extern struct ttm_placement vmw_srf_placement;
+extern struct ttm_placement vmw_mob_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
+extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
+extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
+extern const struct vmw_sg_table *
+vmw_bo_sg_table(struct ttm_buffer_object *bo);
+extern void vmw_piter_start(struct vmw_piter *viter,
+			    const struct vmw_sg_table *vsgt,
+			    unsigned long p_offs);
+
+/**
+ * vmw_piter_next - Advance the iterator one page.
+ *
+ * @viter: Pointer to the iterator to advance.
+ *
+ * Returns false if past the list of pages, true otherwise.
+ */
+static inline bool vmw_piter_next(struct vmw_piter *viter)
+{
+	return viter->next(viter);
+}
+
+/**
+ * vmw_piter_dma_addr - Return the DMA address of the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * Returns the DMA address of the page pointed to by @viter.
+ */
+static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
+{
+	return viter->dma_address(viter);
+}
+
+/**
+ * vmw_piter_page - Return a pointer to the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * Returns the DMA address of the page pointed to by @viter.
+ */
+static inline struct page *vmw_piter_page(struct vmw_piter *viter)
+{
+	return viter->page(viter);
+}
 
Line 618... Line 803...
 
 /**
  * IRQs and wating - vmwgfx_irq.c
  */
 
-extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t vmw_irq_handler(int irq, void *arg);
 extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
 			     uint32_t seqno, bool interruptible,
 			     unsigned long timeout);
Line 737... Line 922...
  */
 
 extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
 
+/**
+/*
+ * MemoryOBject management -  vmwgfx_mob.c
+ */
+struct vmw_mob;
+extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
+			const struct vmw_sg_table *vsgt,
+			unsigned long num_data_pages, int32_t mob_id);
+extern void vmw_mob_unbind(struct vmw_private *dev_priv,
+			   struct vmw_mob *mob);
+extern void vmw_mob_destroy(struct vmw_mob *mob);
+extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
+extern int vmw_otables_setup(struct vmw_private *dev_priv);
+extern void vmw_otables_takedown(struct vmw_private *dev_priv);
+
+/*
+ * Context management - vmwgfx_context.c
+ */
+
+extern const struct vmw_user_resource_conv *user_context_converter;
+
+extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
+
+extern int vmw_context_check(struct vmw_private *dev_priv,
+			     struct ttm_object_file *tfile,
+			     int id,
+			     struct vmw_resource **p_res);
+extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
+				   const struct vmw_ctx_bindinfo *ci);
+extern void
+vmw_context_binding_state_transfer(struct vmw_resource *res,
+				   struct vmw_ctx_binding_state *cbs);
+extern void vmw_context_binding_res_list_kill(struct list_head *head);
+
+/*
+ * Surface management - vmwgfx_surface.c
+ */
+
+extern const struct vmw_user_resource_conv *user_surface_converter;
+
+extern void vmw_surface_res_free(struct vmw_resource *res);
+extern int vmw_surface_check(struct vmw_private *dev_priv,
+			     struct ttm_object_file *tfile,
+			     uint32_t handle, int *id);
+extern int vmw_surface_validate(struct vmw_private *dev_priv,
+				struct vmw_surface *srf);
+
+/*
+ * Shader management - vmwgfx_shader.c
+ */
+
+extern const struct vmw_user_resource_conv *user_shader_converter;
 /**
  * Inline helper functions
  */
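The MOB entry points added in Rev 4569 (vmw_mob_create(), vmw_mob_bind(), vmw_mob_unbind(), vmw_mob_destroy() above) suggest a create → bind → unbind → destroy lifecycle. A hedged sketch built only from those declared signatures — the caller, the error handling and the way vsgt/num_pages are obtained are assumptions for illustration:

/*
 * Illustrative sketch, not code from the driver: expected lifecycle of a
 * Memory OBject (MOB) based on the declarations above.
 */
static int example_mob_lifecycle(struct vmw_private *dev_priv,
				 const struct vmw_sg_table *vsgt,
				 unsigned long num_pages, int32_t mob_id)
{
	struct vmw_mob *mob = vmw_mob_create(num_pages);
	int ret;

	if (!mob)
		return -ENOMEM;

	/* Bind the guest pages described by vsgt to the device-visible MOB id. */
	ret = vmw_mob_bind(dev_priv, mob, vsgt, num_pages, mob_id);
	if (ret) {
		vmw_mob_destroy(mob);
		return ret;
	}

	/* ... use the MOB ... */

	vmw_mob_unbind(dev_priv, mob);
	vmw_mob_destroy(mob);
	return 0;
}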