Subversion Repositories Kolibri OS


Rev 4111 → Rev 4569
@@ -30 +30 @@
 #include "vmwgfx_drv.h"
 #include 
 #include 
 #include 
 //#include 
+#include 
@@ -35 +36 @@
 
 #define VMWGFX_DRIVER_NAME "vmwgfx"
 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
 #define VMWGFX_CHIP_SVGAII 0
@@ -109 +110 @@
 	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
 		 struct drm_vmw_present_readback_arg)
 #define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
 	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
 		 struct drm_vmw_update_layout_arg)
+#define DRM_IOCTL_VMW_CREATE_SHADER				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
+		 struct drm_vmw_shader_create_arg)
+#define DRM_IOCTL_VMW_UNREF_SHADER				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
+		 struct drm_vmw_shader_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
+		 union drm_vmw_gb_surface_create_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
+		 union drm_vmw_gb_surface_reference_arg)
+#define DRM_IOCTL_VMW_SYNCCPU					\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
+		 struct drm_vmw_synccpu_arg)
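
Note (not part of the diff): the DRM_IOW/DRM_IOWR wrappers encode direction, argument size and command number into the ioctl value, with driver-private commands offset by DRM_COMMAND_BASE. As a hedged sketch of how userspace would reach one of the new shader ioctls, assuming the macro and argument struct are mirrored in a userspace header and fd is an open, authenticated DRM file descriptor:

#include <sys/ioctl.h>

struct drm_vmw_shader_create_arg;	/* assumed mirrored from the UAPI header */

int vmw_create_shader(int fd, struct drm_vmw_shader_create_arg *arg)
{
	/* DRM_IOWR: the kernel reads the request and writes the new
	 * shader handle back into *arg. */
	return ioctl(fd, DRM_IOCTL_VMW_CREATE_SHADER, arg);
}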
@@ -114 +130 @@
 
 /**
  * The core DRM version of this macro doesn't account for
  * DRM_COMMAND_BASE.
@@ -174 +190 @@
 		      vmw_present_readback_ioctl,
 		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
 	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
 		      vmw_kms_update_layout_ioctl,
 		      DRM_MASTER | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
+		      vmw_shader_define_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
+		      vmw_shader_destroy_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
+		      vmw_gb_surface_define_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
+		      vmw_gb_surface_reference_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_SYNCCPU,
+		      vmw_user_dmabuf_synccpu_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
 };
 #endif
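
Note (not part of the diff): the flags on each entry mean, in this era of DRM, that the caller must hold an authenticated fd (DRM_AUTH), must be the current master (DRM_MASTER), and that the handler runs without the global DRM lock (DRM_UNLOCKED). Reconstructed from memory of the upstream file, not from this diff, VMW_IOCTL_DEF expands roughly to an array initializer keyed by the command-relative ioctl number:

#define VMW_IOCTL_DEF(ioctl, func, flags)			\
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] =	\
	{DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}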
@@ -181 +212 @@
 
 static struct pci_device_id vmw_pci_id_list[] = {
 	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
 	{0, 0, 0}
@@ -185 +216 @@
 };
+
+static int enable_fbdev = 1;
+static int vmw_force_iommu;
+static int vmw_restrict_iommu;
@@ -186 +221 @@
-
-static int enable_fbdev = 1;
+static int vmw_force_coherent;
+static int vmw_restrict_dma_mask;
@@ -188 +223 @@
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
+static void vmw_master_init(struct vmw_master *);
+
+MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
+module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
+module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
+MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
+module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
+MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
@@ -190 +234 @@
-static void vmw_master_init(struct vmw_master *);
-
-MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
-module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+
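
Note (not part of the diff): these are standard Linux module parameters; mode 0600 makes each knob readable and writable by root through sysfs, and on a Linux host they would be set at load time, e.g. "modprobe vmwgfx restrict_dma_mask=1". In the KolibriOS port they most likely compile to stubs, so the defaults are what matters. A minimal sketch of the pattern, with illustrative names:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int vmw_example_knob;	/* 0 unless overridden at load time */
MODULE_PARM_DESC(example_knob, "Illustrative vmwgfx tuning knob");
module_param_named(example_knob, vmw_example_knob, int, 0600);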
@@ -225 +269 @@
 		DRM_INFO("  Traces.\n");
 	if (capabilities & SVGA_CAP_GMR2)
 		DRM_INFO("  GMR2.\n");
 	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
 		DRM_INFO("  Screen Object 2.\n");
+	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
+		DRM_INFO("  Command Buffers.\n");
+	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
+		DRM_INFO("  Command Buffers 2.\n");
+	if (capabilities & SVGA_CAP_GBOBJECTS)
+		DRM_INFO("  Guest Backed Resources.\n");
 }
@@ -231 +281 @@
-
 
 /**
- * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at
- * the start of a buffer object.
+ * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
  *
- * @dev_priv: The device private structure.
+ * @dev_priv: A device private structure.
  *
- * This function will idle the buffer using an uninterruptible wait, then
- * map the first page and initialize a pending occlusion query result structure,
- * Finally it will unmap the buffer.
- *
- * TODO: Since we're only mapping a single page, we should optimize the map
- * to use kmap_atomic / iomap_atomic.
+ * This function creates a small buffer object that holds the query
+ * result for dummy queries emitted as query barriers.
+ * The function will then map the first page and initialize a pending
+ * occlusion query result structure, Finally it will unmap the buffer.
+ * No interruptible waits are done within this function.
+ *
+ * Returns an error if bo creation or initialization fails.
  */
-static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
-{
+static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
+{
+	int ret;
+	struct ttm_buffer_object *bo;
 	struct ttm_bo_kmap_obj map;
 	volatile SVGA3dQueryResult *result;
-	bool dummy;
-	int ret;
-	struct ttm_bo_device *bdev = &dev_priv->bdev;
-	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
-
-	ttm_bo_reserve(bo, false, false, false, 0);
-	spin_lock(&bdev->fence_lock);
-    ret = 0; //ttm_bo_wait(bo, false, false, false);
-	spin_unlock(&bdev->fence_lock);
-	if (unlikely(ret != 0))
-		(void) vmw_fallback_wait(dev_priv, false, true, 0, false,
-					 10*HZ);
-/*
+	bool dummy;
+
+	/*
+	 * Create the bo as pinned, so that a tryreserve will
+	 * immediately succeed. This is because we're the only
+	 * user of the bo currently.
+	 */
+	ret = ttm_bo_create(&dev_priv->bdev,
+			     PAGE_SIZE,
+			     ttm_bo_type_device,
+			    &vmw_sys_ne_placement,
+			     0, false, NULL,
+			    &bo);
+
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_bo_reserve(bo, false, true, false, 0);
+	BUG_ON(ret != 0);
+
 	ret = ttm_bo_kmap(bo, 0, 1, &map);
 	if (likely(ret == 0)) {
 		result = ttm_kmap_obj_virtual(&map, &dummy);
 		result->totalSize = sizeof(*result);
 		result->state = SVGA3D_QUERYSTATE_PENDING;
-		result->result32 = 0xff;
-		ttm_bo_kunmap(&map);
-	} else
-		DRM_ERROR("Dummy query buffer map failed.\n");
-*/
-	ttm_bo_unreserve(bo);
-}
-
-
-/**
- * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
- *
- * @dev_priv: A device private structure.
- *
- * This function creates a small buffer object that holds the query
- * result for dummy queries emitted as query barriers.
- * No interruptible waits are done within this function.
- *
- * Returns an error if bo creation fails.
- */
-static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
-{
-	return ttm_bo_create(&dev_priv->bdev,
-			     PAGE_SIZE,
-			     ttm_bo_type_device,
-			     &vmw_vram_sys_placement,
-			     0, false, NULL,
-			     &dev_priv->dummy_query_bo);
-}
-
+		result->result32 = 0xff;
+		ttm_bo_kunmap(&map);
+	}
+	vmw_bo_pin(bo, false);
+	ttm_bo_unreserve(bo);
+
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Dummy query buffer map failed.\n");
+		ttm_bo_unref(&bo);
+	} else
+		dev_priv->dummy_query_bo = bo;
+
+	return ret;
+}
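
Note (not part of the diff): the net effect of this hunk is that instead of creating the bo in VRAM and later patching it with an uninterruptible idle-and-map pass, the rewritten helper creates the bo pinned in system memory, initializes the pending query result immediately, and publishes it to dev_priv->dummy_query_bo only on success. A hedged sketch of the upstream calling pattern, with an assumed caller name:

static int vmw_request_device_sketch(struct vmw_private *dev_priv)
{
	/* Called once at device bring-up; failure aborts 3D init. */
	int ret = vmw_dummy_query_bo_create(dev_priv);

	if (unlikely(ret != 0))
		return ret;	/* no dummy query bo, no query barriers */
	/* ... continue with fifo / fence setup ... */
	return 0;
}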
@@ -334 +376 @@
 	ttm_bo_unref(&dev_priv->dummy_query_bo);
 	vmw_fence_fifo_down(dev_priv->fman);
 	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 }
+
 
 /**
  * Increase the 3d resource refcount.
  * If the count was prevously zero, initialize the fifo, switching to svga
  * mode. Note that the master holds a ref as well, and may request an
430
 
473
 
431
	dev_priv->initial_width = width;
474
	dev_priv->initial_width = width;
432
	dev_priv->initial_height = height;
475
	dev_priv->initial_height = height;
Line -... Line 476...
-
 
476
}
-
 
477
 
-
 
478
/**
-
 
479
 * vmw_dma_masks - set required page- and dma masks
-
 
480
 *
-
 
481
 * @dev: Pointer to struct drm-device
-
 
482
 *
-
 
483
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
-
 
484
 * restriction also for 64-bit systems.
-
 
485
 */
-
 
486
#ifdef CONFIG_INTEL_IOMMU
-
 
487
static int vmw_dma_masks(struct vmw_private *dev_priv)
-
 
488
{
-
 
489
	struct drm_device *dev = dev_priv->dev;
-
 
490
 
-
 
491
	if (intel_iommu_enabled &&
-
 
492
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
-
 
493
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
-
 
494
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
-
 
495
	}
-
 
496
	return 0;
-
 
497
}
-
 
498
#else
-
 
499
static int vmw_dma_masks(struct vmw_private *dev_priv)
-
 
500
{
-
 
501
	return 0;
-
 
502
}
433
}
503
#endif
434
 
504
 
435
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
505
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
436
{
506
{
437
	struct vmw_private *dev_priv;
507
	struct vmw_private *dev_priv;
438
	int ret;
508
	int ret;
-
 
509
	uint32_t svga_id;
-
 
510
	enum vmw_res_type i;
Line 439... Line 511...
439
	uint32_t svga_id;
511
	bool refuse_dma = false;
Line 440... Line 512...
440
	enum vmw_res_type i;
512
 
441
 
513
 
Line 453... Line 525...
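
Note (not part of the diff): the 44-bit figure in vmw_dma_masks is not arbitrary. As the new doc comment says, the device handles 32-bit PFNs; with 4 KiB pages the widest byte address it can express therefore needs 32 + 12 = 44 bits. A sketch of the same arithmetic, with illustrative macro names:

/* Why DMA_BIT_MASK(44): 32-bit page frame numbers shifted by the
 * 4 KiB page size give the device's reachable address range. */
#define EXAMPLE_PPN_BITS   32
#define EXAMPLE_PAGE_SHIFT 12	/* 4 KiB pages assumed */
#define EXAMPLE_DMA_MASK   ((1ULL << (EXAMPLE_PPN_BITS + EXAMPLE_PAGE_SHIFT)) - 1)
/* EXAMPLE_DMA_MASK == DMA_BIT_MASK(44) */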
453
	dev_priv->vmw_chipset = chipset;
525
	dev_priv->vmw_chipset = chipset;
454
	dev_priv->last_read_seqno = (uint32_t) -100;
526
	dev_priv->last_read_seqno = (uint32_t) -100;
455
	mutex_init(&dev_priv->hw_mutex);
527
	mutex_init(&dev_priv->hw_mutex);
456
	mutex_init(&dev_priv->cmdbuf_mutex);
528
	mutex_init(&dev_priv->cmdbuf_mutex);
457
	mutex_init(&dev_priv->release_mutex);
529
	mutex_init(&dev_priv->release_mutex);
-
 
530
	mutex_init(&dev_priv->binding_mutex);
458
	rwlock_init(&dev_priv->resource_lock);
531
	rwlock_init(&dev_priv->resource_lock);
Line 459... Line 532...
459
 
532
 
460
	for (i = vmw_res_context; i < vmw_res_max; ++i) {
533
	for (i = vmw_res_context; i < vmw_res_max; ++i) {
461
		idr_init(&dev_priv->res_idr[i]);
534
		idr_init(&dev_priv->res_idr[i]);
Line 489... Line 562...
489
		mutex_unlock(&dev_priv->hw_mutex);
562
		mutex_unlock(&dev_priv->hw_mutex);
490
		goto out_err0;
563
		goto out_err0;
491
	}
564
	}
Line 492... Line 565...
492
 
565
 
-
 
566
	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
-
 
567
//   ret = vmw_dma_select_mode(dev_priv);
-
 
568
//   if (unlikely(ret != 0)) {
-
 
569
//       DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
-
 
570
//       refuse_dma = true;
Line 493... Line 571...
493
	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
571
//   }
494
 
572
 
495
	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
573
	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
496
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
574
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
Line 497... Line 575...
497
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
575
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
Line 498... Line 576...
498
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
576
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
499
 
-
 
500
	vmw_get_initial_size(dev_priv);
-
 
501
 
-
 
502
	if (dev_priv->capabilities & SVGA_CAP_GMR) {
577
 
503
		dev_priv->max_gmr_descriptors =
578
	vmw_get_initial_size(dev_priv);
504
			vmw_read(dev_priv,
-
 
505
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
-
 
506
		dev_priv->max_gmr_ids =
579
 
507
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
580
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
508
	}
581
		dev_priv->max_gmr_ids =
509
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
582
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
510
		dev_priv->max_gmr_pages =
583
		dev_priv->max_gmr_pages =
Line 517... Line 590...
517
		 * An arbitrary limit of 512MiB on surface
590
		 * An arbitrary limit of 512MiB on surface
518
		 * memory. But all HWV8 hardware supports GMR2.
591
		 * memory. But all HWV8 hardware supports GMR2.
519
		 */
592
		 */
520
		dev_priv->memory_size = 512*1024*1024;
593
		dev_priv->memory_size = 512*1024*1024;
521
	}
594
	}
-
 
595
	dev_priv->max_mob_pages = 0;
-
 
596
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
-
 
597
		uint64_t mem_size =
-
 
598
			vmw_read(dev_priv,
-
 
599
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
-
 
600
 
-
 
601
		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
-
 
602
		dev_priv->prim_bb_mem =
-
 
603
			vmw_read(dev_priv,
-
 
604
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
-
 
605
	} else
-
 
606
		dev_priv->prim_bb_mem = dev_priv->vram_size;
-
 
607
 
-
 
608
	ret = vmw_dma_masks(dev_priv);
-
 
609
	if (unlikely(ret != 0)) {
-
 
610
		mutex_unlock(&dev_priv->hw_mutex);
-
 
611
		goto out_err0;
-
 
612
	}
-
 
613
 
-
 
614
	if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
-
 
615
		dev_priv->prim_bb_mem = dev_priv->vram_size;
Line 522... Line 616...
522
 
616
 
Line 523... Line 617...
523
	mutex_unlock(&dev_priv->hw_mutex);
617
	mutex_unlock(&dev_priv->hw_mutex);
Line 524... Line 618...
524
 
618
 
525
	vmw_print_capabilities(dev_priv->capabilities);
619
	vmw_print_capabilities(dev_priv->capabilities);
526
 
620
 
527
	if (dev_priv->capabilities & SVGA_CAP_GMR) {
-
 
528
		DRM_INFO("Max GMR ids is %u\n",
-
 
529
			 (unsigned)dev_priv->max_gmr_ids);
-
 
530
		DRM_INFO("Max GMR descriptors is %u\n",
-
 
531
			 (unsigned)dev_priv->max_gmr_descriptors);
621
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
532
	}
622
		DRM_INFO("Max GMR ids is %u\n",
533
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
623
			 (unsigned)dev_priv->max_gmr_ids);
534
		DRM_INFO("Max number of GMR pages is %u\n",
624
		DRM_INFO("Max number of GMR pages is %u\n",
535
			 (unsigned)dev_priv->max_gmr_pages);
625
			 (unsigned)dev_priv->max_gmr_pages);
-
 
626
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
-
 
627
			 (unsigned)dev_priv->memory_size / 1024);
536
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
628
	}
537
			 (unsigned)dev_priv->memory_size / 1024);
629
	DRM_INFO("Maximum display memory size is %u kiB\n",
538
	}
630
		 dev_priv->prim_bb_mem / 1024);
539
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
631
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
Line 540... Line 632...
540
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
632
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
541
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
633
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
542
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
634
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
Line -... Line 635...
-
 
635
 
-
 
636
	ret = vmw_ttm_global_init(dev_priv);
Line 543... Line 637...
543
 
637
	if (unlikely(ret != 0))
544
	ret = vmw_ttm_global_init(dev_priv);
638
		goto out_err0;
545
	if (unlikely(ret != 0))
639
 
Line 563... Line 657...
563
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
657
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
564
		goto out_err2;
658
		goto out_err2;
565
	}
659
	}
Line 566... Line 660...
566
 
660
 
-
 
661
	dev_priv->has_gmr = true;
567
	dev_priv->has_gmr = true;
662
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
568
	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
663
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
569
			   dev_priv->max_gmr_ids) != 0) {
664
					 VMW_PL_GMR) != 0) {
570
		DRM_INFO("No GMR memory available. "
665
		DRM_INFO("No GMR memory available. "
571
			 "Graphics memory resources are very limited.\n");
666
			 "Graphics memory resources are very limited.\n");
572
		dev_priv->has_gmr = false;
667
		dev_priv->has_gmr = false;
Line -... Line 668...
-
 
668
	}
-
 
669
 
-
 
670
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
-
 
671
		dev_priv->has_mob = true;
-
 
672
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
-
 
673
				   VMW_PL_MOB) != 0) {
-
 
674
			DRM_INFO("No MOB memory available. "
-
 
675
				 "3D will be disabled.\n");
-
 
676
			dev_priv->has_mob = false;
573
	}
677
		}
574
 
678
	}
Line 575... Line 679...
575
	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
679
	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
576
					 dev_priv->mmio_size);
680
					 dev_priv->mmio_size);
@@ -588 +692 @@
 		ret = -ENOSYS;
 		DRM_ERROR("Hardware has no pitchlock\n");
 		goto out_err4;
 	}
 
-	dev_priv->tdev = ttm_object_device_init
-	    (dev_priv->mem_global_ref.object, 12);
+//   dev_priv->tdev = ttm_object_device_init
+//       (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
 
-	if (unlikely(dev_priv->tdev == NULL)) {
-		DRM_ERROR("Unable to initialize TTM object management.\n");
-		ret = -ENOMEM;
-		goto out_err4;
-	}
+//   if (unlikely(dev_priv->tdev == NULL)) {
+//       DRM_ERROR("Unable to initialize TTM object management.\n");
+//       ret = -ENOMEM;
+//       goto out_err4;
+//   }
@@ -700 +804 @@
 		pci_release_regions(dev->pdev);
 
 	ttm_object_device_release(&dev_priv->tdev);
 	iounmap(dev_priv->mmio_virt);
-	arch_phys_wc_del(dev_priv->mmio_mtrr);
+	arch_phys_wc_del(dev_priv->mmio_mtrr);
+	if (dev_priv->has_mob)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
 	if (dev_priv->has_gmr)
 		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 	(void)ttm_bo_device_release(&dev_priv->bdev);
@@ -729 +835 @@
 			 struct drm_file *file_priv)
 {
 	struct vmw_fpriv *vmw_fp;
 
-	vmw_fp = vmw_fpriv(file_priv);
-	ttm_object_file_release(&vmw_fp->tfile);
-	if (vmw_fp->locked_master)
-		drm_master_put(&vmw_fp->locked_master);
+	vmw_fp = vmw_fpriv(file_priv);
+
+	if (vmw_fp->locked_master) {
+		struct vmw_master *vmaster =
+			vmw_master(vmw_fp->locked_master);
+
+		ttm_vt_unlock(&vmaster->lock);
+		drm_master_put(&vmw_fp->locked_master);
+	}
+
+	ttm_object_file_release(&vmw_fp->tfile);
 	kfree(vmw_fp);
 }
 #endif
@@ -808 +921 @@
 		ret = drm_mode_set_config_internal(&set);
 		WARN_ON(ret != 0);
 	}
 
-}
+}
+#endif
 
 static void vmw_master_init(struct vmw_master *vmaster)
 {
-	ttm_lock_init(&vmaster->lock);
+//	ttm_lock_init(&vmaster->lock);
 	INIT_LIST_HEAD(&vmaster->fb_surf);
 	mutex_init(&vmaster->fb_surf_mutex);
@@ -826 +940 @@
 	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
 	if (unlikely(vmaster == NULL))
 		return -ENOMEM;
 
 	vmw_master_init(vmaster);
-	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+//	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 	master->driver_priv = vmaster;
 
@@ -841 +955 @@
 
 	master->driver_priv = NULL;
 	kfree(vmaster);
 }
 
-
+#if 0
 static int vmw_master_set(struct drm_device *dev,
 			  struct drm_file *file_priv,
 			  bool from_open)
@@ -916 +1030 @@
 	 * it locked.
 	 */
 
 	vmw_fp->locked_master = drm_master_get(file_priv->master);
-	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
-	vmw_execbuf_release_pinned_bo(dev_priv);
-
+	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
 	if (unlikely((ret != 0))) {
 		DRM_ERROR("Unable to lock TTM at VT switch.\n");
 		drm_master_put(&vmw_fp->locked_master);
 	}
 
-	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 
 	if (!dev_priv->enable_fb) {
@@ -1148 +1260 @@
 
 
 MODULE_AUTHOR("VMware Inc. and others");
+MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
+MODULE_LICENSE("GPL and additional rights");
+
+
+void *kmemdup(const void *src, size_t len, gfp_t gfp)
+{
+    void *p;
+
+    p = kmalloc(len, gfp);
+    if (p)
+        memcpy(p, src, len);
+    return p;
+}
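
Note (not part of the diff): the page is cut off after "return p;"; only the function's closing brace is missing above. The trailing hunk also shows why the port carries its own kmemdup: the KolibriOS compat layer apparently lacks one, and this copy follows the standard kmalloc-plus-memcpy pattern, returning NULL on allocation failure. A hedged usage sketch, with illustrative names:

struct example_ctx {		/* illustrative container */
	void  *cached_cmd;
	size_t cached_size;
};

static int example_cache_cmd(struct example_ctx *ctx,
			     const void *cmd, size_t size)
{
	/* Caller owns the duplicate and must kfree() it later. */
	ctx->cached_cmd = kmemdup(cmd, size, GFP_KERNEL);
	if (ctx->cached_cmd == NULL)
		return -ENOMEM;
	ctx->cached_size = size;
	return 0;
}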