32,6 → 32,7 |
#include <drm/ttm/ttm_bo_driver.h> |
#include <drm/ttm/ttm_object.h> |
//#include <drm/ttm/ttm_module.h> |
#include <linux/dma_remapping.h> |
|
#define VMWGFX_DRIVER_NAME "vmwgfx" |
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" |
111,6 → 112,21 |
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \ |
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ |
struct drm_vmw_update_layout_arg) |
#define DRM_IOCTL_VMW_CREATE_SHADER \ |
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \ |
struct drm_vmw_shader_create_arg) |
#define DRM_IOCTL_VMW_UNREF_SHADER \ |
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \ |
struct drm_vmw_shader_arg) |
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ |
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ |
union drm_vmw_gb_surface_create_arg) |
#define DRM_IOCTL_VMW_GB_SURFACE_REF \ |
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ |
union drm_vmw_gb_surface_reference_arg) |
#define DRM_IOCTL_VMW_SYNCCPU \ |
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ |
struct drm_vmw_synccpu_arg) |
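
/*
 * Illustrative sketch (not part of this patch): user space normally reaches
 * these ioctls through libdrm rather than raw ioctl(2). Assuming the
 * argument layout from vmwgfx_drm.h, grabbing a buffer for CPU access with
 * the new synccpu ioctl could look like the following; bo_handle is a
 * hypothetical buffer-object handle:
 *
 *	struct drm_vmw_synccpu_arg arg = {
 *		.handle = bo_handle,
 *		.flags = drm_vmw_synccpu_read,
 *		.op = drm_vmw_synccpu_grab,
 *	};
 *	ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 */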
|
/** |
* The core DRM version of this macro doesn't account for |
176,6 → 192,21 |
VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, |
vmw_kms_update_layout_ioctl, |
DRM_MASTER | DRM_UNLOCKED), |
VMW_IOCTL_DEF(VMW_CREATE_SHADER, |
vmw_shader_define_ioctl, |
DRM_AUTH | DRM_UNLOCKED), |
VMW_IOCTL_DEF(VMW_UNREF_SHADER, |
vmw_shader_destroy_ioctl, |
DRM_AUTH | DRM_UNLOCKED), |
VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, |
vmw_gb_surface_define_ioctl, |
DRM_AUTH | DRM_UNLOCKED), |
VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, |
vmw_gb_surface_reference_ioctl, |
DRM_AUTH | DRM_UNLOCKED), |
VMW_IOCTL_DEF(VMW_SYNCCPU, |
vmw_user_dmabuf_synccpu_ioctl, |
DRM_AUTH | DRM_UNLOCKED), |
}; |
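
/*
 * Note on the flags above: DRM_AUTH limits an ioctl to clients that have
 * authenticated against the current master, DRM_MASTER to the master
 * itself, and DRM_UNLOCKED tells the core DRM dispatcher to skip the
 * legacy global lock. The new shader, guest-backed surface and synccpu
 * ioctls are therefore available to any authenticated client.
 */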
#endif |
|
185,6 → 216,10 |
}; |
|
static int enable_fbdev = 1; |
static int vmw_force_iommu; |
static int vmw_restrict_iommu; |
static int vmw_force_coherent; |
static int vmw_restrict_dma_mask; |
|
static int vmw_probe(struct pci_dev *, const struct pci_device_id *); |
static void vmw_master_init(struct vmw_master *); |
191,7 → 226,16 |
|
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); |
module_param_named(enable_fbdev, enable_fbdev, int, 0600); |
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages"); |
module_param_named(force_dma_api, vmw_force_iommu, int, 0600); |
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); |
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); |
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); |
module_param_named(force_coherent, vmw_force_coherent, int, 0600); |
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); |
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); |
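
/*
 * Example (illustrative): all of the above are plain integer module
 * parameters, so they can be set at load time, e.g.
 *
 *	modprobe vmwgfx force_coherent=1 restrict_dma_mask=1
 *
 * and, being registered with mode 0600, read back or changed by root at
 * runtime through /sys/module/vmwgfx/parameters/<name>.
 */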
|
|
static void vmw_print_capabilities(uint32_t capabilities) |
{ |
DRM_INFO("Capabilities:\n"); |
227,39 → 271,53 |
DRM_INFO(" GMR2.\n"); |
if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) |
DRM_INFO(" Screen Object 2.\n"); |
if (capabilities & SVGA_CAP_COMMAND_BUFFERS) |
DRM_INFO(" Command Buffers.\n"); |
if (capabilities & SVGA_CAP_CMD_BUFFERS_2) |
DRM_INFO(" Command Buffers 2.\n"); |
if (capabilities & SVGA_CAP_GBOBJECTS) |
DRM_INFO(" Guest Backed Resources.\n"); |
} |
|
|
/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure; finally it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;
	int ret;

	/*
	 * Create the bo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	ret = ttm_bo_create(&dev_priv->bdev,
			    PAGE_SIZE,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL,
			    &bo);

	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, false, 0);
	BUG_ON(ret != 0);

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin(bo, false);
	ttm_bo_unreserve(bo);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		ttm_bo_unref(&bo);
	} else
		dev_priv->dummy_query_bo = bo;

	return ret;
}
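
/*
 * For reference, the result structure initialized above follows the SVGA3D
 * device ABI; roughly (see svga3d_reg.h for the authoritative definition):
 *
 *	typedef struct {
 *		uint32 totalSize;	// set to sizeof(SVGA3dQueryResult)
 *		SVGA3dQueryState state;	// SVGA3D_QUERYSTATE_PENDING here
 *		union {
 *			uint32 result32;	// poisoned with 0xff
 *		} result;
 *	} SVGA3dQueryResult;
 *
 * so the dummy bo always reads back as a still-pending query.
 */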
|
|
static int vmw_request_device(struct vmw_private *dev_priv) |
{ |
int ret; |
336,6 → 378,7 |
vmw_fifo_release(dev_priv, &dev_priv->fifo); |
} |
|
|
/** |
* Increase the 3d resource refcount. |
 * If the count was previously zero, initialize the fifo, switching to svga
432,6 → 475,33 |
dev_priv->initial_height = height; |
} |
|
/** |
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to struct vmw_private
 *
 * On 32-bit systems we can only handle 32-bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU |
static int vmw_dma_masks(struct vmw_private *dev_priv) |
{ |
struct drm_device *dev = dev_priv->dev; |
|
if (intel_iommu_enabled && |
(sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { |
DRM_INFO("Restricting DMA addresses to 44 bits.\n"); |
return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); |
} |
return 0; |
} |
#else |
static int vmw_dma_masks(struct vmw_private *dev_priv) |
{ |
return 0; |
} |
#endif |
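
/*
 * Worked example for the mask above: DMA_BIT_MASK(44) evaluates to
 * (1ULL << 44) - 1 = 0xfffffffffff, i.e. 44 usable address bits. With
 * 4 KiB pages (PAGE_SHIFT == 12) that leaves 44 - 12 = 32 bits for the
 * page frame number, which is exactly what a 32-bit unsigned long holds.
 */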
|
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
{ |
struct vmw_private *dev_priv; |
438,7 → 508,9 |
int ret; |
uint32_t svga_id; |
enum vmw_res_type i; |
bool refuse_dma = false; |
|
|
ENTER(); |
|
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); |
455,6 → 527,7 |
mutex_init(&dev_priv->hw_mutex); |
mutex_init(&dev_priv->cmdbuf_mutex); |
mutex_init(&dev_priv->release_mutex); |
mutex_init(&dev_priv->binding_mutex); |
rwlock_init(&dev_priv->resource_lock); |
|
for (i = vmw_res_context; i < vmw_res_max; ++i) { |
491,6 → 564,11 |
} |
|
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); |
// ret = vmw_dma_select_mode(dev_priv); |
// if (unlikely(ret != 0)) { |
// DRM_INFO("Restricting capabilities due to IOMMU setup.\n"); |
// refuse_dma = true; |
// } |
|
dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); |
dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); |
499,14 → 577,9 |
|
vmw_get_initial_size(dev_priv); |
|
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
dev_priv->max_gmr_pages = |
vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); |
dev_priv->memory_size = |
519,23 → 592,42 |
*/ |
dev_priv->memory_size = 512*1024*1024; |
} |
dev_priv->max_mob_pages = 0; |
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { |
uint64_t mem_size = |
vmw_read(dev_priv, |
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); |
|
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; |
dev_priv->prim_bb_mem = |
vmw_read(dev_priv, |
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); |
} else |
dev_priv->prim_bb_mem = dev_priv->vram_size; |
|
ret = vmw_dma_masks(dev_priv); |
if (unlikely(ret != 0)) { |
mutex_unlock(&dev_priv->hw_mutex); |
goto out_err0; |
} |
|
if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size)) |
dev_priv->prim_bb_mem = dev_priv->vram_size; |
|
mutex_unlock(&dev_priv->hw_mutex); |
|
vmw_print_capabilities(dev_priv->capabilities); |
|
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
DRM_INFO("Max number of GMR pages is %u\n", |
(unsigned)dev_priv->max_gmr_pages); |
DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", |
(unsigned)dev_priv->memory_size / 1024); |
} |
DRM_INFO("Maximum display memory size is %u kiB\n", |
dev_priv->prim_bb_mem / 1024); |
DRM_INFO("VRAM at 0x%08x size is %u kiB\n", |
dev_priv->vram_start, dev_priv->vram_size / 1024); |
DRM_INFO("MMIO at 0x%08x size is %u kiB\n", |
546,6 → 638,8 |
goto out_err0; |
|
|
vmw_master_init(&dev_priv->fbdev_master); |
dev_priv->active_master = &dev_priv->fbdev_master; |
|
|
ret = ttm_bo_device_init(&dev_priv->bdev, |
565,13 → 659,23 |
} |
|
dev_priv->has_gmr = true; |
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
DRM_INFO("No GMR memory available. " |
"Graphics memory resources are very limited.\n"); |
dev_priv->has_gmr = false; |
} |
|
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { |
dev_priv->has_mob = true; |
if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, |
VMW_PL_MOB) != 0) { |
DRM_INFO("No MOB memory available. " |
"3D will be disabled.\n"); |
dev_priv->has_mob = false; |
} |
} |
dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start, |
dev_priv->mmio_size); |
|
590,14 → 694,14 |
goto out_err4; |
} |
|
dev_priv->tdev = ttm_object_device_init |
(dev_priv->mem_global_ref.object, 12); |
// dev_priv->tdev = ttm_object_device_init |
// (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops); |
|
if (unlikely(dev_priv->tdev == NULL)) { |
DRM_ERROR("Unable to initialize TTM object management.\n"); |
ret = -ENOMEM; |
goto out_err4; |
} |
|
dev->dev_private = dev_priv; |
|
702,6 → 806,8 |
ttm_object_device_release(&dev_priv->tdev); |
iounmap(dev_priv->mmio_virt); |
arch_phys_wc_del(dev_priv->mmio_mtrr); |
if (dev_priv->has_mob) |
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); |
if (dev_priv->has_gmr) |
(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); |
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); |
731,9 → 837,16 |
struct vmw_fpriv *vmw_fp; |
|
vmw_fp = vmw_fpriv(file_priv); |
|
if (vmw_fp->locked_master) { |
struct vmw_master *vmaster = |
vmw_master(vmw_fp->locked_master); |
|
ttm_vt_unlock(&vmaster->lock); |
drm_master_put(&vmw_fp->locked_master); |
} |
|
	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
} |
#endif |
810,10 → 923,11 |
} |
|
} |
#endif |
|
static void vmw_master_init(struct vmw_master *vmaster) |
{ |
ttm_lock_init(&vmaster->lock); |
// ttm_lock_init(&vmaster->lock); |
INIT_LIST_HEAD(&vmaster->fb_surf); |
mutex_init(&vmaster->fb_surf_mutex); |
} |
828,7 → 942,7 |
return -ENOMEM; |
|
vmw_master_init(vmaster); |
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
// ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
master->driver_priv = vmaster; |
|
return 0; |
843,7 → 957,7 |
kfree(vmaster); |
} |
|
|
#if 0 |
static int vmw_master_set(struct drm_device *dev, |
struct drm_file *file_priv, |
bool from_open) |
918,14 → 1032,12 |
|
vmw_fp->locked_master = drm_master_get(file_priv->master); |
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); |
vmw_execbuf_release_pinned_bo(dev_priv); |
|
if (unlikely((ret != 0))) { |
DRM_ERROR("Unable to lock TTM at VT switch.\n"); |
drm_master_put(&vmw_fp->locked_master); |
} |
|
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
|
if (!dev_priv->enable_fb) { |
ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); |
1150,3 → 1262,15 |
MODULE_AUTHOR("VMware Inc. and others"); |
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); |
MODULE_LICENSE("GPL and additional rights"); |
|
|
void *kmemdup(const void *src, size_t len, gfp_t gfp) |
{ |
void *p; |
|
p = kmalloc(len, gfp); |
if (p) |
memcpy(p, src, len); |
return p; |
} |
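
/*
 * Usage sketch (hypothetical caller), mirroring the in-kernel kmemdup()
 * contract: the copy is allocated with the caller's gfp flags and must be
 * released with kfree():
 *
 *	cmd = kmemdup(user_cmd, size, GFP_KERNEL);
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 */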
|