/drivers/video/drm/ttm/ttm_bo.c |
---|
426,8 → 426,20 |
sync_obj = driver->sync_obj_ref(bo->sync_obj); |
spin_unlock(&bdev->fence_lock); |
if (!ret) |
if (!ret) { |
/* |
* Make NO_EVICT bos immediately available to |
* shrinkers, now that they are queued for |
* destruction. |
*/ |
if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { |
bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT; |
ttm_bo_add_to_lru(bo); |
} |
ww_mutex_unlock(&bo->resv->lock); |
} |
kref_get(&bo->list_kref); |
list_add_tail(&bo->ddestroy, &bdev->ddestroy); |
944,7 → 956,7 |
} |
EXPORT_SYMBOL(ttm_bo_mem_space); |
int ttm_bo_move_buffer(struct ttm_buffer_object *bo, |
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, |
struct ttm_placement *placement, |
bool interruptible, |
bool no_wait_gpu) |
986,8 → 998,9 |
} |
#endif |
static int ttm_bo_mem_compat(struct ttm_placement *placement, |
struct ttm_mem_reg *mem) |
static bool ttm_bo_mem_compat(struct ttm_placement *placement, |
struct ttm_mem_reg *mem, |
uint32_t *new_flags) |
{ |
int i; |
994,18 → 1007,25 |
if (mem->mm_node && placement->lpfn != 0 && |
(mem->start < placement->fpfn || |
mem->start + mem->num_pages > placement->lpfn)) |
return -1; |
return false; |
for (i = 0; i < placement->num_placement; i++) { |
if ((placement->placement[i] & mem->placement & |
TTM_PL_MASK_CACHING) && |
(placement->placement[i] & mem->placement & |
TTM_PL_MASK_MEM)) |
return i; |
*new_flags = placement->placement[i]; |
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && |
(*new_flags & mem->placement & TTM_PL_MASK_MEM)) |
return true; |
} |
return -1; |
for (i = 0; i < placement->num_busy_placement; i++) { |
*new_flags = placement->busy_placement[i]; |
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && |
(*new_flags & mem->placement & TTM_PL_MASK_MEM)) |
return true; |
} |
return false; |
} |
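/* |
 * Caller sketch (hedged; mirrors the upstream ttm_bo_validate flow, since |
 * this port comments the move call out below): probe compatibility first, |
 * move only when neither list matches, then fold the access bits in. |
 */ |
uint32_t new_flags; |
if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) { |
ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_gpu); |
if (ret) |
return ret; |
} else { |
ttm_flag_masked(&bo->mem.placement, new_flags, ~TTM_PL_MASK_MEMTYPE); |
} |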
int ttm_bo_validate(struct ttm_buffer_object *bo, |
struct ttm_placement *placement, |
bool interruptible, |
1012,6 → 1032,7 |
bool no_wait_gpu) |
{ |
int ret; |
uint32_t new_flags; |
// BUG_ON(!ttm_bo_is_reserved(bo)); |
/* Check that range is valid */ |
1022,8 → 1043,7 |
/* |
* Check whether we need to move buffer. |
*/ |
ret = ttm_bo_mem_compat(placement, &bo->mem); |
if (ret < 0) { |
if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) { |
// ret = ttm_bo_move_buffer(bo, placement, interruptible, |
// no_wait_gpu); |
if (ret) |
1033,7 → 1053,7 |
* Use the access and other non-mapping-related flag bits from |
* the compatible memory placement flags to the active flags |
*/ |
ttm_flag_masked(&bo->mem.placement, placement->placement[ret], |
ttm_flag_masked(&bo->mem.placement, new_flags, |
~TTM_PL_MASK_MEMTYPE); |
} |
/* |
1103,6 → 1123,7 |
INIT_LIST_HEAD(&bo->ddestroy); |
INIT_LIST_HEAD(&bo->swap); |
INIT_LIST_HEAD(&bo->io_reserve_lru); |
mutex_init(&bo->wu_mutex); |
bo->bdev = bdev; |
bo->glob = bdev->glob; |
bo->type = type; |
1368,3 → 1389,36 |
return true; |
} |
int ttm_bo_wait(struct ttm_buffer_object *bo, |
bool lazy, bool interruptible, bool no_wait) |
{ |
struct ttm_bo_driver *driver = bo->bdev->driver; |
struct ttm_bo_device *bdev = bo->bdev; |
void *sync_obj; |
int ret = 0; |
if (likely(bo->sync_obj == NULL)) |
return 0; |
return 0; |
} |
EXPORT_SYMBOL(ttm_bo_wait); |
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) |
{ |
struct ttm_bo_device *bdev = bo->bdev; |
int ret = 0; |
/* |
* Using ttm_bo_reserve makes sure the lru lists are updated. |
*/ |
return ret; |
} |
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab); |
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) |
{ |
atomic_dec(&bo->cpu_writers); |
} |
EXPORT_SYMBOL(ttm_bo_synccpu_write_release); |
/drivers/video/drm/ttm/ttm_bo_util.c |
---|
187,7 → 187,7 |
} |
} |
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, |
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, |
void **virtual) |
{ |
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; |
219,7 → 219,7 |
return 0; |
} |
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, |
static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, |
void *virtual) |
{ |
struct ttm_mem_type_manager *man; |
343,20 → 343,26 |
if (ret) |
goto out; |
/* |
* Single TTM move. NOP. |
*/ |
if (old_iomap == NULL && new_iomap == NULL) |
goto out2; |
/* |
* Don't move nonexistent data. Clear destination instead. |
*/ |
if (old_iomap == NULL && ttm == NULL) |
goto out2; |
if (ttm->state == tt_unpopulated) { |
/* |
* TTM might be null for moves within the same region. |
*/ |
if (ttm && ttm->state == tt_unpopulated) { |
ret = ttm->bdev->driver->ttm_tt_populate(ttm); |
if (ret) { |
/* if we fail here don't nuke the mm node |
* as the bo still owns it */ |
old_copy.mm_node = NULL; |
if (ret) |
goto out1; |
} |
} |
add = 0; |
dir = 1; |
381,12 → 387,9 |
prot); |
} else |
ret = ttm_copy_io_page(new_iomap, old_iomap, page); |
if (ret) { |
/* failing here, means keep old copy as-is */ |
old_copy.mm_node = NULL; |
if (ret) |
goto out1; |
} |
} |
mb(); |
out2: |
old_copy = *old_mem; |
403,6 → 406,11 |
ttm_mem_reg_iounmap(bdev, old_mem, new_iomap); |
out: |
ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); |
/* |
* On error, keep the mm node! |
*/ |
if (!ret) |
ttm_bo_mem_put(bo, &old_copy); |
return ret; |
} |
582,7 → 590,7 |
if (start_page > bo->num_pages) |
return -EINVAL; |
#if 0 |
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) |
if (num_pages > 1 && !capable(CAP_SYS_ADMIN)) |
return -EPERM; |
#endif |
(void) ttm_mem_io_lock(man, false); |
/drivers/video/drm/ttm/ttm_object.c |
---|
1,6 → 1,6 |
/************************************************************************** |
* |
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA |
* Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
26,6 → 26,12 |
**************************************************************************/ |
/* |
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
* |
* While no substantial code is shared, the prime code is inspired by |
* drm_prime.c, with |
* Authors: |
* Dave Airlie <airlied@redhat.com> |
* Rob Clark <rob.clark@linaro.org> |
*/ |
/** @file ttm_ref_object.c |
* |
34,6 → 40,7 |
* and release on file close. |
*/ |
/** |
* struct ttm_object_file |
* |
51,6 → 58,8 |
#define pr_fmt(fmt) "[TTM] " fmt |
#include <linux/mutex.h> |
#include <drm/ttm/ttm_object.h> |
#include <drm/ttm/ttm_module.h> |
#include <linux/list.h> |
69,7 → 78,7 |
struct ttm_object_file { |
struct ttm_object_device *tdev; |
rwlock_t lock; |
spinlock_t lock; |
struct list_head ref_list; |
struct drm_open_hash ref_hash[TTM_REF_NUM]; |
struct kref refcount; |
124,6 → 133,8 |
struct ttm_object_file *tfile; |
}; |
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf); |
static inline struct ttm_object_file * |
ttm_object_file_ref(struct ttm_object_file *tfile) |
{ |
206,11 → 217,10 |
* call_rcu() or ttm_base_object_kfree(). |
*/ |
if (base->refcount_release) { |
ttm_object_file_unref(&base->tfile); |
if (base->refcount_release) |
base->refcount_release(&base); |
} |
} |
void ttm_base_object_unref(struct ttm_base_object **p_base) |
{ |
225,32 → 235,44 |
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, |
uint32_t key) |
{ |
struct ttm_object_device *tdev = tfile->tdev; |
struct ttm_base_object *base; |
struct ttm_base_object *base = NULL; |
struct drm_hash_item *hash; |
struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; |
int ret; |
// rcu_read_lock(); |
ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash); |
ret = drm_ht_find_item_rcu(ht, key, &hash); |
if (likely(ret == 0)) { |
base = drm_hash_entry(hash, struct ttm_base_object, hash); |
ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL; |
base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj; |
if (!kref_get_unless_zero(&base->refcount)) |
base = NULL; |
} |
// rcu_read_unlock(); |
if (unlikely(ret != 0)) |
return NULL; |
return base; |
} |
EXPORT_SYMBOL(ttm_base_object_lookup); |
if (tfile != base->tfile && !base->shareable) { |
pr_err("Attempted access of non-shareable object\n"); |
ttm_base_object_unref(&base); |
return NULL; |
} |
struct ttm_base_object * |
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key) |
{ |
struct ttm_base_object *base = NULL; |
struct drm_hash_item *hash; |
struct drm_open_hash *ht = &tdev->object_hash; |
int ret; |
ret = drm_ht_find_item_rcu(ht, key, &hash); |
if (likely(ret == 0)) { |
base = drm_hash_entry(hash, struct ttm_base_object, hash); |
if (!kref_get_unless_zero(&base->refcount)) |
base = NULL; |
} |
return base; |
} |
EXPORT_SYMBOL(ttm_base_object_lookup); |
EXPORT_SYMBOL(ttm_base_object_lookup_for_ref); |
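/* |
 * Hypothetical caller sketch: vmwgfx-style handle validation. Look the |
 * object up device-wide, take a per-file USAGE reference, then drop the |
 * lookup reference. Names beyond this file (e.g. handle) are placeholders. |
 */ |
base = ttm_base_object_lookup_for_ref(tdev, handle); |
if (unlikely(base == NULL)) |
return -EINVAL; |
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); |
ttm_base_object_unref(&base); |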
int ttm_ref_object_add(struct ttm_object_file *tfile, |
struct ttm_base_object *base, |
266,17 → 288,15 |
*existed = true; |
while (ret == -EINVAL) { |
read_lock(&tfile->lock); |
ret = drm_ht_find_item(ht, base->hash.key, &hash); |
ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash); |
if (ret == 0) { |
ref = drm_hash_entry(hash, struct ttm_ref_object, hash); |
kref_get(&ref->kref); |
read_unlock(&tfile->lock); |
if (!kref_get_unless_zero(&ref->kref)) { |
break; |
} |
} |
read_unlock(&tfile->lock); |
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), |
false, false); |
if (unlikely(ret != 0)) |
293,19 → 313,19 |
ref->ref_type = ref_type; |
kref_init(&ref->kref); |
write_lock(&tfile->lock); |
ret = drm_ht_insert_item(ht, &ref->hash); |
spin_lock(&tfile->lock); |
ret = drm_ht_insert_item_rcu(ht, &ref->hash); |
if (likely(ret == 0)) { |
list_add_tail(&ref->head, &tfile->ref_list); |
kref_get(&base->refcount); |
write_unlock(&tfile->lock); |
spin_unlock(&tfile->lock); |
if (existed != NULL) |
*existed = false; |
break; |
} |
write_unlock(&tfile->lock); |
spin_unlock(&tfile->lock); |
BUG_ON(ret != -EINVAL); |
ttm_mem_global_free(mem_glob, sizeof(*ref)); |
326,9 → 346,9 |
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; |
ht = &tfile->ref_hash[ref->ref_type]; |
(void)drm_ht_remove_item(ht, &ref->hash); |
(void)drm_ht_remove_item_rcu(ht, &ref->hash); |
list_del(&ref->head); |
write_unlock(&tfile->lock); |
spin_unlock(&tfile->lock); |
if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release) |
base->ref_obj_release(base, ref->ref_type); |
336,7 → 356,7 |
ttm_base_object_unref(&ref->obj); |
ttm_mem_global_free(mem_glob, sizeof(*ref)); |
kfree(ref); |
write_lock(&tfile->lock); |
spin_lock(&tfile->lock); |
} |
int ttm_ref_object_base_unref(struct ttm_object_file *tfile, |
347,15 → 367,15 |
struct drm_hash_item *hash; |
int ret; |
write_lock(&tfile->lock); |
spin_lock(&tfile->lock); |
ret = drm_ht_find_item(ht, key, &hash); |
if (unlikely(ret != 0)) { |
write_unlock(&tfile->lock); |
spin_unlock(&tfile->lock); |
return -EINVAL; |
} |
ref = drm_hash_entry(hash, struct ttm_ref_object, hash); |
kref_put(&ref->kref, ttm_ref_object_release); |
write_unlock(&tfile->lock); |
spin_unlock(&tfile->lock); |
return 0; |
} |
EXPORT_SYMBOL(ttm_ref_object_base_unref); |
368,7 → 388,7 |
struct ttm_object_file *tfile = *p_tfile; |
*p_tfile = NULL; |
write_lock(&tfile->lock); |
spin_lock(&tfile->lock); |
/* |
* Since we release the lock within the loop, we have to |
384,7 → 404,7 |
for (i = 0; i < TTM_REF_NUM; ++i) |
drm_ht_remove(&tfile->ref_hash[i]); |
write_unlock(&tfile->lock); |
spin_unlock(&tfile->lock); |
ttm_object_file_unref(&tfile); |
} |
EXPORT_SYMBOL(ttm_object_file_release); |
400,7 → 420,7 |
if (unlikely(tfile == NULL)) |
return NULL; |
rwlock_init(&tfile->lock); |
spin_lock_init(&tfile->lock); |
tfile->tdev = tdev; |
kref_init(&tfile->refcount); |
INIT_LIST_HEAD(&tfile->ref_list); |
424,9 → 444,10 |
} |
EXPORT_SYMBOL(ttm_object_file_init); |
struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global |
*mem_glob, |
unsigned int hash_order) |
struct ttm_object_device * |
ttm_object_device_init(struct ttm_mem_global *mem_glob, |
unsigned int hash_order, |
const struct dma_buf_ops *ops) |
{ |
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); |
int ret; |
438,10 → 459,17 |
spin_lock_init(&tdev->object_lock); |
atomic_set(&tdev->object_count, 0); |
ret = drm_ht_create(&tdev->object_hash, hash_order); |
if (ret != 0) |
goto out_no_object_hash; |
if (likely(ret == 0)) |
// tdev->ops = *ops; |
// tdev->dmabuf_release = tdev->ops.release; |
// tdev->ops.release = ttm_prime_dmabuf_release; |
// tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) + |
// ttm_round_pot(sizeof(struct file)); |
return tdev; |
out_no_object_hash: |
kfree(tdev); |
return NULL; |
} |
/drivers/video/drm/ttm/ttm_page_alloc.c |
---|
41,7 → 41,7 |
#include <linux/mm.h> |
#include <linux/seq_file.h> /* for seq_printf */ |
#include <linux/slab.h> |
//#include <linux/dma-mapping.h> |
#include <linux/dma-mapping.h> |
//#include <linux/atomic.h> |
/drivers/video/drm/ttm/ttm_tt.c |
---|
172,9 → 172,8 |
ttm_tt_unbind(ttm); |
} |
if (likely(ttm->pages != NULL)) { |
ttm->bdev->driver->ttm_tt_unpopulate(ttm); |
} |
// if (ttm->state == tt_unbound) |
// ttm_tt_unpopulate(ttm); |
// if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) && |
// ttm->swap_storage) |
368,7 → 367,7 |
page_cache_release(to_page); |
} |
ttm->bdev->driver->ttm_tt_unpopulate(ttm); |
ttm_tt_unpopulate(ttm); |
ttm->swap_storage = swap_storage; |
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED; |
if (persistent_swap_storage) |
/drivers/video/drm/vmwgfx/Makefile |
---|
7,20 → 7,20 |
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 |
DRV_TOPDIR = $(CURDIR)/../../.. |
DRV_INCLUDES = $(DRV_TOPDIR)/include |
DRM_TOPDIR = $(CURDIR)/.. |
DRV_INCLUDES = $(DRV_TOPDIR)/include |
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \ |
-I$(DRV_INCLUDES)/linux |
-I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/uapi |
CFLAGS = -c -O2 $(INCLUDES) -march=i686 -fomit-frame-pointer -fno-builtin-printf |
CFLAGS+= -mno-ms-bitfields |
LIBPATH:= $(DRV_TOPDIR)/ddk |
LIBS:= -lddk -lcore -lgcc |
LDFLAGS = -nostdlib -shared -s -Map atikms.map --image-base 0\ |
LDFLAGS = -nostdlib -shared -s --image-base 0\ |
--file-alignment 512 --section-alignment 4096 |
51,8 → 51,10 |
vmwgfx_irq.c \ |
vmwgfx_kms.c \ |
vmwgfx_marker.c \ |
vmwgfx_mob.c \ |
vmwgfx_resource.c \ |
vmwgfx_scrn.c \ |
vmwgfx_shader.c \ |
vmwgfx_surface.c \ |
vmwgfx_ttm_glue.c \ |
../hdmi.c \ |
/drivers/video/drm/vmwgfx/svga3d_reg.h |
---|
34,6 → 34,8 |
#include "svga_reg.h" |
typedef uint32 PPN; |
typedef __le64 PPN64; |
/* |
* 3D Hardware Version |
71,6 → 73,9 |
#define SVGA3D_MAX_CONTEXT_IDS 256 |
#define SVGA3D_MAX_SURFACE_IDS (32 * 1024) |
#define SVGA3D_NUM_TEXTURE_UNITS 32 |
#define SVGA3D_NUM_LIGHTS 8 |
/* |
* Surface formats. |
* |
81,6 → 86,7 |
*/ |
typedef enum SVGA3dSurfaceFormat { |
SVGA3D_FORMAT_MIN = 0, |
SVGA3D_FORMAT_INVALID = 0, |
SVGA3D_X8R8G8B8 = 1, |
134,12 → 140,6 |
SVGA3D_RG_S10E5 = 35, |
SVGA3D_RG_S23E8 = 36, |
/* |
* Any surface can be used as a buffer object, but SVGA3D_BUFFER is |
* the most efficient format to use when creating new surfaces |
* expressly for index or vertex data. |
*/ |
SVGA3D_BUFFER = 37, |
SVGA3D_Z_D24X8 = 38, |
159,8 → 159,99 |
/* Video format with alpha */ |
SVGA3D_AYUV = 45, |
SVGA3D_R32G32B32A32_TYPELESS = 46, |
SVGA3D_R32G32B32A32_FLOAT = 25, |
SVGA3D_R32G32B32A32_UINT = 47, |
SVGA3D_R32G32B32A32_SINT = 48, |
SVGA3D_R32G32B32_TYPELESS = 49, |
SVGA3D_R32G32B32_FLOAT = 50, |
SVGA3D_R32G32B32_UINT = 51, |
SVGA3D_R32G32B32_SINT = 52, |
SVGA3D_R16G16B16A16_TYPELESS = 53, |
SVGA3D_R16G16B16A16_FLOAT = 24, |
SVGA3D_R16G16B16A16_UNORM = 41, |
SVGA3D_R16G16B16A16_UINT = 54, |
SVGA3D_R16G16B16A16_SNORM = 55, |
SVGA3D_R16G16B16A16_SINT = 56, |
SVGA3D_R32G32_TYPELESS = 57, |
SVGA3D_R32G32_FLOAT = 36, |
SVGA3D_R32G32_UINT = 58, |
SVGA3D_R32G32_SINT = 59, |
SVGA3D_R32G8X24_TYPELESS = 60, |
SVGA3D_D32_FLOAT_S8X24_UINT = 61, |
SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62, |
SVGA3D_X32_TYPELESS_G8X24_UINT = 63, |
SVGA3D_R10G10B10A2_TYPELESS = 64, |
SVGA3D_R10G10B10A2_UNORM = 26, |
SVGA3D_R10G10B10A2_UINT = 65, |
SVGA3D_R11G11B10_FLOAT = 66, |
SVGA3D_R8G8B8A8_TYPELESS = 67, |
SVGA3D_R8G8B8A8_UNORM = 68, |
SVGA3D_R8G8B8A8_UNORM_SRGB = 69, |
SVGA3D_R8G8B8A8_UINT = 70, |
SVGA3D_R8G8B8A8_SNORM = 28, |
SVGA3D_R8G8B8A8_SINT = 71, |
SVGA3D_R16G16_TYPELESS = 72, |
SVGA3D_R16G16_FLOAT = 35, |
SVGA3D_R16G16_UNORM = 40, |
SVGA3D_R16G16_UINT = 73, |
SVGA3D_R16G16_SNORM = 39, |
SVGA3D_R16G16_SINT = 74, |
SVGA3D_R32_TYPELESS = 75, |
SVGA3D_D32_FLOAT = 76, |
SVGA3D_R32_FLOAT = 34, |
SVGA3D_R32_UINT = 77, |
SVGA3D_R32_SINT = 78, |
SVGA3D_R24G8_TYPELESS = 79, |
SVGA3D_D24_UNORM_S8_UINT = 80, |
SVGA3D_R24_UNORM_X8_TYPELESS = 81, |
SVGA3D_X24_TYPELESS_G8_UINT = 82, |
SVGA3D_R8G8_TYPELESS = 83, |
SVGA3D_R8G8_UNORM = 84, |
SVGA3D_R8G8_UINT = 85, |
SVGA3D_R8G8_SNORM = 27, |
SVGA3D_R8G8_SINT = 86, |
SVGA3D_R16_TYPELESS = 87, |
SVGA3D_R16_FLOAT = 33, |
SVGA3D_D16_UNORM = 8, |
SVGA3D_R16_UNORM = 88, |
SVGA3D_R16_UINT = 89, |
SVGA3D_R16_SNORM = 90, |
SVGA3D_R16_SINT = 91, |
SVGA3D_R8_TYPELESS = 92, |
SVGA3D_R8_UNORM = 93, |
SVGA3D_R8_UINT = 94, |
SVGA3D_R8_SNORM = 95, |
SVGA3D_R8_SINT = 96, |
SVGA3D_A8_UNORM = 32, |
SVGA3D_R1_UNORM = 97, |
SVGA3D_R9G9B9E5_SHAREDEXP = 98, |
SVGA3D_R8G8_B8G8_UNORM = 99, |
SVGA3D_G8R8_G8B8_UNORM = 100, |
SVGA3D_BC1_TYPELESS = 101, |
SVGA3D_BC1_UNORM = 15, |
SVGA3D_BC1_UNORM_SRGB = 102, |
SVGA3D_BC2_TYPELESS = 103, |
SVGA3D_BC2_UNORM = 17, |
SVGA3D_BC2_UNORM_SRGB = 104, |
SVGA3D_BC3_TYPELESS = 105, |
SVGA3D_BC3_UNORM = 19, |
SVGA3D_BC3_UNORM_SRGB = 106, |
SVGA3D_BC4_TYPELESS = 107, |
SVGA3D_BC4_UNORM = 108, |
SVGA3D_BC4_SNORM = 109, |
SVGA3D_BC5_TYPELESS = 110, |
SVGA3D_BC5_UNORM = 111, |
SVGA3D_BC5_SNORM = 112, |
SVGA3D_B5G6R5_UNORM = 3, |
SVGA3D_B5G5R5A1_UNORM = 5, |
SVGA3D_B8G8R8A8_UNORM = 2, |
SVGA3D_B8G8R8X8_UNORM = 1, |
SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113, |
SVGA3D_B8G8R8A8_TYPELESS = 114, |
SVGA3D_B8G8R8A8_UNORM_SRGB = 115, |
SVGA3D_B8G8R8X8_TYPELESS = 116, |
SVGA3D_B8G8R8X8_UNORM_SRGB = 117, |
/* Advanced D3D9 depth formats. */ |
SVGA3D_Z_DF16 = 118, |
167,7 → 258,15 |
SVGA3D_Z_DF24 = 119, |
SVGA3D_Z_D24S8_INT = 120, |
SVGA3D_FORMAT_MAX |
/* Planar video formats. */ |
SVGA3D_YV12 = 121, |
/* Shader constant formats. */ |
SVGA3D_SURFACE_SHADERCONST_FLOAT = 122, |
SVGA3D_SURFACE_SHADERCONST_INT = 123, |
SVGA3D_SURFACE_SHADERCONST_BOOL = 124, |
SVGA3D_FORMAT_MAX = 125, |
} SVGA3dSurfaceFormat; |
typedef uint32 SVGA3dColor; /* a, r, g, b */ |
957,15 → 1056,21 |
} SVGA3dCubeFace; |
typedef enum { |
SVGA3D_SHADERTYPE_INVALID = 0, |
SVGA3D_SHADERTYPE_MIN = 1, |
SVGA3D_SHADERTYPE_VS = 1, |
SVGA3D_SHADERTYPE_PS = 2, |
SVGA3D_SHADERTYPE_MAX |
SVGA3D_SHADERTYPE_MAX = 3, |
SVGA3D_SHADERTYPE_GS = 3, |
} SVGA3dShaderType; |
#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN) |
typedef enum { |
SVGA3D_CONST_TYPE_FLOAT = 0, |
SVGA3D_CONST_TYPE_INT = 1, |
SVGA3D_CONST_TYPE_BOOL = 2, |
SVGA3D_CONST_TYPE_MAX |
} SVGA3dShaderConstType; |
#define SVGA3D_MAX_SURFACE_FACES 6 |
1056,10 → 1161,75 |
#define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31 |
#define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40 |
#define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41 |
#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 42 |
#define SVGA_3D_CMD_SCREEN_DMA 1082 |
#define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083 |
#define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE 1084 |
#define SVGA_3D_CMD_FUTURE_MAX 2000 |
#define SVGA_3D_CMD_LOGICOPS_BITBLT 1085 |
#define SVGA_3D_CMD_LOGICOPS_TRANSBLT 1086 |
#define SVGA_3D_CMD_LOGICOPS_STRETCHBLT 1087 |
#define SVGA_3D_CMD_LOGICOPS_COLORFILL 1088 |
#define SVGA_3D_CMD_LOGICOPS_ALPHABLEND 1089 |
#define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND 1090 |
#define SVGA_3D_CMD_SET_OTABLE_BASE 1091 |
#define SVGA_3D_CMD_READBACK_OTABLE 1092 |
#define SVGA_3D_CMD_DEFINE_GB_MOB 1093 |
#define SVGA_3D_CMD_DESTROY_GB_MOB 1094 |
#define SVGA_3D_CMD_REDEFINE_GB_MOB 1095 |
#define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING 1096 |
#define SVGA_3D_CMD_DEFINE_GB_SURFACE 1097 |
#define SVGA_3D_CMD_DESTROY_GB_SURFACE 1098 |
#define SVGA_3D_CMD_BIND_GB_SURFACE 1099 |
#define SVGA_3D_CMD_COND_BIND_GB_SURFACE 1100 |
#define SVGA_3D_CMD_UPDATE_GB_IMAGE 1101 |
#define SVGA_3D_CMD_UPDATE_GB_SURFACE 1102 |
#define SVGA_3D_CMD_READBACK_GB_IMAGE 1103 |
#define SVGA_3D_CMD_READBACK_GB_SURFACE 1104 |
#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1105 |
#define SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1106 |
#define SVGA_3D_CMD_DEFINE_GB_CONTEXT 1107 |
#define SVGA_3D_CMD_DESTROY_GB_CONTEXT 1108 |
#define SVGA_3D_CMD_BIND_GB_CONTEXT 1109 |
#define SVGA_3D_CMD_READBACK_GB_CONTEXT 1110 |
#define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT 1111 |
#define SVGA_3D_CMD_DEFINE_GB_SHADER 1112 |
#define SVGA_3D_CMD_DESTROY_GB_SHADER 1113 |
#define SVGA_3D_CMD_BIND_GB_SHADER 1114 |
#define SVGA_3D_CMD_SET_OTABLE_BASE64 1115 |
#define SVGA_3D_CMD_BEGIN_GB_QUERY 1116 |
#define SVGA_3D_CMD_END_GB_QUERY 1117 |
#define SVGA_3D_CMD_WAIT_FOR_GB_QUERY 1118 |
#define SVGA_3D_CMD_NOP 1119 |
#define SVGA_3D_CMD_ENABLE_GART 1120 |
#define SVGA_3D_CMD_DISABLE_GART 1121 |
#define SVGA_3D_CMD_MAP_MOB_INTO_GART 1122 |
#define SVGA_3D_CMD_UNMAP_GART_RANGE 1123 |
#define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET 1124 |
#define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET 1125 |
#define SVGA_3D_CMD_BIND_GB_SCREENTARGET 1126 |
#define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET 1127 |
#define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL 1128 |
#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129 |
#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130 |
#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135 |
#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136 |
#define SVGA_3D_CMD_MAX 1142 |
#define SVGA_3D_CMD_FUTURE_MAX 3000 |
/* |
* Common substructures used in multiple FIFO commands: |
*/ |
1750,6 → 1920,495 |
/* |
* Guest-backed surface definitions. |
*/ |
typedef uint32 SVGAMobId; |
typedef enum SVGAMobFormat { |
SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID, |
SVGA3D_MOBFMT_PTDEPTH_0 = 0, |
SVGA3D_MOBFMT_PTDEPTH_1 = 1, |
SVGA3D_MOBFMT_PTDEPTH_2 = 2, |
SVGA3D_MOBFMT_RANGE = 3, |
SVGA3D_MOBFMT_PTDEPTH64_0 = 4, |
SVGA3D_MOBFMT_PTDEPTH64_1 = 5, |
SVGA3D_MOBFMT_PTDEPTH64_2 = 6, |
SVGA3D_MOBFMT_MAX, |
} SVGAMobFormat; |
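/* |
 * Hedged sketch of how a page-table depth might be picked (cf. upstream |
 * vmwgfx_mob.c; the exact thresholds here are an assumption, and the |
 * variable names are placeholders): PTDEPTH_0 means the base PPN addresses |
 * the data page directly; each further level adds one table indirection. |
 */ |
if (num_data_pages == 1) |
pt_level = SVGA3D_MOBFMT_PTDEPTH_0; |
else if (num_pt_pages == 1) |
pt_level = SVGA3D_MOBFMT_PTDEPTH_1; |
else |
pt_level = SVGA3D_MOBFMT_PTDEPTH_2; |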
/* |
* Sizes of opaque types. |
*/ |
#define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16 |
#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8 |
#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64 |
#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16 |
#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64 |
#define SVGA3D_CONTEXT_DATA_SIZE 16384 |
/* |
* SVGA3dCmdSetOTableBase -- |
* |
* This command allows the guest to specify the base PPN of the |
* specified object table. |
*/ |
typedef enum { |
SVGA_OTABLE_MOB = 0, |
SVGA_OTABLE_MIN = 0, |
SVGA_OTABLE_SURFACE = 1, |
SVGA_OTABLE_CONTEXT = 2, |
SVGA_OTABLE_SHADER = 3, |
SVGA_OTABLE_SCREEN_TARGET = 4, |
SVGA_OTABLE_DX9_MAX = 5, |
SVGA_OTABLE_MAX = 8 |
} SVGAOTableType; |
typedef |
struct { |
SVGAOTableType type; |
PPN baseAddress; |
uint32 sizeInBytes; |
uint32 validSizeInBytes; |
SVGAMobFormat ptDepth; |
} |
__attribute__((__packed__)) |
SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ |
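/* |
 * Emission sketch (hedged): OTable commands travel through the FIFO like |
 * any other SVGA3D command, using the vmw_fifo_reserve()/vmw_fifo_commit() |
 * helpers this driver already employs elsewhere. |
 */ |
struct { |
uint32 id; /* SVGA_3D_CMD_SET_OTABLE_BASE */ |
SVGA3dCmdSetOTableBase body; |
} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) |
return -ENOMEM; |
cmd->id = SVGA_3D_CMD_SET_OTABLE_BASE; |
/* ... fill cmd->body ... */ |
vmw_fifo_commit(dev_priv, sizeof(*cmd)); |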
typedef |
struct { |
SVGAOTableType type; |
PPN64 baseAddress; |
uint32 sizeInBytes; |
uint32 validSizeInBytes; |
SVGAMobFormat ptDepth; |
} |
__attribute__((__packed__)) |
SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ |
typedef |
struct { |
SVGAOTableType type; |
} |
__attribute__((__packed__)) |
SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ |
/* |
* Define a memory object (Mob) in the OTable. |
*/ |
typedef |
struct SVGA3dCmdDefineGBMob { |
SVGAMobId mobid; |
SVGAMobFormat ptDepth; |
PPN base; |
uint32 sizeInBytes; |
} |
__attribute__((__packed__)) |
SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ |
/* |
* Destroys an object in the OTable. |
*/ |
typedef |
struct SVGA3dCmdDestroyGBMob { |
SVGAMobId mobid; |
} |
__attribute__((__packed__)) |
SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ |
/* |
* Redefine an object in the OTable. |
*/ |
typedef |
struct SVGA3dCmdRedefineGBMob { |
SVGAMobId mobid; |
SVGAMobFormat ptDepth; |
PPN base; |
uint32 sizeInBytes; |
} |
__attribute__((__packed__)) |
SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */ |
/* |
* Define a memory object (Mob) in the OTable with a PPN64 base. |
*/ |
typedef |
struct SVGA3dCmdDefineGBMob64 { |
SVGAMobId mobid; |
SVGAMobFormat ptDepth; |
PPN64 base; |
uint32 sizeInBytes; |
} |
__attribute__((__packed__)) |
SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ |
/* |
* Redefine an object in the OTable with PPN64 base. |
*/ |
typedef |
struct SVGA3dCmdRedefineGBMob64 { |
SVGAMobId mobid; |
SVGAMobFormat ptDepth; |
PPN64 base; |
uint32 sizeInBytes; |
} |
__attribute__((__packed__)) |
SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ |
/* |
* Notification that the page tables have been modified. |
*/ |
typedef |
struct SVGA3dCmdUpdateGBMobMapping { |
SVGAMobId mobid; |
} |
__attribute__((__packed__)) |
SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ |
/* |
* Define a guest-backed surface. |
*/ |
typedef |
struct SVGA3dCmdDefineGBSurface { |
uint32 sid; |
SVGA3dSurfaceFlags surfaceFlags; |
SVGA3dSurfaceFormat format; |
uint32 numMipLevels; |
uint32 multisampleCount; |
SVGA3dTextureFilter autogenFilter; |
SVGA3dSize size; |
} SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ |
/* |
* Destroy a guest-backed surface. |
*/ |
typedef |
struct SVGA3dCmdDestroyGBSurface { |
uint32 sid; |
} SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ |
/* |
* Bind a guest-backed surface to an object. |
*/ |
typedef |
struct SVGA3dCmdBindGBSurface { |
uint32 sid; |
SVGAMobId mobid; |
} SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ |
/* |
* Conditionally bind a mob to a guest backed surface if testMobid |
* matches the currently bound mob. Optionally issue a readback on |
* the surface while it is still bound to the old mobid if the mobid |
* is changed by this command. |
*/ |
#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0) |
typedef |
struct{ |
uint32 sid; |
SVGAMobId testMobid; |
SVGAMobId mobid; |
uint32 flags; |
} |
SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ |
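/* |
 * Usage sketch (hedged): atomically swap in a new mob, asking the device |
 * to flush the old contents first. Identifiers other than the flag and |
 * the struct fields are placeholders. |
 */ |
cmd->body.sid = sid; |
cmd->body.testMobid = old_mobid; |
cmd->body.mobid = new_mobid; |
cmd->body.flags = SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK; |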
/* |
* Update an image in a guest-backed surface. |
* (Inform the device that the guest-contents have been updated.) |
*/ |
typedef |
struct SVGA3dCmdUpdateGBImage { |
SVGA3dSurfaceImageId image; |
SVGA3dBox box; |
} SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ |
/* |
* Update an entire guest-backed surface. |
* (Inform the device that the guest-contents have been updated.) |
*/ |
typedef |
struct SVGA3dCmdUpdateGBSurface { |
uint32 sid; |
} SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ |
/* |
* Readback an image in a guest-backed surface. |
* (Request the device to flush the dirty contents into the guest.) |
*/ |
typedef |
struct SVGA3dCmdReadbackGBImage { |
SVGA3dSurfaceImageId image; |
} SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ |
/* |
* Readback an entire guest-backed surface. |
* (Request the device to flush the dirty contents into the guest.) |
*/ |
typedef |
struct SVGA3dCmdReadbackGBSurface { |
uint32 sid; |
} SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ |
/* |
* Readback a sub rect of an image in a guest-backed surface. After |
* issuing this command the driver is required to issue an update call |
* of the same region before issuing any other commands that reference |
* this surface or rendering is not guaranteed. |
*/ |
typedef |
struct SVGA3dCmdReadbackGBImagePartial { |
SVGA3dSurfaceImageId image; |
SVGA3dBox box; |
uint32 invertBox; |
} |
SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ |
/* |
* Invalidate an image in a guest-backed surface. |
* (Notify the device that the contents can be lost.) |
*/ |
typedef |
struct SVGA3dCmdInvalidateGBImage { |
SVGA3dSurfaceImageId image; |
} SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ |
/* |
* Invalidate an entire guest-backed surface. |
* (Notify the device that the contents of all images can be lost.) |
*/ |
typedef |
struct SVGA3dCmdInvalidateGBSurface { |
uint32 sid; |
} SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ |
/* |
* Invalidate a sub rect of an image in a guest-backed surface. After |
* issuing this command the driver is required to issue an update call |
* of the same region before issuing any other commands that reference |
* this surface or rendering is not guaranteed. |
*/ |
typedef |
struct SVGA3dCmdInvalidateGBImagePartial { |
SVGA3dSurfaceImageId image; |
SVGA3dBox box; |
uint32 invertBox; |
} |
SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ |
/* |
* Define a guest-backed context. |
*/ |
typedef |
struct SVGA3dCmdDefineGBContext { |
uint32 cid; |
} SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ |
/* |
* Destroy a guest-backed context. |
*/ |
typedef |
struct SVGA3dCmdDestroyGBContext { |
uint32 cid; |
} SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ |
/* |
* Bind a guest-backed context. |
* |
* validContents should be set to 0 for new contexts, |
* and 1 if this is an old context which is getting paged |
* back on to the device. |
* |
* For new contexts, it is recommended that the driver |
* issue commands to initialize all interesting state |
* prior to rendering. |
*/ |
typedef |
struct SVGA3dCmdBindGBContext { |
uint32 cid; |
SVGAMobId mobid; |
uint32 validContents; |
} SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ |
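/* |
 * Bind sketch (hedged, after upstream vmw_gb_context_bind): a context whose |
 * backup holds the latest contents is bound with validContents set, a |
 * freshly defined one is not. |
 */ |
cmd->body.cid = res->id; |
cmd->body.mobid = bo->mem.start; |
cmd->body.validContents = res->backup_dirty; |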
/* |
* Readback a guest-backed context. |
* (Request that the device flush the contents back into guest memory.) |
*/ |
typedef |
struct SVGA3dCmdReadbackGBContext { |
uint32 cid; |
} SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ |
/* |
* Invalidate a guest-backed context. |
*/ |
typedef |
struct SVGA3dCmdInvalidateGBContext { |
uint32 cid; |
} SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ |
/* |
* Define a guest-backed shader. |
*/ |
typedef |
struct SVGA3dCmdDefineGBShader { |
uint32 shid; |
SVGA3dShaderType type; |
uint32 sizeInBytes; |
} SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ |
/* |
* Bind a guest-backed shader. |
*/ |
typedef struct SVGA3dCmdBindGBShader { |
uint32 shid; |
SVGAMobId mobid; |
uint32 offsetInBytes; |
} SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ |
/* |
* Destroy a guest-backed shader. |
*/ |
typedef struct SVGA3dCmdDestroyGBShader { |
uint32 shid; |
} SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ |
typedef |
struct { |
uint32 cid; |
uint32 regStart; |
SVGA3dShaderType shaderType; |
SVGA3dShaderConstType constType; |
/* |
* Followed by a variable number of shader constants. |
* |
* Note that FLOAT and INT constants are 4-dwords in length, while |
* BOOL constants are 1-dword in length. |
*/ |
} SVGA3dCmdSetGBShaderConstInline; |
/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ |
typedef |
struct { |
uint32 cid; |
SVGA3dQueryType type; |
} SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ |
typedef |
struct { |
uint32 cid; |
SVGA3dQueryType type; |
SVGAMobId mobid; |
uint32 offset; |
} SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ |
/* |
* SVGA_3D_CMD_WAIT_FOR_GB_QUERY -- |
* |
* The semantics of this command are identical to the |
* SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written |
* to a Mob instead of a GMR. |
*/ |
typedef |
struct { |
uint32 cid; |
SVGA3dQueryType type; |
SVGAMobId mobid; |
uint32 offset; |
} SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ |
typedef |
struct { |
SVGAMobId mobid; |
uint32 fbOffset; |
uint32 initalized; |
} |
SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ |
typedef |
struct { |
SVGAMobId mobid; |
uint32 gartOffset; |
} |
SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ |
typedef |
struct { |
uint32 gartOffset; |
uint32 numPages; |
} |
SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ |
/* |
* Screen Targets |
*/ |
#define SVGA_STFLAG_PRIMARY (1 << 0) |
typedef |
struct { |
uint32 stid; |
uint32 width; |
uint32 height; |
int32 xRoot; |
int32 yRoot; |
uint32 flags; |
} |
SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ |
typedef |
struct { |
uint32 stid; |
} |
SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ |
typedef |
struct { |
uint32 stid; |
SVGA3dSurfaceImageId image; |
} |
SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ |
typedef |
struct { |
uint32 stid; |
SVGA3dBox box; |
} |
SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ |
/* |
* Capability query index. |
* |
* Notes: |
1879,10 → 2538,41 |
SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83, |
/* |
* Don't add new caps into the previous section; the values in this |
* enumeration must not change. You can put new values right before |
* SVGA3D_DEVCAP_MAX. |
* Deprecated. |
*/ |
SVGA3D_DEVCAP_VGPU10 = 84, |
/* |
* This contains several SVGA_3D_CAPS_VIDEO_DECODE elements |
* ored together, one for every type of video decoding supported. |
*/ |
SVGA3D_DEVCAP_VIDEO_DECODE = 85, |
/* |
* This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements |
* ored together, one for every type of video processing supported. |
*/ |
SVGA3D_DEVCAP_VIDEO_PROCESS = 86, |
SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */ |
SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */ |
SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */ |
SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */ |
SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91, |
/* |
* Does the host support the SVGA logic ops commands? |
*/ |
SVGA3D_DEVCAP_LOGICOPS = 92, |
/* |
* What support does the host have for screen targets? |
* |
* See the SVGA3D_SCREENTARGET_CAP bits below. |
*/ |
SVGA3D_DEVCAP_SCREENTARGETS = 93, |
SVGA3D_DEVCAP_MAX /* This must be the last index. */ |
} SVGA3dDevCapIndex; |
/drivers/video/drm/vmwgfx/svga_reg.h |
---|
169,7 → 169,10 |
SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ |
SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ |
SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ |
SVGA_REG_TOP = 48, /* Must be 1 more than the last register */ |
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */ |
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */ |
SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ |
SVGA_REG_TOP = 53, /* Must be 1 more than the last register */ |
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ |
/* Next 768 (== 256*3) registers exist for colormap */ |
431,8 → 434,11 |
#define SVGA_CAP_TRACES 0x00200000 |
#define SVGA_CAP_GMR2 0x00400000 |
#define SVGA_CAP_SCREEN_OBJECT_2 0x00800000 |
#define SVGA_CAP_COMMAND_BUFFERS 0x01000000 |
#define SVGA_CAP_DEAD1 0x02000000 |
#define SVGA_CAP_CMD_BUFFERS_2 0x04000000 |
#define SVGA_CAP_GBOBJECTS 0x08000000 |
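/* |
 * Detection sketch (hedged): guest-backed object support gates the MOB |
 * paths added throughout this series. |
 */ |
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) |
DRM_INFO("Guest-backed objects (MOBs) available.\n"); |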
/* |
* FIFO register indices. |
* |
/drivers/video/drm/vmwgfx/vmwgfx_buffer.c |
---|
40,6 → 40,10 |
static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | |
TTM_PL_FLAG_CACHED; |
static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM | |
TTM_PL_FLAG_CACHED | |
TTM_PL_FLAG_NO_EVICT; |
static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | |
TTM_PL_FLAG_CACHED; |
47,6 → 51,9 |
TTM_PL_FLAG_CACHED | |
TTM_PL_FLAG_NO_EVICT; |
static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB | |
TTM_PL_FLAG_CACHED; |
struct ttm_placement vmw_vram_placement = { |
.fpfn = 0, |
.lpfn = 0, |
116,16 → 123,26 |
.busy_placement = &sys_placement_flags |
}; |
struct ttm_placement vmw_sys_ne_placement = { |
.fpfn = 0, |
.lpfn = 0, |
.num_placement = 1, |
.placement = &sys_ne_placement_flags, |
.num_busy_placement = 1, |
.busy_placement = &sys_ne_placement_flags |
}; |
static uint32_t evictable_placement_flags[] = { |
TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED, |
TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, |
VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED, |
VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED |
}; |
struct ttm_placement vmw_evictable_placement = { |
.fpfn = 0, |
.lpfn = 0, |
.num_placement = 3, |
.num_placement = 4, |
.placement = evictable_placement_flags, |
.num_busy_placement = 1, |
.busy_placement = &sys_placement_flags |
140,38 → 157,485 |
.busy_placement = gmr_vram_placement_flags |
}; |
struct ttm_placement vmw_mob_placement = { |
.fpfn = 0, |
.lpfn = 0, |
.num_placement = 1, |
.num_busy_placement = 1, |
.placement = &mob_placement_flags, |
.busy_placement = &mob_placement_flags |
}; |
struct vmw_ttm_tt { |
struct ttm_tt ttm; |
struct ttm_dma_tt dma_ttm; |
struct vmw_private *dev_priv; |
int gmr_id; |
struct vmw_mob *mob; |
int mem_type; |
struct sg_table sgt; |
struct vmw_sg_table vsgt; |
uint64_t sg_alloc_size; |
bool mapped; |
}; |
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt); |
/** |
* Helper functions to advance a struct vmw_piter iterator. |
* |
* @viter: Pointer to the iterator. |
* |
* These functions return false if past the end of the list, |
* true otherwise. Functions are selected depending on the current |
* DMA mapping mode. |
*/ |
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter) |
{ |
return ++(viter->i) < viter->num_pages; |
} |
static bool __vmw_piter_sg_next(struct vmw_piter *viter) |
{ |
return __sg_page_iter_next(&viter->iter); |
} |
/** |
* Helper functions to return a pointer to the current page. |
* |
* @viter: Pointer to the iterator |
* |
* These functions return a pointer to the page currently |
* pointed to by @viter. Functions are selected depending on the |
* current mapping mode. |
*/ |
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter) |
{ |
return viter->pages[viter->i]; |
} |
static struct page *__vmw_piter_sg_page(struct vmw_piter *viter) |
{ |
return sg_page_iter_page(&viter->iter); |
} |
/** |
* Helper functions to return the DMA address of the current page. |
* |
* @viter: Pointer to the iterator |
* |
* These functions return the DMA address of the page currently |
* pointed to by @viter. Functions are selected depending on the |
* current mapping mode. |
*/ |
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter) |
{ |
return page_to_phys(viter->pages[viter->i]); |
} |
static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter) |
{ |
return viter->addrs[viter->i]; |
} |
static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter) |
{ |
return sg_page_iter_dma_address(&viter->iter); |
} |
/** |
* vmw_piter_start - Initialize a struct vmw_piter. |
* |
* @viter: Pointer to the iterator to initialize |
* @vsgt: Pointer to a struct vmw_sg_table to initialize from |
* |
* Note that we're following the convention of __sg_page_iter_start, so that |
* the iterator doesn't point to a valid page after initialization; it has |
* to be advanced one step first. |
*/ |
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt, |
unsigned long p_offset) |
{ |
viter->i = p_offset - 1; |
viter->num_pages = vsgt->num_pages; |
switch (vsgt->mode) { |
case vmw_dma_phys: |
viter->next = &__vmw_piter_non_sg_next; |
viter->dma_address = &__vmw_piter_phys_addr; |
viter->page = &__vmw_piter_non_sg_page; |
viter->pages = vsgt->pages; |
break; |
case vmw_dma_alloc_coherent: |
viter->next = &__vmw_piter_non_sg_next; |
viter->dma_address = &__vmw_piter_dma_addr; |
viter->page = &__vmw_piter_non_sg_page; |
viter->addrs = vsgt->addrs; |
viter->pages = vsgt->pages; |
break; |
case vmw_dma_map_populate: |
case vmw_dma_map_bind: |
viter->next = &__vmw_piter_sg_next; |
viter->dma_address = &__vmw_piter_sg_addr; |
viter->page = &__vmw_piter_sg_page; |
__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl, |
vsgt->sgt->orig_nents, p_offset); |
break; |
default: |
BUG(); |
} |
} |
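/* |
 * Usage sketch: per the __sg_page_iter_start convention noted above, the |
 * iterator is advanced before the first access (this is exactly the loop |
 * vmw_ttm_map_dma uses further down to count contiguous regions). |
 */ |
for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) { |
dma_addr_t cur = vmw_piter_dma_addr(&iter); |
/* consume cur */ |
} |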
/** |
* vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for |
* TTM pages |
* |
* @vmw_tt: Pointer to a struct vmw_ttm_backend |
* |
* Used to free dma mappings previously mapped by vmw_ttm_map_for_dma. |
*/ |
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt) |
{ |
struct device *dev = vmw_tt->dev_priv->dev->dev; |
dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents, |
DMA_BIDIRECTIONAL); |
vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents; |
} |
/** |
* vmw_ttm_map_for_dma - map TTM pages to get device addresses |
* |
* @vmw_tt: Pointer to a struct vmw_ttm_backend |
* |
* This function is used to get device addresses from the kernel DMA layer. |
* However, it's violating the DMA API in that when this operation has been |
* performed, it's illegal for the CPU to write to the pages without first |
* unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is |
* therefore only legal to call this function if we know that the function |
* dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most |
* a CPU write buffer flush. |
*/ |
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt) |
{ |
struct device *dev = vmw_tt->dev_priv->dev->dev; |
int ret; |
ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents, |
DMA_BIDIRECTIONAL); |
if (unlikely(ret == 0)) |
return -ENOMEM; |
vmw_tt->sgt.nents = ret; |
return 0; |
} |
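/* |
 * Sketch of the sync discipline the comment above waives (hedged; only |
 * needed on architectures where these calls are not no-ops): |
 */ |
dma_sync_sg_for_cpu(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents, DMA_BIDIRECTIONAL); |
/* ... CPU writes to the pages ... */ |
dma_sync_sg_for_device(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents, DMA_BIDIRECTIONAL); |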
/** |
* vmw_ttm_map_dma - Make sure TTM pages are visible to the device |
* |
* @vmw_tt: Pointer to a struct vmw_ttm_tt |
* |
* Select the correct mapping function and make sure the TTM pages are |
* visible to the device. Allocate storage for the device mappings. |
* If a mapping has already been performed, indicated by the storage |
* pointer being non NULL, the function returns success. |
*/ |
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) |
{ |
struct vmw_private *dev_priv = vmw_tt->dev_priv; |
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); |
struct vmw_sg_table *vsgt = &vmw_tt->vsgt; |
struct vmw_piter iter; |
dma_addr_t old; |
int ret = 0; |
static size_t sgl_size; |
static size_t sgt_size; |
if (vmw_tt->mapped) |
return 0; |
vsgt->mode = dev_priv->map_mode; |
vsgt->pages = vmw_tt->dma_ttm.ttm.pages; |
vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages; |
vsgt->addrs = vmw_tt->dma_ttm.dma_address; |
vsgt->sgt = &vmw_tt->sgt; |
switch (dev_priv->map_mode) { |
case vmw_dma_map_bind: |
case vmw_dma_map_populate: |
if (unlikely(!sgl_size)) { |
sgl_size = ttm_round_pot(sizeof(struct scatterlist)); |
sgt_size = ttm_round_pot(sizeof(struct sg_table)); |
} |
vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages; |
ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false, |
true); |
if (unlikely(ret != 0)) |
return ret; |
ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages, |
vsgt->num_pages, 0, |
(unsigned long) |
vsgt->num_pages << PAGE_SHIFT, |
GFP_KERNEL); |
if (unlikely(ret != 0)) |
goto out_sg_alloc_fail; |
if (vsgt->num_pages > vmw_tt->sgt.nents) { |
uint64_t over_alloc = |
sgl_size * (vsgt->num_pages - |
vmw_tt->sgt.nents); |
ttm_mem_global_free(glob, over_alloc); |
vmw_tt->sg_alloc_size -= over_alloc; |
} |
ret = vmw_ttm_map_for_dma(vmw_tt); |
if (unlikely(ret != 0)) |
goto out_map_fail; |
break; |
default: |
break; |
} |
old = ~((dma_addr_t) 0); |
vmw_tt->vsgt.num_regions = 0; |
for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) { |
dma_addr_t cur = vmw_piter_dma_addr(&iter); |
if (cur != old + PAGE_SIZE) |
vmw_tt->vsgt.num_regions++; |
old = cur; |
} |
vmw_tt->mapped = true; |
return 0; |
out_map_fail: |
sg_free_table(vmw_tt->vsgt.sgt); |
vmw_tt->vsgt.sgt = NULL; |
out_sg_alloc_fail: |
ttm_mem_global_free(glob, vmw_tt->sg_alloc_size); |
return ret; |
} |
/** |
* vmw_ttm_unmap_dma - Tear down any TTM page device mappings |
* |
* @vmw_tt: Pointer to a struct vmw_ttm_tt |
* |
* Tear down any previously set up device DMA mappings and free |
* any storage space allocated for them. If there are no mappings set up, |
* this function is a NOP. |
*/ |
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt) |
{ |
struct vmw_private *dev_priv = vmw_tt->dev_priv; |
if (!vmw_tt->vsgt.sgt) |
return; |
switch (dev_priv->map_mode) { |
case vmw_dma_map_bind: |
case vmw_dma_map_populate: |
vmw_ttm_unmap_from_dma(vmw_tt); |
sg_free_table(vmw_tt->vsgt.sgt); |
vmw_tt->vsgt.sgt = NULL; |
ttm_mem_global_free(vmw_mem_glob(dev_priv), |
vmw_tt->sg_alloc_size); |
break; |
default: |
break; |
} |
vmw_tt->mapped = false; |
} |
/** |
* vmw_bo_map_dma - Make sure buffer object pages are visible to the device |
* |
* @bo: Pointer to a struct ttm_buffer_object |
* |
* Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer |
* instead of a pointer to a struct vmw_ttm_backend as argument. |
* Note that the buffer object must be either pinned or reserved before |
* calling this function. |
*/ |
int vmw_bo_map_dma(struct ttm_buffer_object *bo) |
{ |
struct vmw_ttm_tt *vmw_tt = |
container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
return vmw_ttm_map_dma(vmw_tt); |
} |
/** |
* vmw_bo_unmap_dma - Tear down device DMA mappings for buffer object pages |
* |
* @bo: Pointer to a struct ttm_buffer_object |
* |
* Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer |
* instead of a pointer to a struct vmw_ttm_backend as argument. |
*/ |
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo) |
{ |
struct vmw_ttm_tt *vmw_tt = |
container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
vmw_ttm_unmap_dma(vmw_tt); |
} |
/** |
* vmw_bo_sg_table - Return a struct vmw_sg_table object for a |
* TTM buffer object |
* |
* @bo: Pointer to a struct ttm_buffer_object |
* |
* Returns a pointer to a struct vmw_sg_table object. The object should |
* not be freed after use. |
* Note that for the device addresses to be valid, the buffer object must |
* either be reserved or pinned. |
*/ |
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) |
{ |
struct vmw_ttm_tt *vmw_tt = |
container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
return &vmw_tt->vsgt; |
} |
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) |
{ |
struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm); |
struct vmw_ttm_tt *vmw_be = |
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
int ret; |
ret = vmw_ttm_map_dma(vmw_be); |
if (unlikely(ret != 0)) |
return ret; |
vmw_be->gmr_id = bo_mem->start; |
vmw_be->mem_type = bo_mem->mem_type; |
return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages, |
switch (bo_mem->mem_type) { |
case VMW_PL_GMR: |
return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt, |
ttm->num_pages, vmw_be->gmr_id); |
case VMW_PL_MOB: |
if (unlikely(vmw_be->mob == NULL)) { |
vmw_be->mob = |
vmw_mob_create(ttm->num_pages); |
if (unlikely(vmw_be->mob == NULL)) |
return -ENOMEM; |
} |
return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob, |
&vmw_be->vsgt, ttm->num_pages, |
vmw_be->gmr_id); |
default: |
BUG(); |
} |
return 0; |
} |
static int vmw_ttm_unbind(struct ttm_tt *ttm) |
{ |
struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm); |
struct vmw_ttm_tt *vmw_be = |
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
switch (vmw_be->mem_type) { |
case VMW_PL_GMR: |
vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); |
break; |
case VMW_PL_MOB: |
vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob); |
break; |
default: |
BUG(); |
} |
if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind) |
vmw_ttm_unmap_dma(vmw_be); |
return 0; |
} |
static void vmw_ttm_destroy(struct ttm_tt *ttm) |
{ |
struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm); |
struct vmw_ttm_tt *vmw_be = |
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
vmw_ttm_unmap_dma(vmw_be); |
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) |
ttm_dma_tt_fini(&vmw_be->dma_ttm); |
else |
ttm_tt_fini(ttm); |
if (vmw_be->mob) |
vmw_mob_destroy(vmw_be->mob); |
kfree(vmw_be); |
} |
static int vmw_ttm_populate(struct ttm_tt *ttm) |
{ |
struct vmw_ttm_tt *vmw_tt = |
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
struct vmw_private *dev_priv = vmw_tt->dev_priv; |
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); |
int ret; |
if (ttm->state != tt_unpopulated) |
return 0; |
if (dev_priv->map_mode == vmw_dma_alloc_coherent) { |
size_t size = |
ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t)); |
ret = ttm_mem_global_alloc(glob, size, false, true); |
if (unlikely(ret != 0)) |
return ret; |
ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev); |
if (unlikely(ret != 0)) |
ttm_mem_global_free(glob, size); |
} else |
ret = ttm_pool_populate(ttm); |
return ret; |
} |
static void vmw_ttm_unpopulate(struct ttm_tt *ttm) |
{ |
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt, |
dma_ttm.ttm); |
struct vmw_private *dev_priv = vmw_tt->dev_priv; |
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); |
if (vmw_tt->mob) { |
vmw_mob_destroy(vmw_tt->mob); |
vmw_tt->mob = NULL; |
} |
vmw_ttm_unmap_dma(vmw_tt); |
if (dev_priv->map_mode == vmw_dma_alloc_coherent) { |
size_t size = |
ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t)); |
ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev); |
ttm_mem_global_free(glob, size); |
} else |
ttm_pool_unpopulate(ttm); |
} |
static struct ttm_backend_func vmw_ttm_func = { |
.bind = vmw_ttm_bind, |
.unbind = vmw_ttm_unbind, |
178,33 → 642,42 |
.destroy = vmw_ttm_destroy, |
}; |
struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev, |
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev, |
unsigned long size, uint32_t page_flags, |
struct page *dummy_read_page) |
{ |
struct vmw_ttm_tt *vmw_be; |
int ret; |
vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL); |
vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL); |
if (!vmw_be) |
return NULL; |
vmw_be->ttm.func = &vmw_ttm_func; |
vmw_be->dma_ttm.ttm.func = &vmw_ttm_func; |
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); |
vmw_be->mob = NULL; |
if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) { |
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) |
ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags, |
dummy_read_page); |
else |
ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags, |
dummy_read_page); |
if (unlikely(ret != 0)) |
goto out_no_init; |
return &vmw_be->dma_ttm.ttm; |
out_no_init: |
kfree(vmw_be); |
return NULL; |
} |
return &vmw_be->ttm; |
} |
int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) |
static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) |
{ |
return 0; |
} |
int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, |
static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, |
struct ttm_mem_type_manager *man) |
{ |
switch (type) { |
224,6 → 697,7 |
man->default_caching = TTM_PL_FLAG_CACHED; |
break; |
case VMW_PL_GMR: |
case VMW_PL_MOB: |
/* |
* "Guest Memory Regions" is an aperture like feature with |
* one slot per bo. There is an upper limit of the number of |
242,7 → 716,7 |
return 0; |
} |
void vmw_evict_flags(struct ttm_buffer_object *bo, |
static void vmw_evict_flags(struct ttm_buffer_object *bo, |
struct ttm_placement *placement) |
{ |
*placement = vmw_sys_placement; |
271,6 → 745,7 |
switch (mem->mem_type) { |
case TTM_PL_SYSTEM: |
case VMW_PL_GMR: |
case VMW_PL_MOB: |
return 0; |
case TTM_PL_VRAM: |
mem->bus.offset = mem->start << PAGE_SHIFT; |
330,10 → 805,42 |
VMW_FENCE_WAIT_TIMEOUT); |
} |
/** |
* vmw_move_notify - TTM move_notify_callback |
* |
* @bo: The TTM buffer object about to move. |
* @mem: The struct ttm_mem_reg indicating to what memory |
* region the move is taking place. |
* |
* Calls move_notify for all subsystems needing it. |
* (currently only resources). |
*/ |
static void vmw_move_notify(struct ttm_buffer_object *bo, |
struct ttm_mem_reg *mem) |
{ |
vmw_resource_move_notify(bo, mem); |
} |
/** |
* vmw_swap_notify - TTM swap_notify callback |
* |
* @bo: The TTM buffer object about to be swapped out. |
*/ |
static void vmw_swap_notify(struct ttm_buffer_object *bo) |
{ |
struct ttm_bo_device *bdev = bo->bdev; |
// spin_lock(&bdev->fence_lock); |
// ttm_bo_wait(bo, false, false, false); |
// spin_unlock(&bdev->fence_lock); |
} |
struct ttm_bo_driver vmw_bo_driver = { |
.ttm_tt_create = &vmw_ttm_tt_create, |
.ttm_tt_populate = &ttm_pool_populate, |
.ttm_tt_unpopulate = &ttm_pool_unpopulate, |
.ttm_tt_populate = &vmw_ttm_populate, |
.ttm_tt_unpopulate = &vmw_ttm_unpopulate, |
.invalidate_caches = vmw_invalidate_caches, |
.init_mem_type = vmw_init_mem_type, |
.evict_flags = vmw_evict_flags, |
344,9 → 851,243 |
.sync_obj_flush = vmw_sync_obj_flush, |
.sync_obj_unref = vmw_sync_obj_unref, |
.sync_obj_ref = vmw_sync_obj_ref, |
.move_notify = NULL, |
.swap_notify = NULL, |
.move_notify = vmw_move_notify, |
.swap_notify = vmw_swap_notify, |
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify, |
.io_mem_reserve = &vmw_ttm_io_mem_reserve, |
.io_mem_free = &vmw_ttm_io_mem_free, |
}; |
struct scatterlist *sg_next(struct scatterlist *sg) |
{ |
if (sg_is_last(sg)) |
return NULL; |
sg++; |
if (unlikely(sg_is_chain(sg))) |
sg = sg_chain_ptr(sg); |
return sg; |
} |
void __sg_free_table(struct sg_table *table, unsigned int max_ents, |
sg_free_fn *free_fn) |
{ |
struct scatterlist *sgl, *next; |
if (unlikely(!table->sgl)) |
return; |
sgl = table->sgl; |
while (table->orig_nents) { |
unsigned int alloc_size = table->orig_nents; |
unsigned int sg_size; |
/* |
* If we have more than max_ents segments left, |
* then assign 'next' to the sg table after the current one. |
* sg_size is then one less than alloc size, since the last |
* element is the chain pointer. |
*/ |
if (alloc_size > max_ents) { |
next = sg_chain_ptr(&sgl[max_ents - 1]); |
alloc_size = max_ents; |
sg_size = alloc_size - 1; |
} else { |
sg_size = alloc_size; |
next = NULL; |
} |
table->orig_nents -= sg_size; |
kfree(sgl); |
sgl = next; |
} |
table->sgl = NULL; |
} |
void sg_free_table(struct sg_table *table) |
{ |
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL); |
} |
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) |
{ |
struct scatterlist *sg, *prv; |
unsigned int left; |
unsigned int max_ents = SG_MAX_SINGLE_ALLOC; |
#ifndef ARCH_HAS_SG_CHAIN |
BUG_ON(nents > max_ents); |
#endif |
memset(table, 0, sizeof(*table)); |
left = nents; |
prv = NULL; |
do { |
unsigned int sg_size, alloc_size = left; |
if (alloc_size > max_ents) { |
alloc_size = max_ents; |
sg_size = alloc_size - 1; |
} else |
sg_size = alloc_size; |
left -= sg_size; |
sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask); |
if (unlikely(!sg)) { |
/* |
* Adjust entry count to reflect that the last |
* entry of the previous table won't be used for |
* linkage. Without this, sg_kfree() may get |
* confused. |
*/ |
if (prv) |
table->nents = ++table->orig_nents; |
goto err; |
} |
sg_init_table(sg, alloc_size); |
table->nents = table->orig_nents += sg_size; |
/* |
* If this is the first mapping, assign the sg table header. |
* If this is not the first mapping, chain previous part. |
*/ |
if (prv) |
sg_chain(prv, max_ents, sg); |
else |
table->sgl = sg; |
/* |
* If no more entries after this one, mark the end |
*/ |
if (!left) |
sg_mark_end(&sg[sg_size - 1]); |
prv = sg; |
} while (left); |
return 0; |
err: |
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL); |
return -ENOMEM; |
} |
void sg_init_table(struct scatterlist *sgl, unsigned int nents) |
{ |
memset(sgl, 0, sizeof(*sgl) * nents); |
#ifdef CONFIG_DEBUG_SG |
{ |
unsigned int i; |
for (i = 0; i < nents; i++) |
sgl[i].sg_magic = SG_MAGIC; |
} |
#endif |
sg_mark_end(&sgl[nents - 1]); |
} |
void __sg_page_iter_start(struct sg_page_iter *piter, |
struct scatterlist *sglist, unsigned int nents, |
unsigned long pgoffset) |
{ |
piter->__pg_advance = 0; |
piter->__nents = nents; |
piter->sg = sglist; |
piter->sg_pgoffset = pgoffset; |
} |
static int sg_page_count(struct scatterlist *sg) |
{ |
return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT; |
} |
bool __sg_page_iter_next(struct sg_page_iter *piter) |
{ |
if (!piter->__nents || !piter->sg) |
return false; |
piter->sg_pgoffset += piter->__pg_advance; |
piter->__pg_advance = 1; |
while (piter->sg_pgoffset >= sg_page_count(piter->sg)) { |
piter->sg_pgoffset -= sg_page_count(piter->sg); |
piter->sg = sg_next(piter->sg); |
if (!--piter->__nents || !piter->sg) |
return false; |
} |
return true; |
} |
EXPORT_SYMBOL(__sg_page_iter_next); |
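/* |
 * Iteration sketch: walk every page of a table, regardless of how the |
 * entries chunk (sg_page_iter_page() is the accessor used above). |
 */ |
struct sg_page_iter piter; |
__sg_page_iter_start(&piter, sgt->sgl, sgt->orig_nents, 0); |
while (__sg_page_iter_next(&piter)) { |
struct page *page = sg_page_iter_page(&piter); |
/* consume page */ |
} |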
int sg_alloc_table_from_pages(struct sg_table *sgt, |
struct page **pages, unsigned int n_pages, |
unsigned long offset, unsigned long size, |
gfp_t gfp_mask) |
{ |
unsigned int chunks; |
unsigned int i; |
unsigned int cur_page; |
int ret; |
struct scatterlist *s; |
/* compute number of contiguous chunks */ |
chunks = 1; |
for (i = 1; i < n_pages; ++i) |
if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) |
++chunks; |
ret = sg_alloc_table(sgt, chunks, gfp_mask); |
if (unlikely(ret)) |
return ret; |
/* merging chunks and putting them into the scatterlist */ |
cur_page = 0; |
for_each_sg(sgt->sgl, s, sgt->orig_nents, i) { |
unsigned long chunk_size; |
unsigned int j; |
/* look for the end of the current chunk */ |
for (j = cur_page + 1; j < n_pages; ++j) |
if (page_to_pfn(pages[j]) != |
page_to_pfn(pages[j - 1]) + 1) |
break; |
chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset; |
sg_set_page(s, pages[cur_page], min(size, chunk_size), offset); |
size -= chunk_size; |
offset = 0; |
cur_page = j; |
} |
return 0; |
} |
int dma_map_sg(struct device *dev, struct scatterlist *sglist, |
int nelems, int dir) |
{ |
struct scatterlist *s; |
int i; |
for_each_sg(sglist, s, nelems, i) { |
s->dma_address = (dma_addr_t)sg_phys(s); |
#ifdef CONFIG_NEED_SG_DMA_LENGTH |
s->dma_length = s->length; |
#endif |
} |
return nelems; |
} |
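Together these routines form the usual pipeline: collapse a page array into |
PFN-contiguous chunks, then map the table for the device. In this port |
dma_map_sg() is an identity mapping (DMA address == physical address), so it |
cannot fail, but a sketch written against the normal contract looks like this |
('dev', 'pages', 'n_pages', 'offset' and 'size' are assumed to come from the |
caller): |
/* Sketch: build and DMA-map an sg_table over a caller's page array. */ |
struct sg_table sgt; |
int ret; |
ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, offset, size, |
				GFP_KERNEL); |
if (unlikely(ret != 0)) |
	return ret; |
if (dma_map_sg(dev, sgt.sgl, sgt.orig_nents, DMA_BIDIRECTIONAL) == 0) { |
	sg_free_table(&sgt); |
	return -ENOMEM; |
} |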
/drivers/video/drm/vmwgfx/vmwgfx_context.c |
---|
32,12 → 32,28 |
struct vmw_user_context { |
struct ttm_base_object base; |
struct vmw_resource res; |
struct vmw_ctx_binding_state cbs; |
}; |
typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *); |
static void vmw_user_context_free(struct vmw_resource *res); |
static struct vmw_resource * |
vmw_user_context_base_to_res(struct ttm_base_object *base); |
static int vmw_gb_context_create(struct vmw_resource *res); |
static int vmw_gb_context_bind(struct vmw_resource *res, |
struct ttm_validate_buffer *val_buf); |
static int vmw_gb_context_unbind(struct vmw_resource *res, |
bool readback, |
struct ttm_validate_buffer *val_buf); |
static int vmw_gb_context_destroy(struct vmw_resource *res); |
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi); |
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi); |
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi); |
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs); |
static uint64_t vmw_user_context_size; |
static const struct vmw_user_resource_conv user_context_conv = { |
62,6 → 78,23 |
.unbind = NULL |
}; |
static const struct vmw_res_func vmw_gb_context_func = { |
.res_type = vmw_res_context, |
.needs_backup = true, |
.may_evict = true, |
.type_name = "guest backed contexts", |
.backup_placement = &vmw_mob_placement, |
.create = vmw_gb_context_create, |
.destroy = vmw_gb_context_destroy, |
.bind = vmw_gb_context_bind, |
.unbind = vmw_gb_context_unbind |
}; |
static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = { |
[vmw_ctx_binding_shader] = vmw_context_scrub_shader, |
[vmw_ctx_binding_rt] = vmw_context_scrub_render_target, |
[vmw_ctx_binding_tex] = vmw_context_scrub_texture }; |
/** |
* Context management: |
*/ |
76,6 → 109,16 |
} *cmd; |
if (res->func->destroy == vmw_gb_context_destroy) { |
mutex_lock(&dev_priv->cmdbuf_mutex); |
(void) vmw_gb_context_destroy(res); |
if (dev_priv->pinned_bo != NULL && |
!dev_priv->query_cid_valid) |
__vmw_execbuf_release_pinned_bo(dev_priv, NULL); |
mutex_unlock(&dev_priv->cmdbuf_mutex); |
return; |
} |
vmw_execbuf_release_pinned_bo(dev_priv); |
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) { |
92,6 → 135,33 |
vmw_3d_resource_dec(dev_priv, false); |
} |
static int vmw_gb_context_init(struct vmw_private *dev_priv, |
struct vmw_resource *res, |
void (*res_free) (struct vmw_resource *res)) |
{ |
int ret; |
struct vmw_user_context *uctx = |
container_of(res, struct vmw_user_context, res); |
ret = vmw_resource_init(dev_priv, res, true, |
res_free, &vmw_gb_context_func); |
res->backup_size = SVGA3D_CONTEXT_DATA_SIZE; |
if (unlikely(ret != 0)) { |
if (res_free) |
res_free(res); |
else |
kfree(res); |
return ret; |
} |
memset(&uctx->cbs, 0, sizeof(uctx->cbs)); |
INIT_LIST_HEAD(&uctx->cbs.list); |
vmw_resource_activate(res, vmw_hw_context_destroy); |
return 0; |
} |
static int vmw_context_init(struct vmw_private *dev_priv, |
struct vmw_resource *res, |
void (*res_free) (struct vmw_resource *res)) |
103,6 → 173,9 |
SVGA3dCmdDefineContext body; |
} *cmd; |
if (dev_priv->has_mob) |
return vmw_gb_context_init(dev_priv, res, res_free); |
ret = vmw_resource_init(dev_priv, res, false, |
res_free, &vmw_legacy_context_func); |
154,6 → 227,184 |
return (ret == 0) ? res : NULL; |
} |
static int vmw_gb_context_create(struct vmw_resource *res) |
{ |
struct vmw_private *dev_priv = res->dev_priv; |
int ret; |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdDefineGBContext body; |
} *cmd; |
if (likely(res->id != -1)) |
return 0; |
ret = vmw_resource_alloc_id(res); |
if (unlikely(ret != 0)) { |
DRM_ERROR("Failed to allocate a context id.\n"); |
goto out_no_id; |
} |
if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) { |
ret = -EBUSY; |
goto out_no_fifo; |
} |
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) { |
DRM_ERROR("Failed reserving FIFO space for context " |
"creation.\n"); |
ret = -ENOMEM; |
goto out_no_fifo; |
} |
cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT; |
cmd->header.size = sizeof(cmd->body); |
cmd->body.cid = res->id; |
vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
(void) vmw_3d_resource_inc(dev_priv, false); |
return 0; |
out_no_fifo: |
vmw_resource_release_id(res); |
out_no_id: |
return ret; |
} |
static int vmw_gb_context_bind(struct vmw_resource *res, |
struct ttm_validate_buffer *val_buf) |
{ |
struct vmw_private *dev_priv = res->dev_priv; |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdBindGBContext body; |
} *cmd; |
struct ttm_buffer_object *bo = val_buf->bo; |
BUG_ON(bo->mem.mem_type != VMW_PL_MOB); |
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) { |
DRM_ERROR("Failed reserving FIFO space for context " |
"binding.\n"); |
return -ENOMEM; |
} |
cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; |
cmd->header.size = sizeof(cmd->body); |
cmd->body.cid = res->id; |
cmd->body.mobid = bo->mem.start; |
cmd->body.validContents = res->backup_dirty; |
res->backup_dirty = false; |
vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
return 0; |
} |
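vmw_gb_context_bind() shows the FIFO pattern every device command in this file |
follows: reserve the full command size, set header.size to the size of the |
body only (the header itself is not counted), fill the body, and commit |
exactly what was reserved. A condensed sketch of the contract, reusing the |
bind command as the example body: |
/* Sketch of the generic FIFO command pattern. */ |
struct { |
	SVGA3dCmdHeader header; |
	SVGA3dCmdBindGBContext body; |
} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) |
	return -ENOMEM; |
cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; |
cmd->header.size = sizeof(cmd->body);	/* body only, header excluded */ |
/* ... fill cmd->body ... */ |
vmw_fifo_commit(dev_priv, sizeof(*cmd)); |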
static int vmw_gb_context_unbind(struct vmw_resource *res, |
bool readback, |
struct ttm_validate_buffer *val_buf) |
{ |
struct vmw_private *dev_priv = res->dev_priv; |
struct ttm_buffer_object *bo = val_buf->bo; |
struct vmw_fence_obj *fence; |
struct vmw_user_context *uctx = |
container_of(res, struct vmw_user_context, res); |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdReadbackGBContext body; |
} *cmd1; |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdBindGBContext body; |
} *cmd2; |
uint32_t submit_size; |
uint8_t *cmd; |
BUG_ON(bo->mem.mem_type != VMW_PL_MOB); |
mutex_lock(&dev_priv->binding_mutex); |
vmw_context_binding_state_kill(&uctx->cbs); |
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); |
cmd = vmw_fifo_reserve(dev_priv, submit_size); |
if (unlikely(cmd == NULL)) { |
DRM_ERROR("Failed reserving FIFO space for context " |
"unbinding.\n"); |
mutex_unlock(&dev_priv->binding_mutex); |
return -ENOMEM; |
} |
cmd2 = (void *) cmd; |
if (readback) { |
cmd1 = (void *) cmd; |
cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT; |
cmd1->header.size = sizeof(cmd1->body); |
cmd1->body.cid = res->id; |
cmd2 = (void *) (&cmd1[1]); |
} |
cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; |
cmd2->header.size = sizeof(cmd2->body); |
cmd2->body.cid = res->id; |
cmd2->body.mobid = SVGA3D_INVALID_ID; |
vmw_fifo_commit(dev_priv, submit_size); |
mutex_unlock(&dev_priv->binding_mutex); |
/* |
* Create a fence object and fence the backup buffer. |
*/ |
(void) vmw_execbuf_fence_commands(NULL, dev_priv, |
&fence, NULL); |
vmw_fence_single_bo(bo, fence); |
if (likely(fence != NULL)) |
vmw_fence_obj_unreference(&fence); |
return 0; |
} |
static int vmw_gb_context_destroy(struct vmw_resource *res) |
{ |
struct vmw_private *dev_priv = res->dev_priv; |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdDestroyGBContext body; |
} *cmd; |
struct vmw_user_context *uctx = |
container_of(res, struct vmw_user_context, res); |
BUG_ON(!list_empty(&uctx->cbs.list)); |
if (likely(res->id == -1)) |
return 0; |
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) { |
DRM_ERROR("Failed reserving FIFO space for context " |
"destruction.\n"); |
return -ENOMEM; |
} |
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT; |
cmd->header.size = sizeof(cmd->body); |
cmd->body.cid = res->id; |
vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
if (dev_priv->query_cid == res->id) |
dev_priv->query_cid_valid = false; |
vmw_resource_release_id(res); |
vmw_3d_resource_dec(dev_priv, false); |
return 0; |
} |
/** |
* User-space context management: |
*/ |
274,3 → 525,283 |
} |
#endif |
/** |
* vmw_context_scrub_shader - scrub a shader binding from a context. |
* |
* @bi: single binding information. |
*/ |
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi) |
{ |
struct vmw_private *dev_priv = bi->ctx->dev_priv; |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdSetShader body; |
} *cmd; |
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) { |
DRM_ERROR("Failed reserving FIFO space for shader " |
"unbinding.\n"); |
return -ENOMEM; |
} |
cmd->header.id = SVGA_3D_CMD_SET_SHADER; |
cmd->header.size = sizeof(cmd->body); |
cmd->body.cid = bi->ctx->id; |
cmd->body.type = bi->i1.shader_type; |
cmd->body.shid = SVGA3D_INVALID_ID; |
vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
return 0; |
} |
/** |
* vmw_context_scrub_render_target - scrub a render target binding |
* from a context. |
* |
* @bi: single binding information. |
*/ |
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi) |
{ |
struct vmw_private *dev_priv = bi->ctx->dev_priv; |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdSetRenderTarget body; |
} *cmd; |
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) { |
DRM_ERROR("Failed reserving FIFO space for render target " |
"unbinding.\n"); |
return -ENOMEM; |
} |
cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET; |
cmd->header.size = sizeof(cmd->body); |
cmd->body.cid = bi->ctx->id; |
cmd->body.type = bi->i1.rt_type; |
cmd->body.target.sid = SVGA3D_INVALID_ID; |
cmd->body.target.face = 0; |
cmd->body.target.mipmap = 0; |
vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
return 0; |
} |
/** |
* vmw_context_scrub_texture - scrub a texture binding from a context. |
* |
* @bi: single binding information. |
* |
* TODO: Possibly complement this function with a function that takes |
* a list of texture bindings and combines them into a single command. |
*/ |
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi) |
{ |
struct vmw_private *dev_priv = bi->ctx->dev_priv; |
struct { |
SVGA3dCmdHeader header; |
struct { |
SVGA3dCmdSetTextureState c; |
SVGA3dTextureState s1; |
} body; |
} *cmd; |
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) { |
DRM_ERROR("Failed reserving FIFO space for texture " |
"unbinding.\n"); |
return -ENOMEM; |
} |
cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE; |
cmd->header.size = sizeof(cmd->body); |
cmd->body.c.cid = bi->ctx->id; |
cmd->body.s1.stage = bi->i1.texture_stage; |
cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; |
cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID; |
vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
return 0; |
} |
/** |
* vmw_context_binding_drop: Stop tracking a context binding |
* |
* @cb: Pointer to binding tracker storage. |
* |
* Stops tracking a context binding, and re-initializes its storage. |
* Typically used when the context binding is replaced with a binding to |
* another (or the same, for that matter) resource. |
*/ |
static void vmw_context_binding_drop(struct vmw_ctx_binding *cb) |
{ |
list_del(&cb->ctx_list); |
if (!list_empty(&cb->res_list)) |
list_del(&cb->res_list); |
cb->bi.ctx = NULL; |
} |
/** |
* vmw_context_binding_add: Start tracking a context binding |
* |
* @cbs: Pointer to the context binding state tracker. |
* @bi: Information about the binding to track. |
* |
* Performs basic checks on the binding to make sure arguments are within |
* bounds and then starts tracking the binding in the context binding |
* state structure @cbs. |
*/ |
int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, |
const struct vmw_ctx_bindinfo *bi) |
{ |
struct vmw_ctx_binding *loc; |
switch (bi->bt) { |
case vmw_ctx_binding_rt: |
if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) { |
DRM_ERROR("Illegal render target type %u.\n", |
(unsigned) bi->i1.rt_type); |
return -EINVAL; |
} |
loc = &cbs->render_targets[bi->i1.rt_type]; |
break; |
case vmw_ctx_binding_tex: |
if (unlikely((unsigned)bi->i1.texture_stage >= |
SVGA3D_NUM_TEXTURE_UNITS)) { |
DRM_ERROR("Illegal texture/sampler unit %u.\n", |
(unsigned) bi->i1.texture_stage); |
return -EINVAL; |
} |
loc = &cbs->texture_units[bi->i1.texture_stage]; |
break; |
case vmw_ctx_binding_shader: |
if (unlikely((unsigned)bi->i1.shader_type >= |
SVGA3D_SHADERTYPE_MAX)) { |
DRM_ERROR("Illegal shader type %u.\n", |
(unsigned) bi->i1.shader_type); |
return -EINVAL; |
} |
loc = &cbs->shaders[bi->i1.shader_type]; |
break; |
default: |
BUG(); |
} |
if (loc->bi.ctx != NULL) |
vmw_context_binding_drop(loc); |
loc->bi = *bi; |
list_add_tail(&loc->ctx_list, &cbs->list); |
INIT_LIST_HEAD(&loc->res_list); |
return 0; |
} |
/** |
* vmw_context_binding_transfer: Transfer a context binding tracking entry. |
* |
* @cbs: Pointer to the persistent context binding state tracker. |
* @bi: Information about the binding to track. |
* |
*/ |
static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs, |
const struct vmw_ctx_bindinfo *bi) |
{ |
struct vmw_ctx_binding *loc; |
switch (bi->bt) { |
case vmw_ctx_binding_rt: |
loc = &cbs->render_targets[bi->i1.rt_type]; |
break; |
case vmw_ctx_binding_tex: |
loc = &cbs->texture_units[bi->i1.texture_stage]; |
break; |
case vmw_ctx_binding_shader: |
loc = &cbs->shaders[bi->i1.shader_type]; |
break; |
default: |
BUG(); |
} |
if (loc->bi.ctx != NULL) |
vmw_context_binding_drop(loc); |
loc->bi = *bi; |
list_add_tail(&loc->ctx_list, &cbs->list); |
if (bi->res != NULL) |
list_add_tail(&loc->res_list, &bi->res->binding_head); |
else |
INIT_LIST_HEAD(&loc->res_list); |
} |
/** |
* vmw_context_binding_kill - Kill a binding on the device |
* and stop tracking it. |
* |
* @cb: Pointer to binding tracker storage. |
* |
* Emits FIFO commands to scrub a binding represented by @cb. |
* Then stops tracking the binding and re-initializes its storage. |
*/ |
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb) |
{ |
(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi); |
vmw_context_binding_drop(cb); |
} |
/** |
* vmw_context_binding_state_kill - Kill all bindings associated with a |
* struct vmw_ctx_binding state structure, and re-initialize the structure. |
* |
* @cbs: Pointer to the context binding state tracker. |
* |
* Emits commands to scrub all bindings associated with the |
* context binding state tracker. Then re-initializes the whole structure. |
*/ |
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs) |
{ |
struct vmw_ctx_binding *entry, *next; |
list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) |
vmw_context_binding_kill(entry); |
} |
/** |
* vmw_context_binding_res_list_kill - Kill all bindings on a |
* resource binding list |
* |
* @head: list head of resource binding list |
* |
* Kills all bindings associated with a specific resource. Typically |
* called before the resource is destroyed. |
*/ |
void vmw_context_binding_res_list_kill(struct list_head *head) |
{ |
struct vmw_ctx_binding *entry, *next; |
list_for_each_entry_safe(entry, next, head, res_list) |
vmw_context_binding_kill(entry); |
} |
/** |
* vmw_context_binding_state_transfer - Commit staged binding info |
* |
* @ctx: Pointer to context to commit the staged binding info to. |
* @from: Staged binding info built during execbuf. |
* |
* Transfers binding info from a temporary structure to the persistent |
* structure in the context. This can be done once the commands that set up |
* the staged bindings have been submitted to hardware. |
*/ |
void vmw_context_binding_state_transfer(struct vmw_resource *ctx, |
struct vmw_ctx_binding_state *from) |
{ |
struct vmw_user_context *uctx = |
container_of(ctx, struct vmw_user_context, res); |
struct vmw_ctx_binding *entry, *next; |
list_for_each_entry_safe(entry, next, &from->list, ctx_list) |
vmw_context_binding_transfer(&uctx->cbs, &entry->bi); |
} |
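Binding tracking is therefore two-phase: execbuf validation stages bindings in |
a temporary vmw_ctx_binding_state via vmw_context_binding_add(), and only |
after the command batch has been handed to the device does the function above |
migrate them into the context's persistent tracker. A hedged sketch of the |
staging side ('ctx_node' and 'res_node' stand for validated resource nodes as |
in the execbuf code later in this document; 'batch_submitted' is a |
hypothetical flag): |
/* Sketch: stage a render-target binding, then commit it once the |
 * batch is submitted. */ |
struct vmw_ctx_bindinfo bi; |
int ret; |
bi.ctx = ctx_node->res; |
bi.res = res_node->res; |
bi.bt = vmw_ctx_binding_rt; |
bi.i1.rt_type = SVGA3D_RT_COLOR0; |
ret = vmw_context_binding_add(ctx_node->staged_bindings, &bi); |
if (ret == 0 && batch_submitted) |
	vmw_context_binding_state_transfer(ctx_node->res, |
					   ctx_node->staged_bindings); |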
/drivers/video/drm/vmwgfx/vmwgfx_dmabuf.c |
---|
290,8 → 290,7 |
/** |
* vmw_bo_pin - Pin or unpin a buffer object without moving it. |
* |
* @bo: The buffer object. Must be reserved, and present either in VRAM |
* or GMR memory. |
* @bo: The buffer object. Must be reserved. |
* @pin: Whether to pin or unpin. |
* |
*/ |
303,10 → 302,9 |
int ret; |
lockdep_assert_held(&bo->resv->lock.base); |
BUG_ON(old_mem_type != TTM_PL_VRAM && |
old_mem_type != VMW_PL_GMR); |
pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED; |
pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB |
| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; |
if (pin) |
pl_flags |= TTM_PL_FLAG_NO_EVICT; |
/drivers/video/drm/vmwgfx/vmwgfx_drv.c |
---|
32,6 → 32,7 |
#include <drm/ttm/ttm_bo_driver.h> |
#include <drm/ttm/ttm_object.h> |
//#include <drm/ttm/ttm_module.h> |
#include <linux/dma_remapping.h> |
#define VMWGFX_DRIVER_NAME "vmwgfx" |
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" |
111,6 → 112,21 |
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \ |
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ |
struct drm_vmw_update_layout_arg) |
#define DRM_IOCTL_VMW_CREATE_SHADER \ |
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \ |
struct drm_vmw_shader_create_arg) |
#define DRM_IOCTL_VMW_UNREF_SHADER \ |
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \ |
struct drm_vmw_shader_arg) |
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ |
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ |
union drm_vmw_gb_surface_create_arg) |
#define DRM_IOCTL_VMW_GB_SURFACE_REF \ |
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ |
union drm_vmw_gb_surface_reference_arg) |
#define DRM_IOCTL_VMW_SYNCCPU \ |
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ |
struct drm_vmw_synccpu_arg) |
/** |
* The core DRM version of this macro doesn't account for |
176,6 → 192,21 |
VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, |
vmw_kms_update_layout_ioctl, |
DRM_MASTER | DRM_UNLOCKED), |
VMW_IOCTL_DEF(VMW_CREATE_SHADER, |
vmw_shader_define_ioctl, |
DRM_AUTH | DRM_UNLOCKED), |
VMW_IOCTL_DEF(VMW_UNREF_SHADER, |
vmw_shader_destroy_ioctl, |
DRM_AUTH | DRM_UNLOCKED), |
VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, |
vmw_gb_surface_define_ioctl, |
DRM_AUTH | DRM_UNLOCKED), |
VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, |
vmw_gb_surface_reference_ioctl, |
DRM_AUTH | DRM_UNLOCKED), |
VMW_IOCTL_DEF(VMW_SYNCCPU, |
vmw_user_dmabuf_synccpu_ioctl, |
DRM_AUTH | DRM_UNLOCKED), |
}; |
#endif |
185,6 → 216,10 |
}; |
static int enable_fbdev = 1; |
static int vmw_force_iommu; |
static int vmw_restrict_iommu; |
static int vmw_force_coherent; |
static int vmw_restrict_dma_mask; |
static int vmw_probe(struct pci_dev *, const struct pci_device_id *); |
static void vmw_master_init(struct vmw_master *); |
191,7 → 226,16 |
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); |
module_param_named(enable_fbdev, enable_fbdev, int, 0600); |
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages"); |
module_param_named(force_dma_api, vmw_force_iommu, int, 0600); |
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); |
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); |
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); |
module_param_named(force_coherent, vmw_force_coherent, int, 0600); |
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); |
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); |
static void vmw_print_capabilities(uint32_t capabilities) |
{ |
DRM_INFO("Capabilities:\n"); |
227,39 → 271,53 |
DRM_INFO(" GMR2.\n"); |
if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) |
DRM_INFO(" Screen Object 2.\n"); |
if (capabilities & SVGA_CAP_COMMAND_BUFFERS) |
DRM_INFO(" Command Buffers.\n"); |
if (capabilities & SVGA_CAP_CMD_BUFFERS_2) |
DRM_INFO(" Command Buffers 2.\n"); |
if (capabilities & SVGA_CAP_GBOBJECTS) |
DRM_INFO(" Guest Backed Resources.\n"); |
} |
/** |
* vmw_execbuf_prepare_dummy_query - Initialize a query result structure at |
* the start of a buffer object. |
* vmw_dummy_query_bo_create - create a bo to hold a dummy query result |
* |
* @dev_priv: The device private structure. |
* @dev_priv: A device private structure. |
* |
* This function will idle the buffer using an uninterruptible wait, then |
* map the first page and initialize a pending occlusion query result |
* structure. Finally, it will unmap the buffer. |
* This function creates a small buffer object that holds the query |
* result for dummy queries emitted as query barriers. |
* The function will then map the first page and initialize a pending |
* occlusion query result structure. Finally, it will unmap the buffer. |
* No interruptible waits are done within this function. |
* |
* TODO: Since we're only mapping a single page, we should optimize the map |
* to use kmap_atomic / iomap_atomic. |
* Returns an error if bo creation or initialization fails. |
*/ |
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv) |
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) |
{ |
int ret; |
struct ttm_buffer_object *bo; |
struct ttm_bo_kmap_obj map; |
volatile SVGA3dQueryResult *result; |
bool dummy; |
int ret; |
struct ttm_bo_device *bdev = &dev_priv->bdev; |
struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; |
ttm_bo_reserve(bo, false, false, false, 0); |
spin_lock(&bdev->fence_lock); |
ret = 0; //ttm_bo_wait(bo, false, false, false); |
spin_unlock(&bdev->fence_lock); |
/* |
* Create the bo as pinned, so that a tryreserve will |
* immediately succeed. This is because we're the only |
* user of the bo currently. |
*/ |
ret = ttm_bo_create(&dev_priv->bdev, |
PAGE_SIZE, |
ttm_bo_type_device, |
&vmw_sys_ne_placement, |
0, false, NULL, |
&bo); |
if (unlikely(ret != 0)) |
(void) vmw_fallback_wait(dev_priv, false, true, 0, false, |
10*HZ); |
/* |
return ret; |
ret = ttm_bo_reserve(bo, false, true, false, 0); |
BUG_ON(ret != 0); |
ret = ttm_bo_kmap(bo, 0, 1, &map); |
if (likely(ret == 0)) { |
result = ttm_kmap_obj_virtual(&map, &dummy); |
267,35 → 325,19 |
result->state = SVGA3D_QUERYSTATE_PENDING; |
result->result32 = 0xff; |
ttm_bo_kunmap(&map); |
} else |
DRM_ERROR("Dummy query buffer map failed.\n"); |
*/ |
} |
vmw_bo_pin(bo, false); |
ttm_bo_unreserve(bo); |
} |
if (unlikely(ret != 0)) { |
DRM_ERROR("Dummy query buffer map failed.\n"); |
ttm_bo_unref(&bo); |
} else |
dev_priv->dummy_query_bo = bo; |
/** |
* vmw_dummy_query_bo_create - create a bo to hold a dummy query result |
* |
* @dev_priv: A device private structure. |
* |
* This function creates a small buffer object that holds the query |
* result for dummy queries emitted as query barriers. |
* No interruptible waits are done within this function. |
* |
* Returns an error if bo creation fails. |
*/ |
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) |
{ |
return ttm_bo_create(&dev_priv->bdev, |
PAGE_SIZE, |
ttm_bo_type_device, |
&vmw_vram_sys_placement, |
0, false, NULL, |
&dev_priv->dummy_query_bo); |
return ret; |
} |
static int vmw_request_device(struct vmw_private *dev_priv) |
{ |
int ret; |
336,6 → 378,7 |
vmw_fifo_release(dev_priv, &dev_priv->fifo); |
} |
/** |
* Increase the 3d resource refcount. |
* If the count was previously zero, initialize the fifo, switching to svga |
432,6 → 475,33 |
dev_priv->initial_height = height; |
} |
/** |
* vmw_dma_masks - set required page- and dma masks |
* |
* @dev: Pointer to struct drm-device |
* |
* With 32-bit we can only handle 32 bit PFNs. Optionally set that |
* restriction also for 64-bit systems. |
*/ |
#ifdef CONFIG_INTEL_IOMMU |
static int vmw_dma_masks(struct vmw_private *dev_priv) |
{ |
struct drm_device *dev = dev_priv->dev; |
if (intel_iommu_enabled && |
(sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { |
DRM_INFO("Restricting DMA addresses to 44 bits.\n"); |
return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); |
} |
return 0; |
} |
#else |
static int vmw_dma_masks(struct vmw_private *dev_priv) |
{ |
return 0; |
} |
#endif |
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
{ |
struct vmw_private *dev_priv; |
438,7 → 508,9 |
int ret; |
uint32_t svga_id; |
enum vmw_res_type i; |
bool refuse_dma = false; |
ENTER(); |
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); |
455,6 → 527,7 |
mutex_init(&dev_priv->hw_mutex); |
mutex_init(&dev_priv->cmdbuf_mutex); |
mutex_init(&dev_priv->release_mutex); |
mutex_init(&dev_priv->binding_mutex); |
rwlock_init(&dev_priv->resource_lock); |
for (i = vmw_res_context; i < vmw_res_max; ++i) { |
491,6 → 564,11 |
} |
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); |
// ret = vmw_dma_select_mode(dev_priv); |
// if (unlikely(ret != 0)) { |
// DRM_INFO("Restricting capabilities due to IOMMU setup.\n"); |
// refuse_dma = true; |
// } |
dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); |
dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); |
499,14 → 577,9 |
vmw_get_initial_size(dev_priv); |
if (dev_priv->capabilities & SVGA_CAP_GMR) { |
dev_priv->max_gmr_descriptors = |
vmw_read(dev_priv, |
SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH); |
if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
dev_priv->max_gmr_ids = |
vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); |
} |
if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
dev_priv->max_gmr_pages = |
vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); |
dev_priv->memory_size = |
519,23 → 592,42 |
*/ |
dev_priv->memory_size = 512*1024*1024; |
} |
dev_priv->max_mob_pages = 0; |
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { |
uint64_t mem_size = |
vmw_read(dev_priv, |
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); |
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; |
dev_priv->prim_bb_mem = |
vmw_read(dev_priv, |
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); |
} else |
dev_priv->prim_bb_mem = dev_priv->vram_size; |
ret = vmw_dma_masks(dev_priv); |
if (unlikely(ret != 0)) { |
mutex_unlock(&dev_priv->hw_mutex); |
goto out_err0; |
} |
if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size)) |
dev_priv->prim_bb_mem = dev_priv->vram_size; |
mutex_unlock(&dev_priv->hw_mutex); |
vmw_print_capabilities(dev_priv->capabilities); |
if (dev_priv->capabilities & SVGA_CAP_GMR) { |
if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
DRM_INFO("Max GMR ids is %u\n", |
(unsigned)dev_priv->max_gmr_ids); |
DRM_INFO("Max GMR descriptors is %u\n", |
(unsigned)dev_priv->max_gmr_descriptors); |
} |
if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
DRM_INFO("Max number of GMR pages is %u\n", |
(unsigned)dev_priv->max_gmr_pages); |
DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", |
(unsigned)dev_priv->memory_size / 1024); |
} |
DRM_INFO("Maximum display memory size is %u kiB\n", |
dev_priv->prim_bb_mem / 1024); |
DRM_INFO("VRAM at 0x%08x size is %u kiB\n", |
dev_priv->vram_start, dev_priv->vram_size / 1024); |
DRM_INFO("MMIO at 0x%08x size is %u kiB\n", |
546,6 → 638,8 |
goto out_err0; |
vmw_master_init(&dev_priv->fbdev_master); |
dev_priv->active_master = &dev_priv->fbdev_master; |
ret = ttm_bo_device_init(&dev_priv->bdev, |
565,13 → 659,23 |
} |
dev_priv->has_gmr = true; |
if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, |
dev_priv->max_gmr_ids) != 0) { |
if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || |
refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, |
VMW_PL_GMR) != 0) { |
DRM_INFO("No GMR memory available. " |
"Graphics memory resources are very limited.\n"); |
dev_priv->has_gmr = false; |
} |
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { |
dev_priv->has_mob = true; |
if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, |
VMW_PL_MOB) != 0) { |
DRM_INFO("No MOB memory available. " |
"3D will be disabled.\n"); |
dev_priv->has_mob = false; |
} |
} |
dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start, |
dev_priv->mmio_size); |
590,14 → 694,14 |
goto out_err4; |
} |
dev_priv->tdev = ttm_object_device_init |
(dev_priv->mem_global_ref.object, 12); |
// dev_priv->tdev = ttm_object_device_init |
// (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops); |
if (unlikely(dev_priv->tdev == NULL)) { |
DRM_ERROR("Unable to initialize TTM object management.\n"); |
ret = -ENOMEM; |
goto out_err4; |
} |
// if (unlikely(dev_priv->tdev == NULL)) { |
// DRM_ERROR("Unable to initialize TTM object management.\n"); |
// ret = -ENOMEM; |
// goto out_err4; |
// } |
dev->dev_private = dev_priv; |
702,6 → 806,8 |
ttm_object_device_release(&dev_priv->tdev); |
iounmap(dev_priv->mmio_virt); |
arch_phys_wc_del(dev_priv->mmio_mtrr); |
if (dev_priv->has_mob) |
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); |
if (dev_priv->has_gmr) |
(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); |
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); |
731,9 → 837,16 |
struct vmw_fpriv *vmw_fp; |
vmw_fp = vmw_fpriv(file_priv); |
if (vmw_fp->locked_master) { |
struct vmw_master *vmaster = |
vmw_master(vmw_fp->locked_master); |
ttm_vt_unlock(&vmaster->lock); |
drm_master_put(&vmw_fp->locked_master); |
} |
ttm_object_file_release(&vmw_fp->tfile); |
if (vmw_fp->locked_master) |
drm_master_put(&vmw_fp->locked_master); |
kfree(vmw_fp); |
} |
#endif |
810,10 → 923,11 |
} |
} |
#endif |
static void vmw_master_init(struct vmw_master *vmaster) |
{ |
ttm_lock_init(&vmaster->lock); |
// ttm_lock_init(&vmaster->lock); |
INIT_LIST_HEAD(&vmaster->fb_surf); |
mutex_init(&vmaster->fb_surf_mutex); |
} |
828,7 → 942,7 |
return -ENOMEM; |
vmw_master_init(vmaster); |
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
// ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
master->driver_priv = vmaster; |
return 0; |
843,7 → 957,7 |
kfree(vmaster); |
} |
#if 0 |
static int vmw_master_set(struct drm_device *dev, |
struct drm_file *file_priv, |
bool from_open) |
918,14 → 1032,12 |
vmw_fp->locked_master = drm_master_get(file_priv->master); |
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); |
vmw_execbuf_release_pinned_bo(dev_priv); |
if (unlikely((ret != 0))) { |
DRM_ERROR("Unable to lock TTM at VT switch.\n"); |
drm_master_put(&vmw_fp->locked_master); |
} |
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); |
vmw_execbuf_release_pinned_bo(dev_priv); |
if (!dev_priv->enable_fb) { |
ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); |
1150,3 → 1262,15 |
MODULE_AUTHOR("VMware Inc. and others"); |
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); |
MODULE_LICENSE("GPL and additional rights"); |
void *kmemdup(const void *src, size_t len, gfp_t gfp) |
{ |
void *p; |
p = kmalloc(len, gfp); |
if (p) |
memcpy(p, src, len); |
return p; |
} |
/drivers/video/drm/vmwgfx/vmwgfx_drv.h |
---|
32,6 → 32,7 |
#include <drm/drmP.h> |
#include <drm/vmwgfx_drm.h> |
#include <drm/drm_hashtab.h> |
#include <linux/scatterlist.h> |
//#include <linux/suspend.h> |
#include <drm/ttm/ttm_bo_driver.h> |
#include <drm/ttm/ttm_object.h> |
40,9 → 41,9 |
//#include <drm/ttm/ttm_module.h> |
#include "vmwgfx_fence.h" |
#define VMWGFX_DRIVER_DATE "20120209" |
#define VMWGFX_DRIVER_DATE "20121114" |
#define VMWGFX_DRIVER_MAJOR 2 |
#define VMWGFX_DRIVER_MINOR 4 |
#define VMWGFX_DRIVER_MINOR 5 |
#define VMWGFX_DRIVER_PATCHLEVEL 0 |
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
50,14 → 51,30 |
#define VMWGFX_MAX_VALIDATIONS 2048 |
#define VMWGFX_MAX_DISPLAYS 16 |
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 |
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0 |
/* |
* Perhaps we should have sysfs entries for these. |
*/ |
#define VMWGFX_NUM_GB_CONTEXT 256 |
#define VMWGFX_NUM_GB_SHADER 20000 |
#define VMWGFX_NUM_GB_SURFACE 32768 |
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS |
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ |
VMWGFX_NUM_GB_SHADER +\ |
VMWGFX_NUM_GB_SURFACE +\ |
VMWGFX_NUM_GB_SCREEN_TARGET) |
#define VMW_PL_GMR TTM_PL_PRIV0 |
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0 |
#define VMW_PL_MOB TTM_PL_PRIV1 |
#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1 |
#define VMW_RES_CONTEXT ttm_driver_type0 |
#define VMW_RES_SURFACE ttm_driver_type1 |
#define VMW_RES_STREAM ttm_driver_type2 |
#define VMW_RES_FENCE ttm_driver_type3 |
#define VMW_RES_SHADER ttm_driver_type4 |
#define ioread32(addr) readl(addr) |
98,6 → 115,7 |
struct vmw_validate_buffer { |
struct ttm_validate_buffer base; |
struct drm_hash_item hash; |
bool validate_as_mob; |
}; |
struct vmw_res_func; |
114,6 → 132,7 |
const struct vmw_res_func *func; |
struct list_head lru_head; /* Protected by the resource lock */ |
struct list_head mob_head; /* Protected by @backup reserved */ |
struct list_head binding_head; /* Protected by binding_mutex */ |
void (*res_free) (struct vmw_resource *res); |
void (*hw_destroy) (struct vmw_resource *res); |
}; |
122,6 → 141,7 |
vmw_res_context, |
vmw_res_surface, |
vmw_res_stream, |
vmw_res_shader, |
vmw_res_max |
}; |
170,6 → 190,7 |
}; |
struct vmw_relocation { |
SVGAMobId *mob_loc; |
SVGAGuestPtr *location; |
uint32_t index; |
}; |
193,6 → 214,123 |
struct vmw_resource_val_node *node; |
}; |
/** |
* enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings. |
*/ |
enum vmw_dma_map_mode { |
vmw_dma_phys, /* Use physical page addresses */ |
vmw_dma_alloc_coherent, /* Use TTM coherent pages */ |
vmw_dma_map_populate, /* Unmap from DMA just after unpopulate */ |
vmw_dma_map_bind, /* Unmap from DMA just before unbind */ |
vmw_dma_map_max |
}; |
/** |
* struct vmw_sg_table - Scatter/gather table for binding, with additional |
* device-specific information. |
* |
* @mode: DMA mapping mode used for the pages in this table. |
* @pages: Array of page pointers, if used by @mode. |
* @addrs: Array of DMA addresses for coherent pages, if used by @mode. |
* @sgt: Pointer to a struct sg_table with binding information |
* @num_regions: Number of regions with device-address contiguous pages |
* @num_pages: Total number of pages backing the table. |
*/ |
struct vmw_sg_table { |
enum vmw_dma_map_mode mode; |
struct page **pages; |
const dma_addr_t *addrs; |
struct sg_table *sgt; |
unsigned long num_regions; |
unsigned long num_pages; |
}; |
/** |
* struct vmw_piter - Page iterator that iterates over a list of pages |
* and DMA addresses that could be either a scatter-gather list or |
* arrays |
* |
* @pages: Array of page pointers to the pages. |
* @addrs: DMA addresses to the pages if coherent pages are used. |
* @iter: Scatter-gather page iterator. Current position in SG list. |
* @i: Current position in arrays. |
* @num_pages: Number of pages total. |
* @next: Function to advance the iterator. Returns false if past the list |
* of pages, true otherwise. |
* @dma_address: Function to return the DMA address of the current page. |
*/ |
struct vmw_piter { |
struct page **pages; |
const dma_addr_t *addrs; |
struct sg_page_iter iter; |
unsigned long i; |
unsigned long num_pages; |
bool (*next)(struct vmw_piter *); |
dma_addr_t (*dma_address)(struct vmw_piter *); |
struct page *(*page)(struct vmw_piter *); |
}; |
/* |
* enum vmw_ctx_binding_type - abstract resource to context binding types |
*/ |
enum vmw_ctx_binding_type { |
vmw_ctx_binding_shader, |
vmw_ctx_binding_rt, |
vmw_ctx_binding_tex, |
vmw_ctx_binding_max |
}; |
/** |
* struct vmw_ctx_bindinfo - structure representing a single context binding |
* |
* @ctx: Pointer to the context structure. NULL means the binding is not |
* active. |
* @res: Non ref-counted pointer to the bound resource. |
* @bt: The binding type. |
* @i1: Union of information needed to unbind. |
*/ |
struct vmw_ctx_bindinfo { |
struct vmw_resource *ctx; |
struct vmw_resource *res; |
enum vmw_ctx_binding_type bt; |
union { |
SVGA3dShaderType shader_type; |
SVGA3dRenderTargetType rt_type; |
uint32 texture_stage; |
} i1; |
}; |
/** |
* struct vmw_ctx_binding - structure representing a single context binding |
* - suitable for tracking in a context |
* |
* @ctx_list: List head for context. |
* @res_list: List head for bound resource. |
* @bi: Binding info |
*/ |
struct vmw_ctx_binding { |
struct list_head ctx_list; |
struct list_head res_list; |
struct vmw_ctx_bindinfo bi; |
}; |
/** |
* struct vmw_ctx_binding_state - context binding state |
* |
* @list: linked list of individual bindings. |
* @render_targets: Render target bindings. |
* @texture_units: Texture units/samplers bindings. |
* @shaders: Shader bindings. |
* |
* Note that this structure also provides storage space for the individual |
* struct vmw_ctx_binding objects, so that no dynamic allocation is needed |
* for individual bindings. |
* |
*/ |
struct vmw_ctx_binding_state { |
struct list_head list; |
struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX]; |
struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS]; |
struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX]; |
}; |
struct vmw_sw_context{ |
struct drm_open_hash res_ht; |
bool res_ht_initialized; |
214,6 → 352,7 |
struct vmw_resource *last_query_ctx; |
bool needs_post_query_barrier; |
struct vmw_resource *error_resource; |
struct vmw_ctx_binding_state staged_bindings; |
}; |
struct vmw_legacy_display; |
245,6 → 384,7 |
unsigned int io_start; |
uint32_t vram_start; |
uint32_t vram_size; |
uint32_t prim_bb_mem; |
uint32_t mmio_start; |
uint32_t mmio_size; |
uint32_t fb_max_width; |
254,11 → 394,12 |
__le32 __iomem *mmio_virt; |
int mmio_mtrr; |
uint32_t capabilities; |
uint32_t max_gmr_descriptors; |
uint32_t max_gmr_ids; |
uint32_t max_gmr_pages; |
uint32_t max_mob_pages; |
uint32_t memory_size; |
bool has_gmr; |
bool has_mob; |
struct mutex hw_mutex; |
/* |
334,6 → 475,7 |
struct vmw_sw_context ctx; |
struct mutex cmdbuf_mutex; |
struct mutex binding_mutex; |
/** |
* Operating mode. |
346,8 → 488,8 |
* Master management. |
*/ |
// struct vmw_master *active_master; |
// struct vmw_master fbdev_master; |
struct vmw_master *active_master; |
struct vmw_master fbdev_master; |
// struct notifier_block pm_nb; |
bool suspended; |
374,6 → 516,17 |
struct list_head res_lru[vmw_res_max]; |
uint32_t used_memory_size; |
/* |
* DMA mapping stuff. |
*/ |
enum vmw_dma_map_mode map_mode; |
/* |
* Guest Backed stuff |
*/ |
struct ttm_buffer_object *otable_bo; |
struct vmw_otable *otables; |
}; |
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) |
421,7 → 574,7 |
*/ |
extern int vmw_gmr_bind(struct vmw_private *dev_priv, |
struct page *pages[], |
const struct vmw_sg_table *vsgt, |
unsigned long num_pages, |
int gmr_id); |
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id); |
430,23 → 583,12 |
* Resource utilities - vmwgfx_resource.c |
*/ |
struct vmw_user_resource_conv; |
extern const struct vmw_user_resource_conv *user_surface_converter; |
extern const struct vmw_user_resource_conv *user_context_converter; |
extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); |
extern void vmw_resource_unreference(struct vmw_resource **p_res); |
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); |
extern int vmw_resource_validate(struct vmw_resource *res); |
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); |
extern bool vmw_resource_needs_backup(const struct vmw_resource *res); |
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int vmw_context_check(struct vmw_private *dev_priv, |
struct ttm_object_file *tfile, |
int id, |
struct vmw_resource **p_res); |
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, |
struct ttm_object_file *tfile, |
uint32_t handle, |
458,18 → 600,6 |
uint32_t handle, |
const struct vmw_user_resource_conv *converter, |
struct vmw_resource **p_res); |
extern void vmw_surface_res_free(struct vmw_resource *res); |
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int vmw_surface_check(struct vmw_private *dev_priv, |
struct ttm_object_file *tfile, |
uint32_t handle, int *id); |
extern int vmw_surface_validate(struct vmw_private *dev_priv, |
struct vmw_surface *srf); |
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); |
extern int vmw_dmabuf_init(struct vmw_private *dev_priv, |
struct vmw_dma_buffer *vmw_bo, |
478,10 → 608,21 |
void (*bo_free) (struct ttm_buffer_object *bo)); |
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, |
struct ttm_object_file *tfile); |
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, |
struct ttm_object_file *tfile, |
uint32_t size, |
bool shareable, |
uint32_t *handle, |
struct vmw_dma_buffer **p_dma_buf); |
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, |
struct vmw_dma_buffer *dma_buf, |
uint32_t *handle); |
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, |
uint32_t cur_validate_node); |
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); |
539,8 → 680,6 |
struct drm_file *file_priv); |
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
//extern unsigned int vmw_fops_poll(struct file *filp, |
// struct poll_table_struct *wait); |
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer, |
size_t count, loff_t *offset); |
574,6 → 713,7 |
* TTM buffer object driver - vmwgfx_buffer.c |
*/ |
extern const size_t vmw_tt_size; |
extern struct ttm_placement vmw_vram_placement; |
extern struct ttm_placement vmw_vram_ne_placement; |
extern struct ttm_placement vmw_vram_sys_placement; |
580,12 → 720,57 |
extern struct ttm_placement vmw_vram_gmr_placement; |
extern struct ttm_placement vmw_vram_gmr_ne_placement; |
extern struct ttm_placement vmw_sys_placement; |
extern struct ttm_placement vmw_sys_ne_placement; |
extern struct ttm_placement vmw_evictable_placement; |
extern struct ttm_placement vmw_srf_placement; |
extern struct ttm_placement vmw_mob_placement; |
extern struct ttm_bo_driver vmw_bo_driver; |
extern int vmw_dma_quiescent(struct drm_device *dev); |
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo); |
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo); |
extern const struct vmw_sg_table * |
vmw_bo_sg_table(struct ttm_buffer_object *bo); |
extern void vmw_piter_start(struct vmw_piter *viter, |
const struct vmw_sg_table *vsgt, |
unsigned long p_offs); |
/** |
* vmw_piter_next - Advance the iterator one page. |
* |
* @viter: Pointer to the iterator to advance. |
* |
* Returns false if past the list of pages, true otherwise. |
*/ |
static inline bool vmw_piter_next(struct vmw_piter *viter) |
{ |
return viter->next(viter); |
} |
/** |
* vmw_piter_dma_addr - Return the DMA address of the current page. |
* |
* @viter: Pointer to the iterator |
* |
* Returns the DMA address of the page pointed to by @viter. |
*/ |
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter) |
{ |
return viter->dma_address(viter); |
} |
/** |
* vmw_piter_page - Return a pointer to the current page. |
* |
* @viter: Pointer to the iterator |
* |
* Returns a pointer to the page currently pointed to by @viter. |
*/ |
static inline struct page *vmw_piter_page(struct vmw_piter *viter) |
{ |
return viter->page(viter); |
} |
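These three accessors make the iterator storage-agnostic: the same loop works |
whether the backing store is a plain page array, a coherent DMA address array |
or a scatter-gather list. A minimal sketch of the canonical walk ('bo' is |
assumed to be a populated buffer object): |
/* Sketch: feed every page's DMA address to a binding routine. */ |
const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo); |
struct vmw_piter piter; |
vmw_piter_start(&piter, vsgt, 0); |
while (vmw_piter_next(&piter)) { |
	dma_addr_t addr = vmw_piter_dma_addr(&piter); |
	(void)addr;	/* e.g. write into a GMR descriptor or MOB page table */ |
} |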
/** |
* Command submission - vmwgfx_execbuf.c |
*/ |
620,7 → 805,7 |
* IRQs and waiting - vmwgfx_irq.c |
*/ |
extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS); |
extern irqreturn_t vmw_irq_handler(int irq, void *arg); |
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy, |
uint32_t seqno, bool interruptible, |
unsigned long timeout); |
739,6 → 924,62 |
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func; |
/* |
* Memory object (MOB) management - vmwgfx_mob.c |
*/ |
struct vmw_mob; |
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob, |
const struct vmw_sg_table *vsgt, |
unsigned long num_data_pages, int32_t mob_id); |
extern void vmw_mob_unbind(struct vmw_private *dev_priv, |
struct vmw_mob *mob); |
extern void vmw_mob_destroy(struct vmw_mob *mob); |
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages); |
extern int vmw_otables_setup(struct vmw_private *dev_priv); |
extern void vmw_otables_takedown(struct vmw_private *dev_priv); |
/* |
* Context management - vmwgfx_context.c |
*/ |
extern const struct vmw_user_resource_conv *user_context_converter; |
extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); |
extern int vmw_context_check(struct vmw_private *dev_priv, |
struct ttm_object_file *tfile, |
int id, |
struct vmw_resource **p_res); |
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, |
const struct vmw_ctx_bindinfo *ci); |
extern void |
vmw_context_binding_state_transfer(struct vmw_resource *res, |
struct vmw_ctx_binding_state *cbs); |
extern void vmw_context_binding_res_list_kill(struct list_head *head); |
/* |
* Surface management - vmwgfx_surface.c |
*/ |
extern const struct vmw_user_resource_conv *user_surface_converter; |
extern void vmw_surface_res_free(struct vmw_resource *res); |
extern int vmw_surface_check(struct vmw_private *dev_priv, |
struct ttm_object_file *tfile, |
uint32_t handle, int *id); |
extern int vmw_surface_validate(struct vmw_private *dev_priv, |
struct vmw_surface *srf); |
/* |
* Shader management - vmwgfx_shader.c |
*/ |
extern const struct vmw_user_resource_conv *user_shader_converter; |
/** |
* Inline helper functions |
*/ |
/drivers/video/drm/vmwgfx/vmwgfx_execbuf.c |
---|
54,6 → 54,8 |
* @res: Ref-counted pointer to the resource. |
* @switch_backup: Boolean whether to switch backup buffer on unreserve. |
* @new_backup: Refcounted pointer to the new backup buffer. |
* @staged_bindings: If @res is a context, tracks bindings set up during |
* the command batch. Otherwise NULL. |
* @new_backup_offset: New backup buffer offset if @new_backup is non-NULL. |
* @first_usage: Set to true the first time the resource is referenced in |
* the command stream. |
65,6 → 67,7 |
struct drm_hash_item hash; |
struct vmw_resource *res; |
struct vmw_dma_buffer *new_backup; |
struct vmw_ctx_binding_state *staged_bindings; |
unsigned long new_backup_offset; |
bool first_usage; |
bool no_buffer_needed; |
71,6 → 74,25 |
}; |
/** |
* struct vmw_cmd_entry - Describe a command for the verifier |
* |
* @func: Validation function for the command. |
* @user_allow: Whether allowed from the execbuf ioctl. |
* @gb_disable: Whether disabled if guest-backed objects are available. |
* @gb_enable: Whether enabled iff guest-backed objects are available. |
*/ |
struct vmw_cmd_entry { |
int (*func) (struct vmw_private *, struct vmw_sw_context *, |
SVGA3dCmdHeader *); |
bool user_allow; |
bool gb_disable; |
bool gb_enable; |
}; |
#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \ |
[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\ |
(_gb_disable), (_gb_enable)} |
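VMW_CMD_DEF keys the verifier's dispatch table by command id relative to |
SVGA_3D_CMD_BASE, so lookup during validation is a plain array index. A hedged |
sketch of how entries are declared (table name and flag choices follow the |
full driver; only check functions appearing in this file are used): |
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { |
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, |
		    &vmw_cmd_set_render_target_check, true, false, false), |
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query, |
		    true, false, false), |
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query, |
		    true, false, true), |
}; |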
/** |
* vmw_resource_unreserve - unreserve resources previously reserved for |
* command submission. |
* |
87,6 → 109,16 |
struct vmw_dma_buffer *new_backup = |
backoff ? NULL : val->new_backup; |
/* |
* Transfer staged context bindings to the |
* persistent context binding tracker. |
*/ |
if (unlikely(val->staged_bindings)) { |
vmw_context_binding_state_transfer |
(val->res, val->staged_bindings); |
kfree(val->staged_bindings); |
val->staged_bindings = NULL; |
} |
vmw_resource_unreserve(res, new_backup, |
val->new_backup_offset); |
vmw_dmabuf_unreference(&val->new_backup); |
224,6 → 256,7 |
* |
* @sw_context: The software context used for this command submission batch. |
* @bo: The buffer object to add. |
* @validate_as_mob: Validate this buffer as a MOB. |
* @p_val_node: If non-NULL Will be updated with the validate node number |
* on return. |
* |
232,6 → 265,7 |
*/ |
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, |
struct ttm_buffer_object *bo, |
bool validate_as_mob, |
uint32_t *p_val_node) |
{ |
uint32_t val_node; |
244,6 → 278,10 |
&hash) == 0)) { |
vval_buf = container_of(hash, struct vmw_validate_buffer, |
hash); |
if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) { |
DRM_ERROR("Inconsistent buffer usage.\n"); |
return -EINVAL; |
} |
val_buf = &vval_buf->base; |
val_node = vval_buf - sw_context->val_bufs; |
} else { |
266,6 → 304,7 |
val_buf->bo = ttm_bo_reference(bo); |
val_buf->reserved = false; |
list_add_tail(&val_buf->head, &sw_context->validate_nodes); |
vval_buf->validate_as_mob = validate_as_mob; |
} |
sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; |
302,7 → 341,8 |
struct ttm_buffer_object *bo = &res->backup->base; |
ret = vmw_bo_to_validate_list |
(sw_context, bo, NULL); |
(sw_context, bo, |
vmw_resource_needs_backup(res), NULL); |
if (unlikely(ret != 0)) |
return ret; |
362,8 → 402,15 |
struct vmw_resource_val_node *node; |
int ret; |
if (*id == SVGA3D_INVALID_ID) |
if (*id == SVGA3D_INVALID_ID) { |
if (p_val) |
*p_val = NULL; |
if (res_type == vmw_res_context) { |
DRM_ERROR("Illegal context invalid id.\n"); |
return -EINVAL; |
} |
return 0; |
} |
/* |
* Fastpath in case of repeated commands referencing the same |
411,6 → 458,18 |
rcache->node = node; |
if (p_val) |
*p_val = node; |
if (node->first_usage && res_type == vmw_res_context) { |
node->staged_bindings = |
kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); |
if (node->staged_bindings == NULL) { |
DRM_ERROR("Failed to allocate context binding " |
"information.\n"); |
goto out_no_reloc; |
} |
INIT_LIST_HEAD(&node->staged_bindings->list); |
} |
vmw_resource_unreference(&res); |
return 0; |
453,19 → 512,37 |
SVGA3dCmdHeader header; |
SVGA3dCmdSetRenderTarget body; |
} *cmd; |
struct vmw_resource_val_node *ctx_node; |
struct vmw_resource_val_node *res_node; |
int ret; |
ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
cmd = container_of(header, struct vmw_sid_cmd, header); |
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
user_context_converter, &cmd->body.cid, |
&ctx_node); |
if (unlikely(ret != 0)) |
return ret; |
cmd = container_of(header, struct vmw_sid_cmd, header); |
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
user_surface_converter, |
&cmd->body.target.sid, NULL); |
&cmd->body.target.sid, &res_node); |
if (unlikely(ret != 0)) |
return ret; |
if (dev_priv->has_mob) { |
struct vmw_ctx_bindinfo bi; |
bi.ctx = ctx_node->res; |
bi.res = res_node ? res_node->res : NULL; |
bi.bt = vmw_ctx_binding_rt; |
bi.i1.rt_type = cmd->body.type; |
return vmw_context_binding_add(ctx_node->staged_bindings, &bi); |
} |
return 0; |
} |
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, |
struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header) |
519,11 → 596,6 |
cmd = container_of(header, struct vmw_sid_cmd, header); |
if (unlikely(!sw_context->kernel)) { |
DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id); |
return -EPERM; |
} |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
user_surface_converter, |
&cmd->body.srcImage.sid, NULL); |
541,11 → 613,6 |
cmd = container_of(header, struct vmw_sid_cmd, header); |
if (unlikely(!sw_context->kernel)) { |
DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id); |
return -EPERM; |
} |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
user_surface_converter, &cmd->body.sid, |
NULL); |
586,7 → 653,7 |
sw_context->needs_post_query_barrier = true; |
ret = vmw_bo_to_validate_list(sw_context, |
sw_context->cur_query_bo, |
NULL); |
dev_priv->has_mob, NULL); |
if (unlikely(ret != 0)) |
return ret; |
} |
594,7 → 661,7 |
ret = vmw_bo_to_validate_list(sw_context, |
dev_priv->dummy_query_bo, |
NULL); |
dev_priv->has_mob, NULL); |
if (unlikely(ret != 0)) |
return ret; |
672,6 → 739,66 |
} |
/** |
* vmw_translate_mob_pointer - Prepare to translate a user-space buffer |
* handle to a MOB id. |
* |
* @dev_priv: Pointer to a device private structure. |
* @sw_context: The software context used for this command batch validation. |
* @id: Pointer to the user-space handle to be translated. |
* @vmw_bo_p: Points to a location that, on successful return will carry |
* a reference-counted pointer to the DMA buffer identified by the |
* user-space handle in @id. |
* |
* This function saves information needed to translate a user-space buffer |
* handle to a MOB id. The translation does not take place immediately, but |
* during a call to vmw_apply_relocations(). This function builds a relocation |
* list and a list of buffers to validate. The former needs to be freed using |
* either vmw_apply_relocations() or vmw_free_relocations(). The latter |
* needs to be freed using vmw_clear_validations. |
*/ |
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, |
struct vmw_sw_context *sw_context, |
SVGAMobId *id, |
struct vmw_dma_buffer **vmw_bo_p) |
{ |
struct vmw_dma_buffer *vmw_bo = NULL; |
struct ttm_buffer_object *bo; |
uint32_t handle = *id; |
struct vmw_relocation *reloc; |
int ret; |
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); |
if (unlikely(ret != 0)) { |
DRM_ERROR("Could not find or use MOB buffer.\n"); |
return -EINVAL; |
} |
bo = &vmw_bo->base; |
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { |
DRM_ERROR("Max number relocations per submission" |
" exceeded\n"); |
ret = -EINVAL; |
goto out_no_reloc; |
} |
reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
reloc->mob_loc = id; |
reloc->location = NULL; |
ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index); |
if (unlikely(ret != 0)) |
goto out_no_reloc; |
*vmw_bo_p = vmw_bo; |
return 0; |
out_no_reloc: |
vmw_dmabuf_unreference(&vmw_bo); |
*vmw_bo_p = NULL; |
return ret; |
} |
/** |
* vmw_translate_guest_pointer - Prepare to translate a user-space buffer |
* handle to a valid SVGAGuestPtr |
* |
718,7 → 845,7 |
reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
reloc->location = ptr; |
ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index); |
ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index); |
if (unlikely(ret != 0)) |
goto out_no_reloc; |
732,6 → 859,30 |
} |
/** |
* vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command. |
* |
* @dev_priv: Pointer to a device private struct. |
* @sw_context: The software context used for this command submission. |
* @header: Pointer to the command header in the command stream. |
*/ |
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv, |
struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header) |
{ |
struct vmw_begin_gb_query_cmd { |
SVGA3dCmdHeader header; |
SVGA3dCmdBeginGBQuery q; |
} *cmd; |
cmd = container_of(header, struct vmw_begin_gb_query_cmd, |
header); |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
user_context_converter, &cmd->q.cid, |
NULL); |
} |
/** |
* vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command. |
* |
* @dev_priv: Pointer to a device private struct. |
750,6 → 901,23 |
cmd = container_of(header, struct vmw_begin_query_cmd, |
header); |
if (unlikely(dev_priv->has_mob)) { |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdBeginGBQuery q; |
} gb_cmd; |
BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); |
gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY; |
gb_cmd.header.size = cmd->header.size; |
gb_cmd.q.cid = cmd->q.cid; |
gb_cmd.q.type = cmd->q.type; |
memcpy(cmd, &gb_cmd, sizeof(*cmd)); |
return vmw_cmd_begin_gb_query(dev_priv, sw_context, header); |
} |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
user_context_converter, &cmd->q.cid, |
NULL); |
756,6 → 924,41 |
} |
/** |
* vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command. |
* |
* @dev_priv: Pointer to a device private struct. |
* @sw_context: The software context used for this command submission. |
* @header: Pointer to the command header in the command stream. |
*/ |
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, |
struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header) |
{ |
struct vmw_dma_buffer *vmw_bo; |
struct vmw_query_cmd { |
SVGA3dCmdHeader header; |
SVGA3dCmdEndGBQuery q; |
} *cmd; |
int ret; |
cmd = container_of(header, struct vmw_query_cmd, header); |
ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
if (unlikely(ret != 0)) |
return ret; |
ret = vmw_translate_mob_ptr(dev_priv, sw_context, |
&cmd->q.mobid, |
&vmw_bo); |
if (unlikely(ret != 0)) |
return ret; |
ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); |
vmw_dmabuf_unreference(&vmw_bo); |
return ret; |
} |
/** |
* vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command. |
* |
* @dev_priv: Pointer to a device private struct. |
774,6 → 977,25 |
int ret; |
cmd = container_of(header, struct vmw_query_cmd, header); |
if (dev_priv->has_mob) { |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdEndGBQuery q; |
} gb_cmd; |
BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); |
gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY; |
gb_cmd.header.size = cmd->header.size; |
gb_cmd.q.cid = cmd->q.cid; |
gb_cmd.q.type = cmd->q.type; |
gb_cmd.q.mobid = cmd->q.guestResult.gmrId; |
gb_cmd.q.offset = cmd->q.guestResult.offset; |
memcpy(cmd, &gb_cmd, sizeof(*cmd)); |
return vmw_cmd_end_gb_query(dev_priv, sw_context, header); |
} |
ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
if (unlikely(ret != 0)) |
return ret; |
790,7 → 1012,40 |
return ret; |
} |
/* |
/** |
* vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command. |
* |
* @dev_priv: Pointer to a device private struct. |
* @sw_context: The software context used for this command submission. |
* @header: Pointer to the command header in the command stream. |
*/ |
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, |
struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header) |
{ |
struct vmw_dma_buffer *vmw_bo; |
struct vmw_query_cmd { |
SVGA3dCmdHeader header; |
SVGA3dCmdWaitForGBQuery q; |
} *cmd; |
int ret; |
cmd = container_of(header, struct vmw_query_cmd, header); |
ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
if (unlikely(ret != 0)) |
return ret; |
ret = vmw_translate_mob_ptr(dev_priv, sw_context, |
&cmd->q.mobid, |
&vmw_bo); |
if (unlikely(ret != 0)) |
return ret; |
vmw_dmabuf_unreference(&vmw_bo); |
return 0; |
} |
/** |
* vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command. |
* |
* @dev_priv: Pointer to a device private struct. |
809,6 → 1064,25 |
int ret; |
cmd = container_of(header, struct vmw_query_cmd, header); |
if (dev_priv->has_mob) { |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdWaitForGBQuery q; |
} gb_cmd; |
BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); |
gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; |
gb_cmd.header.size = cmd->header.size; |
gb_cmd.q.cid = cmd->q.cid; |
gb_cmd.q.type = cmd->q.type; |
gb_cmd.q.mobid = cmd->q.guestResult.gmrId; |
gb_cmd.q.offset = cmd->q.guestResult.offset; |
memcpy(cmd, &gb_cmd, sizeof(*cmd)); |
return vmw_cmd_wait_gb_query(dev_priv, sw_context, header); |
} |
ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
if (unlikely(ret != 0)) |
return ret; |
921,15 → 1195,22 |
struct vmw_tex_state_cmd { |
SVGA3dCmdHeader header; |
SVGA3dCmdSetTextureState state; |
}; |
} *cmd; |
SVGA3dTextureState *last_state = (SVGA3dTextureState *) |
((unsigned long) header + header->size + sizeof(header)); |
SVGA3dTextureState *cur_state = (SVGA3dTextureState *) |
((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); |
struct vmw_resource_val_node *ctx_node; |
struct vmw_resource_val_node *res_node; |
int ret; |
ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
cmd = container_of(header, struct vmw_tex_state_cmd, |
header); |
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
user_context_converter, &cmd->state.cid, |
&ctx_node); |
if (unlikely(ret != 0)) |
return ret; |
939,10 → 1220,21 |
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
user_surface_converter, |
&cur_state->value, NULL); |
&cur_state->value, &res_node); |
if (unlikely(ret != 0)) |
return ret; |
if (dev_priv->has_mob) { |
struct vmw_ctx_bindinfo bi; |
bi.ctx = ctx_node->res; |
bi.res = res_node ? res_node->res : NULL; |
bi.bt = vmw_ctx_binding_tex; |
bi.i1.texture_stage = cur_state->stage; |
vmw_context_binding_add(ctx_node->staged_bindings, |
&bi); |
} |
} |
return 0; |
} |
971,6 → 1263,222 |
} |
/** |
* vmw_cmd_switch_backup - Utility function to handle backup buffer switching |
* |
* @dev_priv: Pointer to a device private struct. |
* @sw_context: The software context being used for this batch. |
* @res_type: The resource type. |
* @converter: Information about user-space binding for this resource type. |
* @res_id: Pointer to the user-space resource handle in the command stream. |
* @buf_id: Pointer to the user-space backup buffer handle in the command |
* stream. |
* @backup_offset: Offset of backup into MOB. |
* |
* This function prepares for registering a switch of backup buffers |
* in the resource metadata just prior to unreserving. |
*/ |
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, |
struct vmw_sw_context *sw_context, |
enum vmw_res_type res_type, |
const struct vmw_user_resource_conv |
*converter, |
uint32_t *res_id, |
uint32_t *buf_id, |
unsigned long backup_offset) |
{ |
int ret; |
struct vmw_dma_buffer *dma_buf; |
struct vmw_resource_val_node *val_node; |
ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, |
converter, res_id, &val_node); |
if (unlikely(ret != 0)) |
return ret; |
ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); |
if (unlikely(ret != 0)) |
return ret; |
if (val_node->first_usage) |
val_node->no_buffer_needed = true; |
vmw_dmabuf_unreference(&val_node->new_backup); |
val_node->new_backup = dma_buf; |
val_node->new_backup_offset = backup_offset; |
return 0; |
} |
/** |
* vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE |
* command |
* |
* @dev_priv: Pointer to a device private struct. |
* @sw_context: The software context being used for this batch. |
* @header: Pointer to the command header in the command stream. |
*/ |
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv, |
struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header) |
{ |
struct vmw_bind_gb_surface_cmd { |
SVGA3dCmdHeader header; |
SVGA3dCmdBindGBSurface body; |
} *cmd; |
cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header); |
return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface, |
user_surface_converter, |
&cmd->body.sid, &cmd->body.mobid, |
0); |
} |
/** |
* vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE |
* command |
* |
* @dev_priv: Pointer to a device private struct. |
* @sw_context: The software context being used for this batch. |
* @header: Pointer to the command header in the command stream. |
*/ |
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv, |
struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header) |
{ |
struct vmw_gb_surface_cmd { |
SVGA3dCmdHeader header; |
SVGA3dCmdUpdateGBImage body; |
} *cmd; |
cmd = container_of(header, struct vmw_gb_surface_cmd, header); |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
user_surface_converter, |
&cmd->body.image.sid, NULL); |
} |
/** |
* vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE |
* command |
* |
* @dev_priv: Pointer to a device private struct. |
* @sw_context: The software context being used for this batch. |
* @header: Pointer to the command header in the command stream. |
*/ |
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv, |
struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header) |
{ |
struct vmw_gb_surface_cmd { |
SVGA3dCmdHeader header; |
SVGA3dCmdUpdateGBSurface body; |
} *cmd; |
cmd = container_of(header, struct vmw_gb_surface_cmd, header); |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
user_surface_converter, |
&cmd->body.sid, NULL); |
} |
/** |
* vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE |
* command |
* |
* @dev_priv: Pointer to a device private struct. |
* @sw_context: The software context being used for this batch. |
* @header: Pointer to the command header in the command stream. |
*/ |
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv, |
struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header) |
{ |
struct vmw_gb_surface_cmd { |
SVGA3dCmdHeader header; |
SVGA3dCmdReadbackGBImage body; |
} *cmd; |
cmd = container_of(header, struct vmw_gb_surface_cmd, header); |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
user_surface_converter, |
&cmd->body.image.sid, NULL); |
} |
/** |
* vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE |
* command |
* |
* @dev_priv: Pointer to a device private struct. |
* @sw_context: The software context being used for this batch. |
* @header: Pointer to the command header in the command stream. |
*/ |
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv, |
struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header) |
{ |
struct vmw_gb_surface_cmd { |
SVGA3dCmdHeader header; |
SVGA3dCmdReadbackGBSurface body; |
} *cmd; |
cmd = container_of(header, struct vmw_gb_surface_cmd, header); |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
user_surface_converter, |
&cmd->body.sid, NULL); |
} |
/** |
* vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE |
* command |
* |
* @dev_priv: Pointer to a device private struct. |
* @sw_context: The software context being used for this batch. |
* @header: Pointer to the command header in the command stream. |
*/ |
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv, |
struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header) |
{ |
struct vmw_gb_surface_cmd { |
SVGA3dCmdHeader header; |
SVGA3dCmdInvalidateGBImage body; |
} *cmd; |
cmd = container_of(header, struct vmw_gb_surface_cmd, header); |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
user_surface_converter, |
&cmd->body.image.sid, NULL); |
} |
/** |
* vmw_cmd_invalidate_gb_surface - Validate an |
* SVGA_3D_CMD_INVALIDATE_GB_SURFACE command |
* |
* @dev_priv: Pointer to a device private struct. |
* @sw_context: The software context being used for this batch. |
* @header: Pointer to the command header in the command stream. |
*/ |
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, |
struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header) |
{ |
struct vmw_gb_surface_cmd { |
SVGA3dCmdHeader header; |
SVGA3dCmdInvalidateGBSurface body; |
} *cmd; |
cmd = container_of(header, struct vmw_gb_surface_cmd, header); |
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
user_surface_converter, |
&cmd->body.sid, NULL); |
} |
/** |
* vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER |
* command |
* |
986,18 → 1494,64 |
SVGA3dCmdHeader header; |
SVGA3dCmdSetShader body; |
} *cmd; |
struct vmw_resource_val_node *ctx_node; |
int ret; |
cmd = container_of(header, struct vmw_set_shader_cmd, |
header); |
ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
user_context_converter, &cmd->body.cid, |
&ctx_node); |
if (unlikely(ret != 0)) |
return ret; |
if (dev_priv->has_mob) { |
struct vmw_ctx_bindinfo bi; |
struct vmw_resource_val_node *res_node; |
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader, |
user_shader_converter, |
&cmd->body.shid, &res_node); |
if (unlikely(ret != 0)) |
return ret; |
bi.ctx = ctx_node->res; |
bi.res = res_node ? res_node->res : NULL; |
bi.bt = vmw_ctx_binding_shader; |
bi.i1.shader_type = cmd->body.type; |
return vmw_context_binding_add(ctx_node->staged_bindings, &bi); |
} |
return 0; |
} |
/** |
* vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER |
* command |
* |
* @dev_priv: Pointer to a device private struct. |
* @sw_context: The software context being used for this batch. |
* @header: Pointer to the command header in the command stream. |
*/ |
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv, |
struct vmw_sw_context *sw_context, |
SVGA3dCmdHeader *header) |
{ |
struct vmw_bind_gb_shader_cmd { |
SVGA3dCmdHeader header; |
SVGA3dCmdBindGBShader body; |
} *cmd; |
cmd = container_of(header, struct vmw_bind_gb_shader_cmd, |
header); |
return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader, |
user_shader_converter, |
&cmd->body.shid, &cmd->body.mobid, |
cmd->body.offsetInBytes); |
} |
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, |
struct vmw_sw_context *sw_context, |
void *buf, uint32_t *size) |
1041,50 → 1595,173 |
return 0; |
} |
typedef int (*vmw_cmd_func) (struct vmw_private *, |
struct vmw_sw_context *, |
SVGA3dCmdHeader *); |
#define VMW_CMD_DEF(cmd, func) \ |
[cmd - SVGA_3D_CMD_BASE] = func |
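/*
 * The flattened hunk here keeps only the old two-argument VMW_CMD_DEF,
 * while the rebuilt table below invokes it with five arguments and
 * vmw_cmd_check() reads entry->func, ->user_allow, ->gb_disable and
 * ->gb_enable. A minimal sketch of the updated definitions, with field
 * names inferred from those uses rather than taken from this diff:
 */
struct vmw_cmd_entry {
	vmw_cmd_func func;
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};
#define VMW_CMD_DEF(id, func, user_allow, gb_disable, gb_enable)	\
	[(id) - SVGA_3D_CMD_BASE] = {(func), (user_allow),		\
				     (gb_disable), (gb_enable)}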
static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { |
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid), |
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid), |
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check), |
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check), |
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma), |
VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid), |
VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid), |
VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check), |
VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check), |
VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check), |
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, |
&vmw_cmd_set_render_target_check), |
VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state), |
VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check), |
VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check), |
VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check), |
VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check), |
VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check), |
VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check), |
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check), |
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check), |
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), |
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader), |
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), |
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), |
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), |
VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query), |
VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), |
VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), |
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), |
&vmw_cmd_set_render_target_check, true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check, |
true, true, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check, |
true, true, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check, |
true, true, false), |
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok, |
true, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, |
&vmw_cmd_blt_surf_screen_check), |
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid), |
VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid), |
VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid), |
VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid), |
&vmw_cmd_blt_surf_screen_check, false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface, |
true, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image, |
true, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE, |
&vmw_cmd_update_gb_surface, true, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE, |
&vmw_cmd_readback_gb_image, true, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE, |
&vmw_cmd_readback_gb_surface, true, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE, |
&vmw_cmd_invalidate_gb_image, true, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE, |
&vmw_cmd_invalidate_gb_surface, true, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader, |
true, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid, |
false, false, false), |
VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query, |
true, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query, |
true, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query, |
true, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok, |
true, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, |
false, false, true), |
VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, |
true, false, true) |
}; |
static int vmw_cmd_check(struct vmw_private *dev_priv, |
1095,6 → 1772,8 |
uint32_t size_remaining = *size; |
SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; |
int ret; |
const struct vmw_cmd_entry *entry; |
bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS; |
cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); |
	/* Handle any non-3D commands */
1107,20 → 1786,42 |
cmd_id -= SVGA_3D_CMD_BASE; |
if (unlikely(*size > size_remaining)) |
goto out_err; |
goto out_invalid; |
if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) |
goto out_err; |
goto out_invalid; |
ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header); |
entry = &vmw_cmd_entries[cmd_id]; |
if (unlikely(!entry->user_allow && !sw_context->kernel)) |
goto out_privileged; |
if (unlikely(entry->gb_disable && gb)) |
goto out_old; |
if (unlikely(entry->gb_enable && !gb)) |
goto out_new; |
ret = entry->func(dev_priv, sw_context, header); |
if (unlikely(ret != 0)) |
goto out_err; |
goto out_invalid; |
return 0; |
out_err: |
DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n", |
out_invalid: |
DRM_ERROR("Invalid SVGA3D command: %d\n", |
cmd_id + SVGA_3D_CMD_BASE); |
return -EINVAL; |
out_privileged: |
DRM_ERROR("Privileged SVGA3D command: %d\n", |
cmd_id + SVGA_3D_CMD_BASE); |
return -EPERM; |
out_old: |
DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n", |
cmd_id + SVGA_3D_CMD_BASE); |
return -EINVAL; |
out_new: |
DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n", |
cmd_id + SVGA_3D_CMD_BASE); |
return -EINVAL; |
} |
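/*
 * Summary of the per-entry gating applied above, where gb means the
 * device advertises SVGA_CAP_GBOBJECTS:
 *
 *   !user_allow && !sw_context->kernel -> -EPERM  (privileged command)
 *   gb_disable  &&  gb                 -> -EINVAL (legacy-only command)
 *   gb_enable   && !gb                 -> -EINVAL (needs GB hardware)
 */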
static int vmw_cmd_check_all(struct vmw_private *dev_priv, |
1174,6 → 1875,9 |
case VMW_PL_GMR: |
reloc->location->gmrId = bo->mem.start; |
break; |
case VMW_PL_MOB: |
*reloc->mob_loc = bo->mem.start; |
break; |
default: |
BUG(); |
} |
1198,6 → 1902,8 |
list_for_each_entry_safe(val, val_next, list, head) { |
list_del_init(&val->head); |
vmw_resource_unreference(&val->res); |
if (unlikely(val->staged_bindings)) |
kfree(val->staged_bindings); |
kfree(val); |
} |
} |
1224,7 → 1930,8 |
} |
static int vmw_validate_single_buffer(struct vmw_private *dev_priv, |
struct ttm_buffer_object *bo) |
struct ttm_buffer_object *bo, |
bool validate_as_mob) |
{ |
int ret; |
1238,6 → 1945,9 |
dev_priv->dummy_query_bo_pinned)) |
return 0; |
if (validate_as_mob) |
return ttm_bo_validate(bo, &vmw_mob_placement, true, false); |
/** |
* Put BO in VRAM if there is space, otherwise as a GMR. |
* If there is no space in VRAM and GMR ids are all used up, |
1259,7 → 1969,6 |
return ret; |
} |
static int vmw_validate_buffers(struct vmw_private *dev_priv, |
struct vmw_sw_context *sw_context) |
{ |
1267,7 → 1976,8 |
int ret; |
list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { |
ret = vmw_validate_single_buffer(dev_priv, entry->base.bo); |
ret = vmw_validate_single_buffer(dev_priv, entry->base.bo, |
entry->validate_as_mob); |
if (unlikely(ret != 0)) |
return ret; |
} |
1292,7 → 2002,7 |
if (sw_context->cmd_bounce != NULL) |
vfree(sw_context->cmd_bounce); |
sw_context->cmd_bounce = KernelAlloc(sw_context->cmd_bounce_size); |
sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size); |
if (sw_context->cmd_bounce == NULL) { |
DRM_ERROR("Failed to allocate command bounce buffer.\n"); |
1510,11 → 2220,17 |
goto out_err; |
} |
ret = mutex_lock_interruptible(&dev_priv->binding_mutex); |
if (unlikely(ret != 0)) { |
ret = -ERESTARTSYS; |
goto out_err; |
} |
cmd = vmw_fifo_reserve(dev_priv, command_size); |
if (unlikely(cmd == NULL)) { |
DRM_ERROR("Failed reserving fifo space for commands.\n"); |
ret = -ENOMEM; |
goto out_err; |
goto out_unlock_binding; |
} |
vmw_apply_relocations(sw_context); |
1539,6 → 2255,8 |
DRM_ERROR("Fence submission error. Syncing.\n"); |
vmw_resource_list_unreserve(&sw_context->resource_list, false); |
mutex_unlock(&dev_priv->binding_mutex); |
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, |
(void *) fence); |
1569,6 → 2287,8 |
return 0; |
out_unlock_binding: |
mutex_unlock(&dev_priv->binding_mutex); |
out_err: |
vmw_resource_relocations_free(&sw_context->res_relocations); |
vmw_free_relocations(sw_context); |
/drivers/video/drm/vmwgfx/vmwgfx_fence.c |
---|
271,7 → 271,7 |
spin_unlock_irq(&fman->lock); |
} |
void vmw_fences_perform_actions(struct vmw_fence_manager *fman, |
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman, |
struct list_head *list) |
{ |
struct vmw_fence_action *action, *next_action; |
897,7 → 897,7 |
* Note that the action callbacks may be executed before this function |
* returns. |
*/ |
void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, |
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, |
struct vmw_fence_action *action) |
{ |
struct vmw_fence_manager *fman = fence->fman; |
993,7 → 993,7 |
struct drm_vmw_event_fence event; |
}; |
int vmw_event_fence_action_create(struct drm_file *file_priv, |
static int vmw_event_fence_action_create(struct drm_file *file_priv, |
struct vmw_fence_obj *fence, |
uint32_t flags, |
uint64_t user_data, |
1081,7 → 1081,8 |
*/ |
if (arg->handle) { |
struct ttm_base_object *base = |
ttm_base_object_lookup(vmw_fp->tfile, arg->handle); |
ttm_base_object_lookup_for_ref(dev_priv->tdev, |
arg->handle); |
if (unlikely(base == NULL)) { |
DRM_ERROR("Fence event invalid fence object handle " |
/drivers/video/drm/vmwgfx/vmwgfx_fifo.c |
---|
41,6 → 41,23 |
uint32_t fifo_min, hwversion; |
const struct vmw_fifo_state *fifo = &dev_priv->fifo; |
if (!(dev_priv->capabilities & SVGA_CAP_3D)) |
return false; |
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { |
uint32_t result; |
if (!dev_priv->has_mob) |
return false; |
mutex_lock(&dev_priv->hw_mutex); |
vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); |
result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); |
mutex_unlock(&dev_priv->hw_mutex); |
return (result != 0); |
} |
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) |
return false; |
518,23 → 535,15 |
} |
/** |
* vmw_fifo_emit_dummy_query - emits a dummy query to the fifo. |
* vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using |
* legacy query commands. |
* |
* @dev_priv: The device private structure. |
* @cid: The hardware context id used for the query. |
* |
* This function is used to emit a dummy occlusion query with |
* no primitives rendered between query begin and query end. |
* It's used to provide a query barrier, in order to know that when |
* this query is finished, all preceding queries are also finished. |
* |
* A Query results structure should have been initialized at the start |
* of the dev_priv->dummy_query_bo buffer object. And that buffer object |
* must also be either reserved or pinned when this function is called. |
* |
* Returns -ENOMEM on failure to reserve fifo space. |
* See the vmw_fifo_emit_dummy_query documentation. |
*/ |
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, |
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv, |
uint32_t cid) |
{ |
/* |
573,3 → 582,75 |
return 0; |
} |
/** |
* vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using |
* guest-backed resource query commands. |
* |
* @dev_priv: The device private structure. |
* @cid: The hardware context id used for the query. |
* |
* See the vmw_fifo_emit_dummy_query documentation. |
*/ |
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv, |
uint32_t cid) |
{ |
/* |
* A query wait without a preceding query end will |
* actually finish all queries for this cid |
* without writing to the query result structure. |
*/ |
struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdWaitForGBQuery body; |
} *cmd; |
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) { |
DRM_ERROR("Out of fifo space for dummy query.\n"); |
return -ENOMEM; |
} |
cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; |
cmd->header.size = sizeof(cmd->body); |
cmd->body.cid = cid; |
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION; |
BUG_ON(bo->mem.mem_type != VMW_PL_MOB); |
cmd->body.mobid = bo->mem.start; |
cmd->body.offset = 0; |
vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
return 0; |
} |
/** |
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
 * the appropriate resource query commands.
* |
* @dev_priv: The device private structure. |
* @cid: The hardware context id used for the query. |
* |
* This function is used to emit a dummy occlusion query with |
* no primitives rendered between query begin and query end. |
* It's used to provide a query barrier, in order to know that when |
* this query is finished, all preceding queries are also finished. |
* |
* A Query results structure should have been initialized at the start |
* of the dev_priv->dummy_query_bo buffer object. And that buffer object |
* must also be either reserved or pinned when this function is called. |
* |
* Returns -ENOMEM on failure to reserve fifo space. |
*/ |
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, |
uint32_t cid) |
{ |
if (dev_priv->has_mob) |
return vmw_fifo_emit_dummy_gb_query(dev_priv, cid); |
return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid); |
} |
/drivers/video/drm/vmwgfx/vmwgfx_gmr.c |
---|
35,9 → 35,11 |
#define VMW_PPN_SIZE (sizeof(unsigned long)) |
/* A future safe maximum remap size. */ |
#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE) |
#define DMA_ADDR_INVALID ((dma_addr_t) 0) |
#define DMA_PAGE_INVALID 0UL |
static int vmw_gmr2_bind(struct vmw_private *dev_priv, |
struct page *pages[], |
struct vmw_piter *iter, |
unsigned long num_pages, |
int gmr_id) |
{ |
84,11 → 86,13 |
for (i = 0; i < nr; ++i) { |
if (VMW_PPN_SIZE <= 4) |
*cmd = page_to_pfn(*pages++); |
*cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT; |
else |
*((uint64_t *)cmd) = page_to_pfn(*pages++); |
*((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >> |
PAGE_SHIFT; |
cmd += VMW_PPN_SIZE / sizeof(*cmd); |
vmw_piter_next(iter); |
} |
num_pages -= nr; |
125,32 → 129,26 |
int vmw_gmr_bind(struct vmw_private *dev_priv, |
struct page *pages[], |
const struct vmw_sg_table *vsgt, |
unsigned long num_pages, |
int gmr_id) |
{ |
struct list_head desc_pages; |
int ret; |
struct vmw_piter data_iter; |
if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) |
return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id); |
vmw_piter_start(&data_iter, vsgt, 0); |
printf("%s epic fail\n",__FUNCTION__); |
if (unlikely(!vmw_piter_next(&data_iter))) |
return 0; |
if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2))) |
return -EINVAL; |
return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id); |
} |
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) |
{ |
if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) { |
if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) |
vmw_gmr2_unbind(dev_priv, gmr_id); |
return; |
} |
mutex_lock(&dev_priv->hw_mutex); |
vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); |
wmb(); |
vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0); |
mb(); |
mutex_unlock(&dev_priv->hw_mutex); |
} |
/drivers/video/drm/vmwgfx/vmwgfx_gmrid_manager.c |
---|
125,10 → 125,21 |
return -ENOMEM; |
spin_lock_init(&gman->lock); |
gman->max_gmr_pages = dev_priv->max_gmr_pages; |
gman->used_gmr_pages = 0; |
ida_init(&gman->gmr_ida); |
gman->max_gmr_ids = p_size; |
switch (p_size) { |
case VMW_PL_GMR: |
gman->max_gmr_ids = dev_priv->max_gmr_ids; |
gman->max_gmr_pages = dev_priv->max_gmr_pages; |
break; |
case VMW_PL_MOB: |
gman->max_gmr_ids = VMWGFX_NUM_MOB; |
gman->max_gmr_pages = dev_priv->max_mob_pages; |
break; |
default: |
BUG(); |
} |
man->priv = (void *) gman; |
return 0; |
} |
/drivers/video/drm/vmwgfx/vmwgfx_irq.c |
---|
33,7 → 33,7 |
#define VMW_FENCE_WRAP (1 << 24) |
irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS) |
irqreturn_t vmw_irq_handler(int irq, void *arg) |
{ |
struct drm_device *dev = (struct drm_device *)arg; |
struct vmw_private *dev_priv = vmw_priv(dev); |
/drivers/video/drm/vmwgfx/vmwgfx_kms.c |
---|
666,9 → 666,9 |
if (unlikely(surface->mip_levels[0] != 1 || |
surface->num_sizes != 1 || |
surface->sizes[0].width < mode_cmd->width || |
surface->sizes[0].height < mode_cmd->height || |
surface->sizes[0].depth != 1)) { |
surface->base_size.width < mode_cmd->width || |
surface->base_size.height < mode_cmd->height || |
surface->base_size.depth != 1)) { |
DRM_ERROR("Incompatible surface dimensions " |
"for requested mode.\n"); |
return -EINVAL; |
1517,7 → 1517,7 |
obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC); |
if (!obj) { |
ret = -EINVAL; |
ret = -ENOENT; |
goto out; |
} |
1640,7 → 1640,7 |
uint32_t pitch, |
uint32_t height) |
{ |
return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size; |
return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem; |
} |
/drivers/video/drm/vmwgfx/vmwgfx_mob.c |
---|
0,0 → 1,653 |
/************************************************************************** |
* |
* Copyright © 2012 VMware, Inc., Palo Alto, CA., USA |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
**************************************************************************/ |
#include "vmwgfx_drv.h" |
/* |
* If we set up the screen target otable, screen objects stop working. |
*/ |
#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1) |
#ifdef CONFIG_64BIT |
#define VMW_PPN_SIZE 8 |
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0 |
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1 |
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2 |
#else |
#define VMW_PPN_SIZE 4 |
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0 |
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1 |
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2 |
#endif |
/* |
* struct vmw_mob - Structure containing page table and metadata for a |
* Guest Memory OBject. |
* |
 * @num_pages: Number of pages that make up the page table.
 * @pt_level: The indirection level of the page table. 0-2.
 * @pt_root_page: DMA address of the level 0 page of the page table.
*/ |
struct vmw_mob { |
struct ttm_buffer_object *pt_bo; |
unsigned long num_pages; |
unsigned pt_level; |
dma_addr_t pt_root_page; |
uint32_t id; |
}; |
/* |
* struct vmw_otable - Guest Memory OBject table metadata |
* |
* @size: Size of the table (page-aligned). |
* @page_table: Pointer to a struct vmw_mob holding the page table. |
*/ |
struct vmw_otable { |
unsigned long size; |
struct vmw_mob *page_table; |
}; |
static int vmw_mob_pt_populate(struct vmw_private *dev_priv, |
struct vmw_mob *mob); |
static void vmw_mob_pt_setup(struct vmw_mob *mob, |
struct vmw_piter data_iter, |
unsigned long num_data_pages); |
/* |
* vmw_setup_otable_base - Issue an object table base setup command to |
* the device |
* |
* @dev_priv: Pointer to a device private structure |
* @type: Type of object table base |
 * @offset: Start of table offset into dev_priv::otable_bo
 * @otable: Pointer to otable metadata.
* |
* This function returns -ENOMEM if it fails to reserve fifo space, |
* and may block waiting for fifo space. |
*/ |
static int vmw_setup_otable_base(struct vmw_private *dev_priv, |
SVGAOTableType type, |
unsigned long offset, |
struct vmw_otable *otable) |
{ |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdSetOTableBase64 body; |
} *cmd; |
struct vmw_mob *mob; |
const struct vmw_sg_table *vsgt; |
struct vmw_piter iter; |
int ret; |
BUG_ON(otable->page_table != NULL); |
vsgt = vmw_bo_sg_table(dev_priv->otable_bo); |
vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); |
WARN_ON(!vmw_piter_next(&iter)); |
mob = vmw_mob_create(otable->size >> PAGE_SHIFT); |
if (unlikely(mob == NULL)) { |
DRM_ERROR("Failed creating OTable page table.\n"); |
return -ENOMEM; |
} |
if (otable->size <= PAGE_SIZE) { |
mob->pt_level = VMW_MOBFMT_PTDEPTH_0; |
mob->pt_root_page = vmw_piter_dma_addr(&iter); |
} else if (vsgt->num_regions == 1) { |
mob->pt_level = SVGA3D_MOBFMT_RANGE; |
mob->pt_root_page = vmw_piter_dma_addr(&iter); |
} else { |
ret = vmw_mob_pt_populate(dev_priv, mob); |
if (unlikely(ret != 0)) |
goto out_no_populate; |
vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT); |
mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; |
} |
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) { |
DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); |
goto out_no_fifo; |
} |
memset(cmd, 0, sizeof(*cmd)); |
cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64; |
cmd->header.size = sizeof(cmd->body); |
cmd->body.type = type; |
cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT); |
cmd->body.sizeInBytes = otable->size; |
cmd->body.validSizeInBytes = 0; |
cmd->body.ptDepth = mob->pt_level; |
/* |
	 * The device doesn't support this, but the otable size is
* determined at compile-time, so this BUG shouldn't trigger |
* randomly. |
*/ |
BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2); |
vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
otable->page_table = mob; |
return 0; |
out_no_fifo: |
out_no_populate: |
vmw_mob_destroy(mob); |
return ret; |
} |
/* |
* vmw_takedown_otable_base - Issue an object table base takedown command |
* to the device |
* |
* @dev_priv: Pointer to a device private structure |
 * @type: Type of object table base
 * @otable: Pointer to otable metadata.
* |
*/ |
static void vmw_takedown_otable_base(struct vmw_private *dev_priv, |
SVGAOTableType type, |
struct vmw_otable *otable) |
{ |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdSetOTableBase body; |
} *cmd; |
struct ttm_buffer_object *bo; |
if (otable->page_table == NULL) |
return; |
bo = otable->page_table->pt_bo; |
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for OTable "
			  "takedown.\n");
		return;
	}
memset(cmd, 0, sizeof(*cmd)); |
cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; |
cmd->header.size = sizeof(cmd->body); |
cmd->body.type = type; |
cmd->body.baseAddress = 0; |
cmd->body.sizeInBytes = 0; |
cmd->body.validSizeInBytes = 0; |
cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; |
vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
if (bo) { |
int ret; |
ret = ttm_bo_reserve(bo, false, true, false, NULL); |
BUG_ON(ret != 0); |
vmw_fence_single_bo(bo, NULL); |
ttm_bo_unreserve(bo); |
} |
vmw_mob_destroy(otable->page_table); |
otable->page_table = NULL; |
} |
/* |
* vmw_otables_setup - Set up guest backed memory object tables |
* |
* @dev_priv: Pointer to a device private structure |
* |
* Takes care of the device guest backed surface |
* initialization, by setting up the guest backed memory object tables. |
 * Returns 0 on success and various error codes on failure. A successful return
* means the object tables can be taken down using the vmw_otables_takedown |
* function. |
*/ |
int vmw_otables_setup(struct vmw_private *dev_priv) |
{ |
unsigned long offset; |
unsigned long bo_size; |
struct vmw_otable *otables; |
SVGAOTableType i; |
int ret; |
otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables), |
GFP_KERNEL); |
if (unlikely(otables == NULL)) { |
DRM_ERROR("Failed to allocate space for otable " |
"metadata.\n"); |
return -ENOMEM; |
} |
otables[SVGA_OTABLE_MOB].size = |
VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE; |
otables[SVGA_OTABLE_SURFACE].size = |
VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE; |
otables[SVGA_OTABLE_CONTEXT].size = |
VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE; |
otables[SVGA_OTABLE_SHADER].size = |
VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE; |
otables[SVGA_OTABLE_SCREEN_TARGET].size = |
VMWGFX_NUM_GB_SCREEN_TARGET * |
SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE; |
bo_size = 0; |
for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) { |
otables[i].size = |
(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK; |
bo_size += otables[i].size; |
} |
ret = ttm_bo_create(&dev_priv->bdev, bo_size, |
ttm_bo_type_device, |
&vmw_sys_ne_placement, |
0, false, NULL, |
&dev_priv->otable_bo); |
if (unlikely(ret != 0)) |
goto out_no_bo; |
ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL); |
BUG_ON(ret != 0); |
ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm); |
if (unlikely(ret != 0)) |
goto out_unreserve; |
ret = vmw_bo_map_dma(dev_priv->otable_bo); |
if (unlikely(ret != 0)) |
goto out_unreserve; |
ttm_bo_unreserve(dev_priv->otable_bo); |
offset = 0; |
for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) { |
ret = vmw_setup_otable_base(dev_priv, i, offset, |
&otables[i]); |
if (unlikely(ret != 0)) |
goto out_no_setup; |
offset += otables[i].size; |
} |
dev_priv->otables = otables; |
return 0; |
out_unreserve: |
ttm_bo_unreserve(dev_priv->otable_bo); |
out_no_setup: |
for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) |
vmw_takedown_otable_base(dev_priv, i, &otables[i]); |
ttm_bo_unref(&dev_priv->otable_bo); |
out_no_bo: |
kfree(otables); |
return ret; |
} |
/* |
* vmw_otables_takedown - Take down guest backed memory object tables |
* |
* @dev_priv: Pointer to a device private structure |
* |
* Take down the Guest Memory Object tables. |
*/ |
void vmw_otables_takedown(struct vmw_private *dev_priv) |
{ |
SVGAOTableType i; |
struct ttm_buffer_object *bo = dev_priv->otable_bo; |
int ret; |
for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) |
vmw_takedown_otable_base(dev_priv, i, |
&dev_priv->otables[i]); |
ret = ttm_bo_reserve(bo, false, true, false, NULL); |
BUG_ON(ret != 0); |
vmw_fence_single_bo(bo, NULL); |
ttm_bo_unreserve(bo); |
ttm_bo_unref(&dev_priv->otable_bo); |
kfree(dev_priv->otables); |
dev_priv->otables = NULL; |
} |
/* |
* vmw_mob_calculate_pt_pages - Calculate the number of page table pages |
* needed for a guest backed memory object. |
* |
* @data_pages: Number of data pages in the memory object buffer. |
*/ |
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages) |
{ |
unsigned long data_size = data_pages * PAGE_SIZE; |
unsigned long tot_size = 0; |
while (likely(data_size > PAGE_SIZE)) { |
data_size = DIV_ROUND_UP(data_size, PAGE_SIZE); |
data_size *= VMW_PPN_SIZE; |
tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK; |
} |
return tot_size >> PAGE_SHIFT; |
} |
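/*
 * Worked example for the loop above, assuming 4 KiB pages and
 * VMW_PPN_SIZE == 8: a 1 GiB buffer has 262144 data pages. The first
 * pass needs 262144 * 8 bytes of level-1 entries, i.e. 512 page-table
 * pages; the second pass needs 512 * 8 = 4096 bytes, i.e. one level-2
 * page. 4096 is not > PAGE_SIZE, so the loop ends: 513 pages in total.
 */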
/* |
* vmw_mob_create - Create a mob, but don't populate it. |
* |
* @data_pages: Number of data pages of the underlying buffer object. |
*/ |
struct vmw_mob *vmw_mob_create(unsigned long data_pages) |
{ |
struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL); |
if (unlikely(mob == NULL)) |
return NULL; |
mob->num_pages = vmw_mob_calculate_pt_pages(data_pages); |
return mob; |
} |
/* |
* vmw_mob_pt_populate - Populate the mob pagetable |
* |
* @mob: Pointer to the mob the pagetable of which we want to |
* populate. |
* |
* This function allocates memory to be used for the pagetable, and |
 * adjusts TTM memory accounting accordingly. Returns -ENOMEM if
* memory resources aren't sufficient and may cause TTM buffer objects |
* to be swapped out by using the TTM memory accounting function. |
*/ |
static int vmw_mob_pt_populate(struct vmw_private *dev_priv, |
struct vmw_mob *mob) |
{ |
int ret; |
BUG_ON(mob->pt_bo != NULL); |
ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE, |
ttm_bo_type_device, |
&vmw_sys_ne_placement, |
0, false, NULL, &mob->pt_bo); |
if (unlikely(ret != 0)) |
return ret; |
ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL); |
BUG_ON(ret != 0); |
ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm); |
if (unlikely(ret != 0)) |
goto out_unreserve; |
ret = vmw_bo_map_dma(mob->pt_bo); |
if (unlikely(ret != 0)) |
goto out_unreserve; |
ttm_bo_unreserve(mob->pt_bo); |
return 0; |
out_unreserve: |
ttm_bo_unreserve(mob->pt_bo); |
ttm_bo_unref(&mob->pt_bo); |
return ret; |
} |
/** |
* vmw_mob_assign_ppn - Assign a value to a page table entry |
* |
* @addr: Pointer to pointer to page table entry. |
* @val: The page table entry |
* |
* Assigns a value to a page table entry pointed to by *@addr and increments |
* *@addr according to the page table entry size. |
*/ |
#if (VMW_PPN_SIZE == 8) |
static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val) |
{ |
*((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT); |
*addr += 2; |
} |
#else |
static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val) |
{ |
*(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT); |
} |
#endif |
/* |
* vmw_mob_build_pt - Build a pagetable |
* |
 * @data_iter: Page iterator over the underlying buffer
 * object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_iter: Page iterator over the page table pages.
* |
* Returns the number of page table pages actually used. |
 * Maps page-table pages through a temporary kernel mapping while filling them.
*/ |
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter, |
unsigned long num_data_pages, |
struct vmw_piter *pt_iter) |
{ |
unsigned long pt_size = num_data_pages * VMW_PPN_SIZE; |
unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE); |
unsigned long pt_page; |
__le32 *addr, *save_addr; |
unsigned long i; |
struct page *page; |
save_addr = addr = AllocKernelSpace(4096); |
for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) { |
page = vmw_piter_page(pt_iter); |
MapPage(save_addr,(addr_t)page, 3); |
for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) { |
vmw_mob_assign_ppn(&addr, |
vmw_piter_dma_addr(data_iter)); |
if (unlikely(--num_data_pages == 0)) |
break; |
WARN_ON(!vmw_piter_next(data_iter)); |
} |
vmw_piter_next(pt_iter); |
} |
FreeKernelSpace(save_addr); |
return num_pt_pages; |
} |
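/*
 * The page iterator contract assumed by vmw_mob_build_pt() and its
 * callers: vmw_piter_start() positions the iterator before the first
 * page, vmw_piter_next() advances it and returns false past the end,
 * and vmw_piter_dma_addr()/vmw_piter_page() read the current page.
 * A minimal illustration of that contract (a sketch, not driver code):
 */
static unsigned long vmw_piter_count_pages(const struct vmw_sg_table *vsgt)
{
	struct vmw_piter iter;
	unsigned long count = 0;
	vmw_piter_start(&iter, vsgt, 0);
	while (vmw_piter_next(&iter))
		++count;
	return count;
}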
/* |
 * vmw_mob_pt_setup - Set up a multilevel mob pagetable
 *
 * @mob: Pointer to a mob whose page table needs setting up.
 * @data_iter: Page iterator over the buffer object's data
 * pages.
* @num_data_pages: Number of buffer object data pages. |
* |
 * Iteratively sets up a multilevel mob page table, one level per pass.
*/ |
static void vmw_mob_pt_setup(struct vmw_mob *mob, |
struct vmw_piter data_iter, |
unsigned long num_data_pages) |
{ |
unsigned long num_pt_pages = 0; |
struct ttm_buffer_object *bo = mob->pt_bo; |
struct vmw_piter save_pt_iter; |
struct vmw_piter pt_iter; |
const struct vmw_sg_table *vsgt; |
int ret; |
ret = ttm_bo_reserve(bo, false, true, false, NULL); |
BUG_ON(ret != 0); |
vsgt = vmw_bo_sg_table(bo); |
vmw_piter_start(&pt_iter, vsgt, 0); |
BUG_ON(!vmw_piter_next(&pt_iter)); |
mob->pt_level = 0; |
while (likely(num_data_pages > 1)) { |
++mob->pt_level; |
BUG_ON(mob->pt_level > 2); |
save_pt_iter = pt_iter; |
num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages, |
&pt_iter); |
data_iter = save_pt_iter; |
num_data_pages = num_pt_pages; |
} |
mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter); |
ttm_bo_unreserve(bo); |
} |
/* |
* vmw_mob_destroy - Destroy a mob, unpopulating first if necessary. |
* |
* @mob: Pointer to a mob to destroy. |
*/ |
void vmw_mob_destroy(struct vmw_mob *mob) |
{ |
if (mob->pt_bo) |
ttm_bo_unref(&mob->pt_bo); |
kfree(mob); |
} |
/* |
* vmw_mob_unbind - Hide a mob from the device. |
* |
* @dev_priv: Pointer to a device private. |
 * @mob: Pointer to the mob to unbind.
*/ |
void vmw_mob_unbind(struct vmw_private *dev_priv, |
struct vmw_mob *mob) |
{ |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdDestroyGBMob body; |
} *cmd; |
int ret; |
struct ttm_buffer_object *bo = mob->pt_bo; |
if (bo) { |
ret = ttm_bo_reserve(bo, false, true, false, NULL); |
/* |
	 * No one else should be using this buffer.
*/ |
BUG_ON(ret != 0); |
} |
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) { |
DRM_ERROR("Failed reserving FIFO space for Memory " |
"Object unbinding.\n"); |
	} else {
		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.mobid = mob->id;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}
if (bo) { |
vmw_fence_single_bo(bo, NULL); |
ttm_bo_unreserve(bo); |
} |
vmw_3d_resource_dec(dev_priv, false); |
} |
/* |
* vmw_mob_bind - Make a mob visible to the device after first |
* populating it if necessary. |
* |
* @dev_priv: Pointer to a device private. |
* @mob: Pointer to the mob we're making visible. |
 * @vsgt: Pointer to a struct vmw_sg_table describing the data pages of the
 * underlying buffer object.
* @num_data_pages: Number of data pages of the underlying buffer |
* object. |
* @mob_id: Device id of the mob to bind |
* |
* This function is intended to be interfaced with the ttm_tt backend |
* code. |
*/ |
int vmw_mob_bind(struct vmw_private *dev_priv, |
struct vmw_mob *mob, |
const struct vmw_sg_table *vsgt, |
unsigned long num_data_pages, |
int32_t mob_id) |
{ |
int ret; |
bool pt_set_up = false; |
struct vmw_piter data_iter; |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdDefineGBMob64 body; |
} *cmd; |
mob->id = mob_id; |
vmw_piter_start(&data_iter, vsgt, 0); |
if (unlikely(!vmw_piter_next(&data_iter))) |
return 0; |
if (likely(num_data_pages == 1)) { |
mob->pt_level = VMW_MOBFMT_PTDEPTH_0; |
mob->pt_root_page = vmw_piter_dma_addr(&data_iter); |
} else if (vsgt->num_regions == 1) { |
mob->pt_level = SVGA3D_MOBFMT_RANGE; |
mob->pt_root_page = vmw_piter_dma_addr(&data_iter); |
} else if (unlikely(mob->pt_bo == NULL)) { |
ret = vmw_mob_pt_populate(dev_priv, mob); |
if (unlikely(ret != 0)) |
return ret; |
vmw_mob_pt_setup(mob, data_iter, num_data_pages); |
pt_set_up = true; |
mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; |
} |
(void) vmw_3d_resource_inc(dev_priv, false); |
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) { |
DRM_ERROR("Failed reserving FIFO space for Memory " |
"Object binding.\n"); |
goto out_no_cmd_space; |
} |
cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64; |
cmd->header.size = sizeof(cmd->body); |
cmd->body.mobid = mob_id; |
cmd->body.ptDepth = mob->pt_level; |
cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT); |
cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE; |
vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
return 0; |
out_no_cmd_space: |
vmw_3d_resource_dec(dev_priv, false); |
if (pt_set_up) |
ttm_bo_unref(&mob->pt_bo); |
return -ENOMEM; |
} |
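/*
 * A minimal sketch tying the mob helpers above together (illustrative
 * only; vsgt, num_pages and mob_id are assumed caller-supplied, and the
 * GB commands that would reference the mob are elided):
 */
static int vmw_mob_lifecycle_sketch(struct vmw_private *dev_priv,
				    const struct vmw_sg_table *vsgt,
				    unsigned long num_pages,
				    int32_t mob_id)
{
	struct vmw_mob *mob = vmw_mob_create(num_pages);
	int ret;
	if (unlikely(mob == NULL))
		return -ENOMEM;
	/* Populates the page table if needed and defines the mob. */
	ret = vmw_mob_bind(dev_priv, mob, vsgt, num_pages, mob_id);
	if (unlikely(ret != 0)) {
		vmw_mob_destroy(mob);
		return ret;
	}
	/* ... submit GB commands that reference mob_id here ... */
	vmw_mob_unbind(dev_priv, mob);
	vmw_mob_destroy(mob);
	return 0;
}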
/drivers/video/drm/vmwgfx/vmwgfx_resource.c |
---|
32,8 → 32,10 |
#include <drm/drmP.h> |
#include "vmwgfx_resource_priv.h" |
#define VMW_RES_EVICT_ERR_COUNT 10 |
struct vmw_user_dma_buffer { |
struct ttm_base_object base; |
struct ttm_prime_object prime; |
struct vmw_dma_buffer dma; |
}; |
213,6 → 215,7 |
res->func = func; |
INIT_LIST_HEAD(&res->lru_head); |
INIT_LIST_HEAD(&res->mob_head); |
INIT_LIST_HEAD(&res->binding_head); |
res->id = -1; |
res->backup = NULL; |
res->backup_offset = 0; |
295,7 → 298,7 |
if (unlikely(base == NULL)) |
return -EINVAL; |
if (unlikely(base->object_type != converter->object_type)) |
if (unlikely(ttm_base_object_type(base) != converter->object_type)) |
goto out_bad_resource; |
res = converter->base_obj_to_res(base); |
350,6 → 353,38 |
/** |
* Buffer management. |
*/ |
/** |
* vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers |
* |
* @dev_priv: Pointer to a struct vmw_private identifying the device. |
* @size: The requested buffer size. |
* @user: Whether this is an ordinary dma buffer or a user dma buffer. |
*/ |
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size, |
bool user) |
{ |
static size_t struct_size, user_struct_size; |
size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; |
size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *)); |
if (unlikely(struct_size == 0)) { |
size_t backend_size = ttm_round_pot(vmw_tt_size); |
struct_size = backend_size + |
ttm_round_pot(sizeof(struct vmw_dma_buffer)); |
user_struct_size = backend_size + |
ttm_round_pot(sizeof(struct vmw_user_dma_buffer)); |
} |
if (dev_priv->map_mode == vmw_dma_alloc_coherent) |
page_array_size += |
ttm_round_pot(num_pages * sizeof(dma_addr_t)); |
return ((user) ? user_struct_size : struct_size) + |
page_array_size; |
} |
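/*
 * Worked example for the accounting above, assuming a 64-bit build and
 * map_mode != vmw_dma_alloc_coherent: a 1 MiB request gives num_pages =
 * 256, so page_array_size = ttm_round_pot(256 * sizeof(void *)) = 2048
 * bytes, and the result is struct_size (or user_struct_size for a user
 * buffer) plus those 2048 bytes.
 */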
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) |
{ |
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); |
357,6 → 392,13 |
kfree(vmw_bo); |
} |
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) |
{ |
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); |
// ttm_prime_object_kfree(vmw_user_bo, prime); |
} |
int vmw_dmabuf_init(struct vmw_private *dev_priv, |
struct vmw_dma_buffer *vmw_bo, |
size_t size, struct ttm_placement *placement, |
366,28 → 408,23 |
struct ttm_bo_device *bdev = &dev_priv->bdev; |
size_t acc_size; |
int ret; |
bool user = (bo_free == &vmw_user_dmabuf_destroy); |
BUG_ON(!bo_free); |
BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free))); |
acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer)); |
acc_size = vmw_dmabuf_acc_size(dev_priv, size, user); |
memset(vmw_bo, 0, sizeof(*vmw_bo)); |
INIT_LIST_HEAD(&vmw_bo->res_list); |
ret = ttm_bo_init(bdev, &vmw_bo->base, size, |
ttm_bo_type_device, placement, |
(user) ? ttm_bo_type_device : |
ttm_bo_type_kernel, placement, |
0, interruptible, |
NULL, acc_size, NULL, bo_free); |
return ret; |
} |
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) |
{ |
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); |
// ttm_base_object_kfree(vmw_user_bo, base); |
} |
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) |
{ |
struct vmw_user_dma_buffer *vmw_user_bo; |
399,11 → 436,27 |
if (unlikely(base == NULL)) |
return; |
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); |
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, |
prime.base); |
bo = &vmw_user_bo->dma.base; |
ttm_bo_unref(&bo); |
} |
static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base, |
enum ttm_ref_type ref_type) |
{ |
struct vmw_user_dma_buffer *user_bo; |
user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base); |
switch (ref_type) { |
case TTM_REF_SYNCCPU_WRITE: |
ttm_bo_synccpu_write_release(&user_bo->dma.base); |
break; |
default: |
BUG(); |
} |
} |
/** |
* vmw_user_dmabuf_alloc - Allocate a user dma buffer |
* |
434,6 → 487,8 |
} |
ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size, |
(dev_priv->has_mob) ? |
&vmw_sys_placement : |
&vmw_vram_sys_placement, true, |
&vmw_user_dmabuf_destroy); |
if (unlikely(ret != 0)) |
440,18 → 495,22 |
return ret; |
tmp = ttm_bo_reference(&user_bo->dma.base); |
ret = ttm_base_object_init(tfile, |
&user_bo->base, |
/* |
ret = ttm_prime_object_init(tfile, |
size, |
&user_bo->prime, |
shareable, |
ttm_buffer_type, |
&vmw_user_dmabuf_release, NULL); |
&vmw_user_dmabuf_release, |
&vmw_user_dmabuf_ref_obj_release); |
if (unlikely(ret != 0)) { |
ttm_bo_unref(&tmp); |
goto out_no_base_object; |
} |
*/ |
*p_dma_buf = &user_bo->dma; |
*handle = user_bo->base.hash.key; |
*handle = user_bo->prime.base.hash.key; |
out_no_base_object: |
return ret; |
473,10 → 532,134 |
return -EPERM; |
vmw_user_bo = vmw_user_dma_buffer(bo); |
return (vmw_user_bo->base.tfile == tfile || |
vmw_user_bo->base.shareable) ? 0 : -EPERM; |
return (vmw_user_bo->prime.base.tfile == tfile || |
vmw_user_bo->prime.base.shareable) ? 0 : -EPERM; |
} |
/** |
* vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu |
* access, idling previous GPU operations on the buffer and optionally |
* blocking it from further command submissions. |
* |
* @user_bo: Pointer to the buffer object being grabbed for CPU access |
* @tfile: Identifying the caller. |
* @flags: Flags indicating how the grab should be performed. |
* |
* A blocking grab will be automatically released when @tfile is closed. |
*/ |
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo, |
struct ttm_object_file *tfile, |
uint32_t flags) |
{ |
struct ttm_buffer_object *bo = &user_bo->dma.base; |
bool existed; |
int ret = 0; |
if (flags & drm_vmw_synccpu_allow_cs) { |
struct ttm_bo_device *bdev = bo->bdev; |
// spin_lock(&bdev->fence_lock); |
// ret = ttm_bo_wait(bo, false, true, |
// !!(flags & drm_vmw_synccpu_dontblock)); |
// spin_unlock(&bdev->fence_lock); |
return ret; |
} |
// ret = ttm_bo_synccpu_write_grab |
// (bo, !!(flags & drm_vmw_synccpu_dontblock)); |
// if (unlikely(ret != 0)) |
// return ret; |
ret = ttm_ref_object_add(tfile, &user_bo->prime.base, |
TTM_REF_SYNCCPU_WRITE, &existed); |
// if (ret != 0 || existed) |
// ttm_bo_synccpu_write_release(&user_bo->dma.base); |
return ret; |
} |
/** |
* vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access, |
* and unblock command submission on the buffer if blocked. |
* |
* @handle: Handle identifying the buffer object. |
* @tfile: Identifying the caller. |
* @flags: Flags indicating the type of release. |
*/ |
static int vmw_user_dmabuf_synccpu_release(uint32_t handle, |
struct ttm_object_file *tfile, |
uint32_t flags) |
{ |
if (!(flags & drm_vmw_synccpu_allow_cs)) |
return ttm_ref_object_base_unref(tfile, handle, |
TTM_REF_SYNCCPU_WRITE); |
return 0; |
} |
/** |
* vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu |
* functionality. |
* |
* @dev: Identifies the drm device. |
* @data: Pointer to the ioctl argument. |
* @file_priv: Identifies the caller. |
* |
* This function checks the ioctl arguments for validity and calls the |
* relevant synccpu functions. |
*/ |
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |
{ |
struct drm_vmw_synccpu_arg *arg = |
(struct drm_vmw_synccpu_arg *) data; |
struct vmw_dma_buffer *dma_buf; |
struct vmw_user_dma_buffer *user_bo; |
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
int ret; |
if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 |
|| (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write | |
drm_vmw_synccpu_dontblock | |
drm_vmw_synccpu_allow_cs)) != 0) { |
DRM_ERROR("Illegal synccpu flags.\n"); |
return -EINVAL; |
} |
switch (arg->op) { |
case drm_vmw_synccpu_grab: |
ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf); |
if (unlikely(ret != 0)) |
return ret; |
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, |
dma); |
ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); |
vmw_dmabuf_unreference(&dma_buf); |
if (unlikely(ret != 0 && ret != -ERESTARTSYS && |
ret != -EBUSY)) { |
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", |
(unsigned int) arg->handle); |
return ret; |
} |
break; |
case drm_vmw_synccpu_release: |
ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile, |
arg->flags); |
if (unlikely(ret != 0)) { |
DRM_ERROR("Failed synccpu release on handle 0x%08x.\n", |
(unsigned int) arg->handle); |
return ret; |
} |
break; |
default: |
DRM_ERROR("Invalid synccpu operation.\n"); |
return -EINVAL; |
} |
return 0; |
} |
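/* |
* Hypothetical user-space counterpart of the ioctl above (a sketch |
* only; it assumes libdrm's drmCommandWrite() and the struct |
* drm_vmw_synccpu_arg layout from vmwgfx_drm.h, error handling |
* elided): |
* |
* struct drm_vmw_synccpu_arg arg = { |
* .handle = bo_handle, |
* .op = drm_vmw_synccpu_grab, |
* .flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write, |
* }; |
* drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg)); |
* ... CPU reads and writes of the buffer go here ... |
* arg.op = drm_vmw_synccpu_release; |
* drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg)); |
*/ |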
#if 0 |
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |
538,7 → 721,7 |
return -ESRCH; |
} |
if (unlikely(base->object_type != ttm_buffer_type)) { |
if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) { |
ttm_base_object_unref(&base); |
printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n", |
(unsigned long)handle); |
545,7 → 728,8 |
return -EINVAL; |
} |
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); |
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, |
prime.base); |
(void)ttm_bo_reference(&vmw_user_bo->dma.base); |
ttm_base_object_unref(&base); |
*out = &vmw_user_bo->dma; |
554,7 → 738,8 |
} |
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, |
struct vmw_dma_buffer *dma_buf) |
struct vmw_dma_buffer *dma_buf, |
uint32_t *handle) |
{ |
struct vmw_user_dma_buffer *user_bo; |
562,7 → 747,10 |
return -EINVAL; |
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); |
return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL); |
*handle = user_bo->prime.base.hash.key; |
return ttm_ref_object_add(tfile, &user_bo->prime.base, |
TTM_REF_USAGE, NULL); |
} |
/* |
785,42 → 973,23 |
{ |
struct vmw_private *dev_priv = vmw_priv(dev); |
struct vmw_master *vmaster = vmw_master(file_priv->master); |
struct vmw_user_dma_buffer *vmw_user_bo; |
struct ttm_buffer_object *tmp; |
struct vmw_dma_buffer *dma_buf; |
int ret; |
args->pitch = args->width * ((args->bpp + 7) / 8); |
args->size = args->pitch * args->height; |
vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL); |
if (vmw_user_bo == NULL) |
return -ENOMEM; |
ret = ttm_read_lock(&vmaster->lock, true); |
if (ret != 0) { |
kfree(vmw_user_bo); |
if (unlikely(ret != 0)) |
return ret; |
} |
ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size, |
&vmw_vram_sys_placement, true, |
&vmw_user_dmabuf_destroy); |
if (ret != 0) |
ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, |
args->size, false, &args->handle, |
&dma_buf); |
if (unlikely(ret != 0)) |
goto out_no_dmabuf; |
tmp = ttm_bo_reference(&vmw_user_bo->dma.base); |
ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, |
&vmw_user_bo->base, |
false, |
ttm_buffer_type, |
&vmw_user_dmabuf_release, NULL); |
if (unlikely(ret != 0)) |
goto out_no_base_object; |
args->handle = vmw_user_bo->base.hash.key; |
out_no_base_object: |
ttm_bo_unref(&tmp); |
vmw_dmabuf_unreference(&dma_buf); |
out_no_dmabuf: |
ttm_read_unlock(&vmaster->lock); |
return ret; |
827,6 → 996,16 |
} |
#endif |
/** |
* vmw_dumb_map_offset - Return the address space offset of a dumb buffer |
* |
* @file_priv: Pointer to a struct drm_file identifying the caller. |
* @dev: Pointer to the drm device. |
* @handle: Handle identifying the dumb buffer. |
* @offset: The address space offset returned. |
* |
* This is a driver callback for the core drm dumb_map_offset functionality. |
*/ |
int vmw_dumb_map_offset(struct drm_file *file_priv, |
struct drm_device *dev, uint32_t handle, |
uint64_t *offset) |
844,6 → 1023,15 |
return 0; |
} |
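/* |
* User space reaches this callback through the generic dumb-buffer |
* API; a typical mapping sequence (a sketch using the core |
* DRM_IOCTL_MODE_MAP_DUMB ioctl, error handling elided) is: |
* |
* struct drm_mode_map_dumb map = { .handle = handle }; |
* drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map); |
* void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, |
* MAP_SHARED, fd, map.offset); |
*/ |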
/** |
* vmw_dumb_destroy - Destroy a dumb buffer |
* |
* @file_priv: Pointer to a struct drm_file identifying the caller. |
* @dev: Pointer to the drm device. |
* @handle: Handle identifying the dumb buffer. |
* |
* This is a driver callback for the core drm dumb_destroy functionality. |
*/ |
int vmw_dumb_destroy(struct drm_file *file_priv, |
struct drm_device *dev, |
uint32_t handle) |
975,7 → 1163,7 |
if (new_backup) |
res->backup_offset = new_backup_offset; |
if (!res->func->may_evict) |
if (!res->func->may_evict || res->id == -1) |
return; |
write_lock(&dev_priv->resource_lock); |
997,7 → 1185,6 |
*/ |
static int |
vmw_resource_check_buffer(struct vmw_resource *res, |
struct ww_acquire_ctx *ticket, |
bool interruptible, |
struct ttm_validate_buffer *val_buf) |
{ |
1014,7 → 1201,7 |
INIT_LIST_HEAD(&val_list); |
val_buf->bo = ttm_bo_reference(&res->backup->base); |
list_add_tail(&val_buf->head, &val_list); |
ret = ttm_eu_reserve_buffers(ticket, &val_list); |
ret = ttm_eu_reserve_buffers(NULL, &val_list); |
if (unlikely(ret != 0)) |
goto out_no_reserve; |
1032,7 → 1219,7 |
return 0; |
out_no_validate: |
ttm_eu_backoff_reservation(ticket, &val_list); |
ttm_eu_backoff_reservation(NULL, &val_list); |
out_no_reserve: |
ttm_bo_unref(&val_buf->bo); |
if (backup_dirty) |
1077,8 → 1264,7 |
* @val_buf: Backup buffer information. |
*/ |
static void |
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket, |
struct ttm_validate_buffer *val_buf) |
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) |
{ |
struct list_head val_list; |
1087,7 → 1273,7 |
INIT_LIST_HEAD(&val_list); |
list_add_tail(&val_buf->head, &val_list); |
ttm_eu_backoff_reservation(ticket, &val_list); |
ttm_eu_backoff_reservation(NULL, &val_list); |
ttm_bo_unref(&val_buf->bo); |
} |
1096,18 → 1282,18 |
* to a backup buffer. |
* |
* @res: The resource to evict. |
* @interruptible: Whether waiting should be interruptible. |
*/ |
int vmw_resource_do_evict(struct vmw_resource *res) |
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) |
{ |
struct ttm_validate_buffer val_buf; |
const struct vmw_res_func *func = res->func; |
struct ww_acquire_ctx ticket; |
int ret; |
BUG_ON(!func->may_evict); |
val_buf.bo = NULL; |
ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf); |
ret = vmw_resource_check_buffer(res, interruptible, &val_buf); |
if (unlikely(ret != 0)) |
return ret; |
1122,7 → 1308,7 |
res->backup_dirty = true; |
res->res_dirty = false; |
out_no_unbind: |
vmw_resource_backoff_reservation(&ticket, &val_buf); |
vmw_resource_backoff_reservation(&val_buf); |
return ret; |
} |
1146,6 → 1332,7 |
struct vmw_private *dev_priv = res->dev_priv; |
struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type]; |
struct ttm_validate_buffer val_buf; |
unsigned err_count = 0; |
if (likely(!res->func->may_evict)) |
return 0; |
1160,7 → 1347,7 |
write_lock(&dev_priv->resource_lock); |
if (list_empty(lru_list) || !res->func->may_evict) { |
DRM_ERROR("Out of device device id entries " |
DRM_ERROR("Out of device device resources " |
"for %s.\n", res->func->type_name); |
ret = -EBUSY; |
write_unlock(&dev_priv->resource_lock); |
1173,8 → 1360,20 |
list_del_init(&evict_res->lru_head); |
write_unlock(&dev_priv->resource_lock); |
vmw_resource_do_evict(evict_res); |
ret = vmw_resource_do_evict(evict_res, true); |
if (unlikely(ret != 0)) { |
write_lock(&dev_priv->resource_lock); |
list_add_tail(&evict_res->lru_head, lru_list); |
write_unlock(&dev_priv->resource_lock); |
if (ret == -ERESTARTSYS || |
++err_count > VMW_RES_EVICT_ERR_COUNT) { |
vmw_resource_unreference(&evict_res); |
goto out_no_validate; |
} |
} |
vmw_resource_unreference(&evict_res); |
} while (1); |
if (unlikely(ret != 0)) |
1234,7 → 1433,17 |
* @mem: The struct ttm_mem_reg indicating to which memory |
* region the move is taking place. |
* |
* For now does nothing. |
* Evicts the Guest Backed hardware resource if the backup |
* buffer is being moved out of MOB memory. |
* Note that this function should not race with the resource |
* validation code as long as it accesses only members of struct |
* resource that remain static while bo::res is !NULL and |
* while we have @bo reserved. struct resource::backup is *not* a |
* static member. The resource validation code will take care |
* to set @bo::res to NULL, while having @bo reserved when the |
* buffer is no longer bound to the resource, so @bo::res can be |
* used to determine whether there is a need to unbind and whether |
* it is safe to unbind. |
*/ |
void vmw_resource_move_notify(struct ttm_buffer_object *bo, |
struct ttm_mem_reg *mem) |
1258,7 → 1467,7 |
* @type: The resource type to evict |
* |
* To avoid thrashing or starvation, or as part of the hibernation sequence, |
* evict all evictable resources of a specific type. |
* try to evict all evictable resources of a specific type. |
*/ |
static void vmw_resource_evict_type(struct vmw_private *dev_priv, |
enum vmw_res_type type) |
1265,6 → 1474,8 |
{ |
struct list_head *lru_list = &dev_priv->res_lru[type]; |
struct vmw_resource *evict_res; |
unsigned err_count = 0; |
int ret; |
do { |
write_lock(&dev_priv->resource_lock); |
1277,8 → 1488,19 |
lru_head)); |
list_del_init(&evict_res->lru_head); |
write_unlock(&dev_priv->resource_lock); |
vmw_resource_do_evict(evict_res); |
ret = vmw_resource_do_evict(evict_res, false); |
if (unlikely(ret != 0)) { |
write_lock(&dev_priv->resource_lock); |
list_add_tail(&evict_res->lru_head, lru_list); |
write_unlock(&dev_priv->resource_lock); |
if (++err_count > VMW_RES_EVICT_ERR_COUNT) { |
vmw_resource_unreference(&evict_res); |
return; |
} |
} |
vmw_resource_unreference(&evict_res); |
} while (1); |
out_unlock: |
/drivers/video/drm/vmwgfx/vmwgfx_scrn.c |
---|
317,6 → 317,7 |
crtc->fb = NULL; |
crtc->x = 0; |
crtc->y = 0; |
crtc->enabled = false; |
vmw_sou_del_active(dev_priv, sou); |
377,6 → 378,7 |
crtc->fb = NULL; |
crtc->x = 0; |
crtc->y = 0; |
crtc->enabled = false; |
return ret; |
} |
389,6 → 391,7 |
crtc->fb = fb; |
crtc->x = set->x; |
crtc->y = set->y; |
crtc->enabled = true; |
return 0; |
} |
510,9 → 513,6 |
dev_priv->sou_priv->num_implicit = 0; |
dev_priv->sou_priv->implicit_fb = NULL; |
// ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS); |
// if (unlikely(ret != 0)) |
// goto err_free; |
ret = drm_mode_create_dirty_info_property(dev); |
if (unlikely(ret != 0)) |
/drivers/video/drm/vmwgfx/vmwgfx_shader.c |
---|
0,0 → 1,443 |
/************************************************************************** |
* |
* Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
**************************************************************************/ |
#include "vmwgfx_drv.h" |
#include "vmwgfx_resource_priv.h" |
#include "ttm/ttm_placement.h" |
struct vmw_shader { |
struct vmw_resource res; |
SVGA3dShaderType type; |
uint32_t size; |
}; |
struct vmw_user_shader { |
struct ttm_base_object base; |
struct vmw_shader shader; |
}; |
static void vmw_user_shader_free(struct vmw_resource *res); |
static struct vmw_resource * |
vmw_user_shader_base_to_res(struct ttm_base_object *base); |
static int vmw_gb_shader_create(struct vmw_resource *res); |
static int vmw_gb_shader_bind(struct vmw_resource *res, |
struct ttm_validate_buffer *val_buf); |
static int vmw_gb_shader_unbind(struct vmw_resource *res, |
bool readback, |
struct ttm_validate_buffer *val_buf); |
static int vmw_gb_shader_destroy(struct vmw_resource *res); |
static uint64_t vmw_user_shader_size; |
static const struct vmw_user_resource_conv user_shader_conv = { |
.object_type = VMW_RES_SHADER, |
.base_obj_to_res = vmw_user_shader_base_to_res, |
.res_free = vmw_user_shader_free |
}; |
const struct vmw_user_resource_conv *user_shader_converter = |
&user_shader_conv; |
static const struct vmw_res_func vmw_gb_shader_func = { |
.res_type = vmw_res_shader, |
.needs_backup = true, |
.may_evict = true, |
.type_name = "guest backed shaders", |
.backup_placement = &vmw_mob_placement, |
.create = vmw_gb_shader_create, |
.destroy = vmw_gb_shader_destroy, |
.bind = vmw_gb_shader_bind, |
.unbind = vmw_gb_shader_unbind |
}; |
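/* |
* For orientation (a sketch, not executable code): the generic |
* eviction path in vmwgfx_resource.c, cf. vmw_resource_do_evict() |
* earlier in this diff, drives these ops roughly as: |
* |
* vmw_resource_check_buffer(res, ...); -- reserve and validate backup |
* func->unbind(res, res->res_dirty, &val_buf); -- read back to the MOB |
* func->destroy(res); -- drop the hardware resource id |
* vmw_resource_backoff_reservation(&val_buf); |
*/ |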
/** |
* Shader management: |
*/ |
static inline struct vmw_shader * |
vmw_res_to_shader(struct vmw_resource *res) |
{ |
return container_of(res, struct vmw_shader, res); |
} |
static void vmw_hw_shader_destroy(struct vmw_resource *res) |
{ |
(void) vmw_gb_shader_destroy(res); |
} |
static int vmw_gb_shader_init(struct vmw_private *dev_priv, |
struct vmw_resource *res, |
uint32_t size, |
uint64_t offset, |
SVGA3dShaderType type, |
struct vmw_dma_buffer *byte_code, |
void (*res_free) (struct vmw_resource *res)) |
{ |
struct vmw_shader *shader = vmw_res_to_shader(res); |
int ret; |
ret = vmw_resource_init(dev_priv, res, true, |
res_free, &vmw_gb_shader_func); |
if (unlikely(ret != 0)) { |
if (res_free) |
res_free(res); |
else |
kfree(res); |
return ret; |
} |
res->backup_size = size; |
if (byte_code) { |
res->backup = vmw_dmabuf_reference(byte_code); |
res->backup_offset = offset; |
} |
shader->size = size; |
shader->type = type; |
vmw_resource_activate(res, vmw_hw_shader_destroy); |
return 0; |
} |
static int vmw_gb_shader_create(struct vmw_resource *res) |
{ |
struct vmw_private *dev_priv = res->dev_priv; |
struct vmw_shader *shader = vmw_res_to_shader(res); |
int ret; |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdDefineGBShader body; |
} *cmd; |
if (likely(res->id != -1)) |
return 0; |
ret = vmw_resource_alloc_id(res); |
if (unlikely(ret != 0)) { |
DRM_ERROR("Failed to allocate a shader id.\n"); |
goto out_no_id; |
} |
if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) { |
ret = -EBUSY; |
goto out_no_fifo; |
} |
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) { |
DRM_ERROR("Failed reserving FIFO space for shader " |
"creation.\n"); |
ret = -ENOMEM; |
goto out_no_fifo; |
} |
cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER; |
cmd->header.size = sizeof(cmd->body); |
cmd->body.shid = res->id; |
cmd->body.type = shader->type; |
cmd->body.sizeInBytes = shader->size; |
vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
(void) vmw_3d_resource_inc(dev_priv, false); |
return 0; |
out_no_fifo: |
vmw_resource_release_id(res); |
out_no_id: |
return ret; |
} |
static int vmw_gb_shader_bind(struct vmw_resource *res, |
struct ttm_validate_buffer *val_buf) |
{ |
struct vmw_private *dev_priv = res->dev_priv; |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdBindGBShader body; |
} *cmd; |
struct ttm_buffer_object *bo = val_buf->bo; |
BUG_ON(bo->mem.mem_type != VMW_PL_MOB); |
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) { |
DRM_ERROR("Failed reserving FIFO space for shader " |
"binding.\n"); |
return -ENOMEM; |
} |
cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER; |
cmd->header.size = sizeof(cmd->body); |
cmd->body.shid = res->id; |
cmd->body.mobid = bo->mem.start; |
cmd->body.offsetInBytes = 0; |
res->backup_dirty = false; |
vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
return 0; |
} |
static int vmw_gb_shader_unbind(struct vmw_resource *res, |
bool readback, |
struct ttm_validate_buffer *val_buf) |
{ |
struct vmw_private *dev_priv = res->dev_priv; |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdBindGBShader body; |
} *cmd; |
struct vmw_fence_obj *fence; |
BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB); |
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) { |
DRM_ERROR("Failed reserving FIFO space for shader " |
"unbinding.\n"); |
return -ENOMEM; |
} |
cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER; |
cmd->header.size = sizeof(cmd->body); |
cmd->body.shid = res->id; |
cmd->body.mobid = SVGA3D_INVALID_ID; |
cmd->body.offsetInBytes = 0; |
vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
/* |
* Create a fence object and fence the backup buffer. |
*/ |
(void) vmw_execbuf_fence_commands(NULL, dev_priv, |
&fence, NULL); |
vmw_fence_single_bo(val_buf->bo, fence); |
if (likely(fence != NULL)) |
vmw_fence_obj_unreference(&fence); |
return 0; |
} |
static int vmw_gb_shader_destroy(struct vmw_resource *res) |
{ |
struct vmw_private *dev_priv = res->dev_priv; |
struct { |
SVGA3dCmdHeader header; |
SVGA3dCmdDestroyGBShader body; |
} *cmd; |
if (likely(res->id == -1)) |
return 0; |
mutex_lock(&dev_priv->binding_mutex); |
vmw_context_binding_res_list_kill(&res->binding_head); |
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
if (unlikely(cmd == NULL)) { |
DRM_ERROR("Failed reserving FIFO space for shader " |
"destruction.\n"); |
mutex_unlock(&dev_priv->binding_mutex); |
return -ENOMEM; |
} |
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER; |
cmd->header.size = sizeof(cmd->body); |
cmd->body.shid = res->id; |
vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
mutex_unlock(&dev_priv->binding_mutex); |
vmw_resource_release_id(res); |
vmw_3d_resource_dec(dev_priv, false); |
return 0; |
} |
/** |
* User-space shader management: |
*/ |
static struct vmw_resource * |
vmw_user_shader_base_to_res(struct ttm_base_object *base) |
{ |
return &(container_of(base, struct vmw_user_shader, base)-> |
shader.res); |
} |
static void vmw_user_shader_free(struct vmw_resource *res) |
{ |
struct vmw_user_shader *ushader = |
container_of(res, struct vmw_user_shader, shader.res); |
struct vmw_private *dev_priv = res->dev_priv; |
// ttm_base_object_kfree(ushader, base); |
// ttm_mem_global_free(vmw_mem_glob(dev_priv), |
// vmw_user_shader_size); |
} |
/** |
* This function is called when user space has no more references on the |
* base object. It releases the base-object's reference on the resource object. |
*/ |
static void vmw_user_shader_base_release(struct ttm_base_object **p_base) |
{ |
struct ttm_base_object *base = *p_base; |
struct vmw_resource *res = vmw_user_shader_base_to_res(base); |
*p_base = NULL; |
vmw_resource_unreference(&res); |
} |
int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |
{ |
struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data; |
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
return ttm_ref_object_base_unref(tfile, arg->handle, |
TTM_REF_USAGE); |
} |
#if 0 |
int vmw_shader_define_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |
{ |
struct vmw_private *dev_priv = vmw_priv(dev); |
struct vmw_user_shader *ushader; |
struct vmw_resource *res; |
struct vmw_resource *tmp; |
struct drm_vmw_shader_create_arg *arg = |
(struct drm_vmw_shader_create_arg *)data; |
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
struct vmw_master *vmaster = vmw_master(file_priv->master); |
struct vmw_dma_buffer *buffer = NULL; |
SVGA3dShaderType shader_type; |
int ret; |
if (arg->buffer_handle != SVGA3D_INVALID_ID) { |
ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle, |
&buffer); |
if (unlikely(ret != 0)) { |
DRM_ERROR("Could not find buffer for shader " |
"creation.\n"); |
return ret; |
} |
if ((u64)buffer->base.num_pages * PAGE_SIZE < |
(u64)arg->size + (u64)arg->offset) { |
DRM_ERROR("Illegal buffer- or shader size.\n"); |
ret = -EINVAL; |
goto out_bad_arg; |
} |
} |
switch (arg->shader_type) { |
case drm_vmw_shader_type_vs: |
shader_type = SVGA3D_SHADERTYPE_VS; |
break; |
case drm_vmw_shader_type_ps: |
shader_type = SVGA3D_SHADERTYPE_PS; |
break; |
case drm_vmw_shader_type_gs: |
shader_type = SVGA3D_SHADERTYPE_GS; |
break; |
default: |
DRM_ERROR("Illegal shader type.\n"); |
ret = -EINVAL; |
goto out_bad_arg; |
} |
/* |
* Approximate idr memory usage with 128 bytes. It will be limited |
* by the maximum number of shaders anyway. |
*/ |
if (unlikely(vmw_user_shader_size == 0)) |
vmw_user_shader_size = ttm_round_pot(sizeof(*ushader)) |
+ 128; |
ret = ttm_read_lock(&vmaster->lock, true); |
if (unlikely(ret != 0)) |
return ret; |
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), |
vmw_user_shader_size, |
false, true); |
if (unlikely(ret != 0)) { |
if (ret != -ERESTARTSYS) |
DRM_ERROR("Out of graphics memory for shader" |
" creation.\n"); |
goto out_unlock; |
} |
ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); |
if (unlikely(ushader == NULL)) { |
ttm_mem_global_free(vmw_mem_glob(dev_priv), |
vmw_user_shader_size); |
ret = -ENOMEM; |
goto out_unlock; |
} |
res = &ushader->shader.res; |
ushader->base.shareable = false; |
ushader->base.tfile = NULL; |
/* |
* From here on, the destructor takes over resource freeing. |
*/ |
ret = vmw_gb_shader_init(dev_priv, res, arg->size, |
arg->offset, shader_type, buffer, |
vmw_user_shader_free); |
if (unlikely(ret != 0)) |
goto out_unlock; |
tmp = vmw_resource_reference(res); |
ret = ttm_base_object_init(tfile, &ushader->base, false, |
VMW_RES_SHADER, |
&vmw_user_shader_base_release, NULL); |
if (unlikely(ret != 0)) { |
vmw_resource_unreference(&tmp); |
goto out_err; |
} |
arg->shader_handle = ushader->base.hash.key; |
out_err: |
vmw_resource_unreference(&res); |
out_unlock: |
ttm_read_unlock(&vmaster->lock); |
out_bad_arg: |
vmw_dmabuf_unreference(&buffer); |
return ret; |
} |
#endif |
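/* |
* Hypothetical user-space usage of the shader ioctls above (a sketch |
* only; it assumes the struct drm_vmw_shader_create_arg and struct |
* drm_vmw_shader_arg layouts and the DRM_VMW_CREATE_SHADER / |
* DRM_VMW_UNREF_SHADER command indices from vmwgfx_drm.h, error |
* handling elided): |
* |
* struct drm_vmw_shader_create_arg c = { |
* .shader_type = drm_vmw_shader_type_vs, |
* .size = bytecode_size, |
* .buffer_handle = bytecode_bo_handle, |
* }; |
* drmCommandWriteRead(fd, DRM_VMW_CREATE_SHADER, &c, sizeof(c)); |
* ... c.shader_handle now identifies the shader ... |
* struct drm_vmw_shader_arg d = { .handle = c.shader_handle }; |
* drmCommandWrite(fd, DRM_VMW_UNREF_SHADER, &d, sizeof(d)); |
*/ |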
/drivers/video/drm/vmwgfx/vmwgfx_surface.c |
---|
38,10 → 38,9 |
* @size: TTM accounting size for the surface. |
*/ |
struct vmw_user_surface { |
struct ttm_base_object base; |
struct ttm_prime_object prime; |
struct vmw_surface srf; |
uint32_t size; |
uint32_t backup_handle; |
}; |
/** |
68,7 → 67,15 |
struct ttm_validate_buffer *val_buf); |
static int vmw_legacy_srf_create(struct vmw_resource *res); |
static int vmw_legacy_srf_destroy(struct vmw_resource *res); |
static int vmw_gb_surface_create(struct vmw_resource *res); |
static int vmw_gb_surface_bind(struct vmw_resource *res, |
struct ttm_validate_buffer *val_buf); |
static int vmw_gb_surface_unbind(struct vmw_resource *res, |
bool readback, |
struct ttm_validate_buffer *val_buf); |
static int vmw_gb_surface_destroy(struct vmw_resource *res); |
static const struct vmw_user_resource_conv user_surface_conv = { |
.object_type = VMW_RES_SURFACE, |
.base_obj_to_res = vmw_user_surface_base_to_res, |
93,6 → 100,18 |
.unbind = &vmw_legacy_srf_unbind |
}; |
static const struct vmw_res_func vmw_gb_surface_func = { |
.res_type = vmw_res_surface, |
.needs_backup = true, |
.may_evict = true, |
.type_name = "guest backed surfaces", |
.backup_placement = &vmw_mob_placement, |
.create = vmw_gb_surface_create, |
.destroy = vmw_gb_surface_destroy, |
.bind = vmw_gb_surface_bind, |
.unbind = vmw_gb_surface_unbind |
}; |
/** |
* struct vmw_surface_dma - SVGA3D DMA command |
*/ |
291,6 → 310,11 |
struct vmw_surface *srf; |
void *cmd; |
if (res->func->destroy == vmw_gb_surface_destroy) { |
(void) vmw_gb_surface_destroy(res); |
return; |
} |
if (res->id != -1) { |
cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); |
549,11 → 573,14 |
struct vmw_resource *res = &srf->res; |
BUG_ON(res_free == NULL); |
if (!dev_priv->has_mob) |
(void) vmw_3d_resource_inc(dev_priv, false); |
ret = vmw_resource_init(dev_priv, res, true, res_free, |
(dev_priv->has_mob) ? &vmw_gb_surface_func : |
&vmw_legacy_surface_func); |
if (unlikely(ret != 0)) { |
if (!dev_priv->has_mob) |
vmw_3d_resource_dec(dev_priv, false); |
res_free(res); |
return ret; |
580,7 → 607,8 |
static struct vmw_resource * |
vmw_user_surface_base_to_res(struct ttm_base_object *base) |
{ |
return &(container_of(base, struct vmw_user_surface, base)->srf.res); |
return &(container_of(base, struct vmw_user_surface, |
prime.base)->srf.res); |
} |
/** |
616,7 → 644,7 |
{ |
struct ttm_base_object *base = *p_base; |
struct vmw_user_surface *user_srf = |
container_of(base, struct vmw_user_surface, base); |
container_of(base, struct vmw_user_surface, prime.base); |
struct vmw_resource *res = &user_srf->srf.res; |
*p_base = NULL; |
733,7 → 761,7 |
srf->base_size = *srf->sizes; |
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; |
srf->multisample_count = 1; |
srf->multisample_count = 0; |
cur_bo_offset = 0; |
cur_offset = srf->offsets; |
774,8 → 802,8 |
} |
srf->snooper.crtc = NULL; |
user_srf->base.shareable = false; |
user_srf->base.tfile = NULL; |
user_srf->prime.base.shareable = false; |
user_srf->prime.base.tfile = NULL; |
/** |
* From this point, the generic resource management functions |
787,7 → 815,7 |
goto out_unlock; |
tmp = vmw_resource_reference(&srf->res); |
ret = ttm_base_object_init(tfile, &user_srf->base, |
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, |
req->shareable, VMW_RES_SURFACE, |
&vmw_user_surface_base_release, NULL); |
797,7 → 825,7 |
goto out_unlock; |
} |
rep->sid = user_srf->base.hash.key; |
rep->sid = user_srf->prime.base.hash.key; |
vmw_resource_unreference(&res); |
ttm_read_unlock(&vmaster->lock); |
807,7 → 835,7 |
out_no_offsets: |
kfree(srf->sizes); |
out_no_sizes: |
ttm_base_object_kfree(user_srf, base); |
ttm_prime_object_kfree(user_srf, prime); |
out_no_user_srf: |
ttm_mem_global_free(vmw_mem_glob(dev_priv), size); |
out_unlock: |
826,6 → 854,7 |
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |
{ |
struct vmw_private *dev_priv = vmw_priv(dev); |
union drm_vmw_surface_reference_arg *arg = |
(union drm_vmw_surface_reference_arg *)data; |
struct drm_vmw_surface_arg *req = &arg->req; |
837,19 → 866,20 |
struct ttm_base_object *base; |
int ret = -EINVAL; |
base = ttm_base_object_lookup(tfile, req->sid); |
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid); |
if (unlikely(base == NULL)) { |
DRM_ERROR("Could not find surface to reference.\n"); |
return -EINVAL; |
} |
if (unlikely(base->object_type != VMW_RES_SURFACE)) |
if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) |
goto out_bad_resource; |
user_srf = container_of(base, struct vmw_user_surface, base); |
user_srf = container_of(base, struct vmw_user_surface, prime.base); |
srf = &user_srf->srf; |
ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); |
ret = ttm_ref_object_add(tfile, &user_srf->prime.base, |
TTM_REF_USAGE, NULL); |
if (unlikely(ret != 0)) { |
DRM_ERROR("Could not add a reference to a surface.\n"); |
goto out_no_reference; |