Subversion Repositories: Kolibri OS

Compare Revisions

Rev 6295 → Rev 6296

/drivers/video/drm/drm_cache.c
45,7 → 45,7
{
uint8_t *page_virtual;
unsigned int i;
const int size = boot_cpu_data.x86_clflush_size;
const int size = x86_clflush_size;
 
if (unlikely(page == NULL))
return;
101,7 → 101,7
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
if (cpu_has_clflush) {
if (1) {
struct sg_page_iter sg_iter;
 
mb();
112,8 → 112,6
return;
}
 
if (wbinvd_on_all_cpus())
printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");
WARN_ON_ONCE(1);
/drivers/video/drm/drm_crtc.c
1025,6 → 1025,13
ret = drm_sysfs_connector_add(connector);
if (ret)
return ret;
 
ret = drm_debugfs_connector_add(connector);
if (ret) {
drm_sysfs_connector_remove(connector);
return ret;
}
 
return 0;
}
EXPORT_SYMBOL(drm_connector_register);
1038,6 → 1045,7
void drm_connector_unregister(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
drm_debugfs_connector_remove(connector);
}
EXPORT_SYMBOL(drm_connector_unregister);
 
/drivers/video/drm/drm_fb_helper.c
421,8 → 421,8
 
/* Sometimes user space wants everything disabled, so don't steal the
* display if there's a master. */
if (dev->primary->master)
return false;
// if (dev->primary->master)
// return false;
 
drm_for_each_crtc(crtc, dev) {
if (crtc->primary->fb)
431,6 → 431,7
bound++;
}
 
dbgprintf("%s bound %d crtcs_bound %d\n", __FUNCTION__, bound, crtcs_bound);
if (bound < crtcs_bound)
return false;
 
1844,17 → 1845,16
}
DRM_DEBUG_KMS("\n");
 
max_width = fb_helper->fb->width;
max_height = fb_helper->fb->height;
max_width = 8192; //fb_helper->fb->width;
max_height = 8192; //fb_helper->fb->height;
 
drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height);
mutex_unlock(&fb_helper->dev->mode_config.mutex);
 
drm_modeset_lock_all(dev);
drm_setup_crtcs(fb_helper);
drm_modeset_unlock_all(dev);
drm_fb_helper_set_par(fb_helper->fbdev);
 
// drm_modeset_lock_all(dev);
// drm_setup_crtcs(fb_helper);
// drm_modeset_unlock_all(dev);
// drm_fb_helper_set_par(fb_helper->fbdev);
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
/drivers/video/drm/drm_pci.c
58,8 → 58,7
return NULL;
 
dmah->size = size;
dmah->vaddr = (void*)KernelAlloc(size);
dmah->busaddr = GetPgAddr(dmah->vaddr);
dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
 
if (dmah->vaddr == NULL) {
kfree(dmah);
73,28 → 72,20
 
EXPORT_SYMBOL(drm_pci_alloc);
 
#if 0
/**
* \brief Free a PCI consistent memory block without freeing its descriptor.
/*
* Free a PCI consistent memory block without freeing its descriptor.
*
* This function is for internal use in the Linux-specific DRM core code.
*/
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
unsigned long addr;
size_t sz;
 
if (dmah->vaddr) {
/* XXX - Is virt_to_page() legal for consistent mem? */
/* Unreserve */
for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
ClearPageReserved(virt_to_page((void *)addr));
KernelFree(dmah->vaddr);
}
dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
dmah->busaddr);
}
}
 
/**
* drm_pci_free - Free a PCI consistent memory block
103,12 → 94,13
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
__drm_pci_free(dev, dmah);
__drm_legacy_pci_free(dev, dmah);
kfree(dmah);
}
 
EXPORT_SYMBOL(drm_pci_free);
 
#if 0
 
static int drm_get_pci_domain(struct drm_device *dev)
{
124,69 → 116,29
return pci_domain_nr(dev->pdev->bus);
}
 
static int drm_pci_get_irq(struct drm_device *dev)
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
return dev->pdev->irq;
}
 
static const char *drm_pci_get_name(struct drm_device *dev)
{
struct pci_driver *pdriver = dev->driver->kdriver.pci;
return pdriver->name;
}
 
static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
int len, ret;
struct pci_driver *pdriver = dev->driver->kdriver.pci;
master->unique_len = 40;
master->unique_size = master->unique_len;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
if (master->unique == NULL)
return -ENOMEM;
 
 
len = snprintf(master->unique, master->unique_len,
"pci:%04x:%02x:%02x.%d",
master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
dev->pdev->bus->number,
PCI_SLOT(dev->pdev->devfn),
PCI_FUNC(dev->pdev->devfn));
if (!master->unique)
return -ENOMEM;
 
if (len >= master->unique_len) {
DRM_ERROR("buffer overflow");
ret = -EINVAL;
goto err;
} else
master->unique_len = len;
 
dev->devname =
kmalloc(strlen(pdriver->name) +
master->unique_len + 2, GFP_KERNEL);
 
if (dev->devname == NULL) {
ret = -ENOMEM;
goto err;
}
 
sprintf(dev->devname, "%s@%s", pdriver->name,
master->unique);
 
master->unique_len = strlen(master->unique);
return 0;
err:
return ret;
}
EXPORT_SYMBOL(drm_pci_set_busid);
 
static int drm_pci_set_unique(struct drm_device *dev,
int drm_pci_set_unique(struct drm_device *dev,
struct drm_master *master,
struct drm_unique *u)
{
int domain, bus, slot, func, ret;
const char *bus_name;
 
master->unique_len = u->unique_len;
master->unique_size = u->unique_len + 1;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
if (!master->unique) {
ret = -ENOMEM;
goto err;
199,17 → 151,6
 
master->unique[master->unique_len] = '\0';
 
bus_name = dev->driver->bus->get_name(dev);
dev->devname = kmalloc(strlen(bus_name) +
strlen(master->unique) + 2, GFP_KERNEL);
if (!dev->devname) {
ret = -ENOMEM;
goto err;
}
 
sprintf(dev->devname, "%s@%s", bus_name,
master->unique);
 
/* Return error if the busid submitted doesn't match the device's actual
* busid.
*/
271,28 → 212,21
dev->agp = NULL;
}
}
 
static struct drm_bus drm_pci_bus = {
.bus_type = DRIVER_BUS_PCI,
.get_irq = drm_pci_get_irq,
.get_name = drm_pci_get_name,
.set_busid = drm_pci_set_busid,
.set_unique = drm_pci_set_unique,
.irq_by_busid = drm_pci_irq_by_busid,
};
#endif
 
/**
* Register.
* drm_get_pci_dev - Register a PCI device with the DRM subsystem
* @pdev: PCI device
* @ent: entry from the PCI ID table that matches @pdev
* @driver: DRM device driver
*
* \param pdev - PCI device structure
* \param ent entry from the PCI ID table with device type flags
* \return zero on success or a negative number on failure.
*
* Attempt to gets inter module "drm" information. If we are first
* then register the character device and inter module information.
* Try and register, if we fail to register, backout previous work.
*
* NOTE: This function is deprecated, please use drm_dev_alloc() and
* drm_dev_register() instead and remove your ->load() callback.
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
/drivers/video/drm/drm_probe_helper.c
121,6 → 121,8
poll = true;
}
 
if (poll)
schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
 
157,7 → 159,31
old_status = connector->status;
 
connector->status = connector->funcs->detect(connector, true);
 
/*
* Normally either the driver's hpd code or the poll loop should
* pick up any changes and fire the hotplug event. But if
* userspace sneaks in a probe, we might miss a change. Hence
* check here, and if anything changed start the hotplug code.
*/
if (old_status != connector->status) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
connector->base.id,
connector->name,
old_status, connector->status);
 
/*
* The hotplug event code might call into the fb
* helpers, and so expects that we do not hold any
* locks. Fire up the poll struct instead, it will
* disable itself again.
*/
dev->mode_config.delayed_event = true;
if (dev->mode_config.poll_enabled)
schedule_delayed_work(&dev->mode_config.output_poll_work,
0);
}
}
 
/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
297,10 → 323,12
*/
void drm_kms_helper_hotplug_event(struct drm_device *dev)
{
ENTER();
/* send a uevent + call fbdev */
// drm_sysfs_hotplug_event(dev);
// if (dev->mode_config.funcs->output_poll_changed)
// dev->mode_config.funcs->output_poll_changed(dev);
drm_sysfs_hotplug_event(dev);
if (dev->mode_config.funcs->output_poll_changed)
dev->mode_config.funcs->output_poll_changed(dev);
LEAVE();
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
 
377,9 → 405,12
 
mutex_unlock(&dev->mode_config.mutex);
 
out:;
out:
if (changed)
drm_kms_helper_hotplug_event(dev);
 
 
if (repoll)
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
}
 
/**
396,7 → 427,7
{
if (!dev->mode_config.poll_enabled)
return;
// cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 
/drivers/video/drm/drm_stub.c
553,5 → 553,10
}
 
void drm_sysfs_connector_remove(struct drm_connector *connector)
{ }
 
void drm_sysfs_hotplug_event(struct drm_device *dev)
{
DRM_DEBUG("generating hotplug event\n");
}
 
/drivers/video/drm/i915/Makefile
1,8 → 1,8
CC = kos32-gcc
FASM = fasm.exe
 
DEFINES = -DDRM_DEBUG_CODE=0 -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_PCI -DCONFIG_X86_CMPXCHG64
DEFINES += -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES = -DDRM_DEBUG_CODE=0 -D__KERNEL__ -DCONFIG_X86 -DCONFIG_X86_32 -DCONFIG_PCI
DEFINES += -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI
DEFINES += -DKBUILD_MODNAME=\"i915.dll\"
 
92,6 → 92,7
intel_frontbuffer.c \
intel_guc_loader.c \
intel_hdmi.c \
intel_hotplug.c \
intel_i2c.c \
intel_lrc.c \
intel_lvds.c \
/drivers/video/drm/i915/i915_dma.c
334,7 → 334,7
goto cleanup_gem;
 
/* Only enable hotplug handling once the fbdev is fully set up. */
// intel_hpd_init(dev_priv);
intel_hpd_init(dev_priv);
 
/*
* Some ports require correctly set-up hpd registers for detection to
360,7 → 360,7
cleanup_irq:
// drm_irq_uninstall(dev);
cleanup_gem_stolen:
// i915_gem_cleanup_stolen(dev);
i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
// vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
900,7 → 900,7
* so there is no point in running more than one instance of the
* workqueue at any time. Use an ordered one.
*/
dev_priv->wq = (struct workqueue_struct *)alloc_ordered_workqueue("i915", 0);
dev_priv->wq = alloc_ordered_workqueue("i915", 0);
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
908,6 → 908,12
}
system_wq = dev_priv->wq;
 
dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
if (dev_priv->hotplug.dp_wq == NULL) {
DRM_ERROR("Failed to create our dp workqueue.\n");
ret = -ENOMEM;
goto out_freewq;
}
 
intel_irq_init(dev_priv);
intel_uncore_sanitize(dev);
971,8 → 977,10
return 0;
 
out_power_well:
drm_vblank_cleanup(dev);
out_gem_unload:
 
out_freewq:
out_mtrrfree:
out_gtt:
i915_global_gtt_cleanup(dev);
/drivers/video/drm/i915/i915_drv.h
53,15 → 53,6
#include <linux/spinlock.h>
#include <linux/err.h>
 
extern int i915_fbsize;
extern struct drm_i915_gem_object *main_fb_obj;
extern struct drm_framebuffer *main_framebuffer;
 
static struct drm_i915_gem_object *get_fb_obj()
{
return main_fb_obj;
};
 
#define ioread32(addr) readl(addr)
static inline u8 inb(u16 port)
{
2692,7 → 2683,6
bool nuclear_pageflip;
int edp_vswing;
/* Kolibri related */
int fbsize;
char *log_file;
char *cmdline_mode;
};
/drivers/video/drm/i915/i915_gem.c
173,6 → 173,128
return 0;
}
 
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
char *vaddr = obj->phys_handle->vaddr;
struct sg_table *st;
struct scatterlist *sg;
int i;
 
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
return -EINVAL;
 
 
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
return -ENOMEM;
 
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
return -ENOMEM;
}
 
sg = st->sgl;
sg->offset = 0;
sg->length = obj->base.size;
 
sg_dma_address(sg) = obj->phys_handle->busaddr;
sg_dma_len(sg) = obj->base.size;
 
obj->pages = st;
return 0;
}
 
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
int ret;
 
BUG_ON(obj->madv == __I915_MADV_PURGED);
 
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret) {
/* In the event of a disaster, abandon all caches and
* hope for the best.
*/
WARN_ON(ret != -EIO);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
 
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
 
if (obj->dirty) {
obj->dirty = 0;
}
 
sg_free_table(obj->pages);
kfree(obj->pages);
}
 
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
drm_pci_free(obj->base.dev, obj->phys_handle);
}
 
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
.get_pages = i915_gem_object_get_pages_phys,
.put_pages = i915_gem_object_put_pages_phys,
.release = i915_gem_object_release_phys,
};
 
static int
drop_pages(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma, *next;
int ret;
 
drm_gem_object_reference(&obj->base);
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
if (i915_vma_unbind(vma))
break;
 
ret = i915_gem_object_put_pages(obj);
drm_gem_object_unreference(&obj->base);
 
return ret;
}
 
int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align)
{
drm_dma_handle_t *phys;
int ret;
 
if (obj->phys_handle) {
if ((unsigned long)obj->phys_handle->vaddr & (align -1))
return -EBUSY;
 
return 0;
}
 
if (obj->madv != I915_MADV_WILLNEED)
return -EFAULT;
 
if (obj->base.filp == NULL)
return -EINVAL;
 
ret = drop_pages(obj);
if (ret)
return ret;
 
/* create a new object */
phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
if (!phys)
return -ENOMEM;
 
obj->phys_handle = phys;
obj->ops = &i915_gem_phys_ops;
 
return i915_gem_object_get_pages(obj);
}
void *i915_gem_object_alloc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
633,7 → 755,6
 
return ret ? -EFAULT : 0;
}
#if 0
 
/* Only difference to the fast-path function is that this can handle bit17
* and uses non-atomic copy and kmap functions. */
668,9 → 789,7
 
return ret ? -EFAULT : 0;
}
#endif
 
 
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
757,13 → 876,11
 
hit_slowpath = 1;
mutex_unlock(&dev->struct_mutex);
dbgprintf("%s need shmem_pwrite_slow\n",__FUNCTION__);
ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
user_data, page_do_bit17_swizzling,
partial_cacheline_write,
needs_clflush_after);
 
// ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
// user_data, page_do_bit17_swizzling,
// partial_cacheline_write,
// needs_clflush_after);
 
mutex_lock(&dev->struct_mutex);
 
if (ret)
862,8 → 979,9
* textures). Fallback to the shmem path in that case. */
}
 
if (ret == -EFAULT || ret == -ENOSPC)
if (ret == -EFAULT || ret == -ENOSPC) {
ret = i915_gem_shmem_pwrite(dev, obj, args, file);
}
 
out:
drm_gem_object_unreference(&obj->base);
1747,6 → 1865,10
}
 
i915_gem_gtt_finish_object(obj);
 
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj);
 
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
 
1859,6 → 1981,9
if (ret)
goto err_pages;
 
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
 
if (obj->tiling_mode != I915_TILING_NONE &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
i915_gem_object_pin_pages(obj);
/drivers/video/drm/i915/i915_gem_execbuffer.c
40,13 → 40,6
 
#define BATCH_OFFSET_BIAS (256*1024)
 
static unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
memcpy(to, from, n);
return 0;
}
 
struct eb_vmas {
struct list_head vmas;
int and;
/drivers/video/drm/i915/i915_gem_fence.c
758,6 → 758,7
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(page);
set_page_dirty(page);
}
i++;
}
/drivers/video/drm/i915/i915_irq.c
1445,7 → 1445,7
 
*pin_mask |= BIT(i);
 
// if (!intel_hpd_pin_to_port(i, &port))
if (!intel_hpd_pin_to_port(i, &port))
continue;
 
if (long_pulse_detect(port, dig_hotplug_reg))
1699,7 → 1699,7
hotplug_trigger, hpd_status_g4x,
i9xx_port_hotplug_long_detect);
 
// intel_hpd_irq_handler(dev, pin_mask, long_mask);
intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
 
if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1711,7 → 1711,7
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
hotplug_trigger, hpd_status_i915,
i9xx_port_hotplug_long_detect);
// intel_hpd_irq_handler(dev, pin_mask, long_mask);
intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
}
}
1819,7 → 1819,7
dig_hotplug_reg, hpd,
pch_port_hotplug_long_detect);
 
// intel_hpd_irq_handler(dev, pin_mask, long_mask);
intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
 
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1984,6 → 1984,9
spt_port_hotplug2_long_detect);
}
 
if (pin_mask)
intel_hpd_irq_handler(dev, pin_mask, long_mask);
 
if (pch_iir & SDE_GMBUS_CPT)
gmbus_irq_handler(dev);
}
2001,6 → 2004,7
dig_hotplug_reg, hpd,
ilk_port_hotplug_long_detect);
 
intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
 
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2009,8 → 2013,8
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
 
// if (hotplug_trigger)
// ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
if (hotplug_trigger)
ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
 
if (de_iir & DE_AUX_CHANNEL_A)
dp_aux_irq_handler(dev);
2189,6 → 2193,7
dig_hotplug_reg, hpd,
bxt_port_hotplug_long_detect);
 
intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
 
static irqreturn_t gen8_irq_handler(int irq, void *arg)
4316,7 → 4321,7
{
struct drm_device *dev = dev_priv->dev;
 
// intel_hpd_init_work(dev_priv);
intel_hpd_init_work(dev_priv);
 
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
/drivers/video/drm/i915/i915_params.c
55,7 → 55,6
.edp_vswing = 0,
.enable_guc_submission = false,
.guc_log_level = -1,
.fbsize = 16,
.log_file = NULL,
.cmdline_mode = NULL,
};
/drivers/video/drm/i915/intel_display.c
3301,7 → 3301,7
 
intel_display_resume(dev);
 
// intel_hpd_init(dev_priv);
intel_hpd_init(dev_priv);
 
drm_modeset_unlock_all(dev);
}
14442,7 → 14442,6
return ret;
}
kolibri_framebuffer_init(intel_fb);
 
return 0;
}
 
/drivers/video/drm/i915/intel_dp.c
4639,8 → 4639,23
{
struct intel_encoder *intel_encoder = &intel_dig_port->base;
enum port port;
u32 bit = 0;
u32 bit;
 
intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
switch (port) {
case PORT_A:
bit = BXT_DE_PORT_HP_DDIA;
break;
case PORT_B:
bit = BXT_DE_PORT_HP_DDIB;
break;
case PORT_C:
bit = BXT_DE_PORT_HP_DDIC;
break;
default:
MISSING_CASE(port);
return false;
}
 
return I915_READ(GEN8_DE_PORT_ISR) & bit;
}
6100,6 → 6115,8
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
 
i915_debugfs_connector_add(connector);
 
return true;
}
 
/drivers/video/drm/i915/intel_fbdev.c
44,32 → 44,6
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
{
#define BYTES_PER_LONG (BITS_PER_LONG/8)
#define PADDING (BYTES_PER_LONG - (sizeof(struct fb_info) % BYTES_PER_LONG))
int fb_info_size = sizeof(struct fb_info);
struct fb_info *info;
char *p;
 
if (size)
fb_info_size += PADDING;
 
p = kzalloc(fb_info_size + size, GFP_KERNEL);
 
if (!p)
return NULL;
 
info = (struct fb_info *) p;
 
if (size)
info->par = p + fb_info_size;
 
return info;
#undef PADDING
#undef BYTES_PER_LONG
}
 
static int intel_fbdev_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
267,6 → 241,8
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
 
info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
info->fix.smem_len = size;
 
info->screen_base = (void*) 0xFE000000;
info->screen_size = size;
526,6 → 502,19
.fb_probe = intelfb_create,
};
 
static void intel_fbdev_destroy(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
 
drm_fb_helper_unregister_fbi(&ifbdev->helper);
drm_fb_helper_release_fbi(&ifbdev->helper);
 
drm_fb_helper_fini(&ifbdev->helper);
 
drm_framebuffer_unregister_private(&ifbdev->fb->base);
drm_framebuffer_remove(&ifbdev->fb->base);
}
 
/*
* Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
* The core display code will have read out the current plane configuration,
/drivers/video/drm/i915/intel_runtime_pm.c
894,7 → 894,7
if (dev_priv->power_domains.initializing)
return;
 
// intel_hpd_init(dev_priv);
intel_hpd_init(dev_priv);
 
i915_redisable_vga_power_on(dev_priv->dev);
}
/drivers/video/drm/i915/kms_display.c
88,7 → 88,7
size = stride * ALIGN(mode->vdisplay, 2);
}
 
dbgprintf("size %x stride %x\n", size, stride);
DRM_DEBUG_KMS("size %x stride %x\n", size, stride);
 
if(intel_fb == NULL || size > intel_fb->obj->base.size)
{
148,6 → 148,20
obj->stride = stride;
};
 
if (obj->base.name == 0)
{
int ret;
 
mutex_lock(&dev->object_name_lock);
idr_preload(GFP_KERNEL);
ret = idr_alloc(&dev->object_name_idr, &obj->base, 1, 0, GFP_NOWAIT);
idr_preload_end();
mutex_unlock(&dev->object_name_lock);
obj->base.name = ret;
obj->base.handle_count++;
DRM_DEBUG_KMS("%s allocate fb name %d\n", __FUNCTION__, obj->base.name );
}
 
fb->width = mode->hdisplay;
fb->height = mode->vdisplay;
 
561,19 → 575,6
return -1;
};
 
/*
mutex_lock(&dev->object_name_lock);
idr_preload(GFP_KERNEL);
if (!main_fb_obj->base.name) {
ret = idr_alloc(&dev->object_name_idr, &main_fb_obj->base, 1, 0, GFP_NOWAIT);
 
main_fb_obj->base.name = ret;
main_fb_obj->base.handle_count++;
DRM_DEBUG_KMS("%s allocate fb name %d\n", __FUNCTION__, main_fb_obj->base.name );
}
idr_preload_end();
mutex_unlock(&dev->object_name_lock);
*/
dummy_fb_page = AllocPage();
 
os_display = GetDisplay();
820,10 → 821,10
fb->pipe = crtc->pipe;
}
safe_sti(ifl);
 
return 0;
}
 
 
int kolibri_framebuffer_init(struct intel_framebuffer *intel_fb)
{
struct kos_framebuffer *kfb;
830,7 → 831,7
addr_t dummy_table;
addr_t *pt_addr = NULL;
int pde;
ENTER();
 
kfb = kzalloc(sizeof(struct kos_framebuffer),0);
kfb->private = intel_fb;
 
845,7 → 846,7
};
 
intel_fb->private = kfb;
LEAVE();
 
return 0;
#if 0
struct sg_page_iter sg_iter;
/drivers/video/drm/i915/kos_cursor.c
71,7 → 71,8
static int init_cursor(cursor_t *cursor)
{
display_t *display = GetDisplay();
struct drm_i915_private *dev_priv = display->ddev->dev_private;
struct drm_device *dev = display->ddev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
uint32_t *bits;
uint32_t *src;
80,11 → 81,16
int i,j;
int ret;
 
mutex_lock(&dev->struct_mutex);
 
if (dev_priv->info.cursor_needs_physical)
{
bits = (uint32_t*)KernelAlloc(KMS_CURSOR_WIDTH*KMS_CURSOR_HEIGHT*8);
if (unlikely(bits == NULL))
return ENOMEM;
{
ret = -ENOMEM;
goto unlock;
};
cursor->cobj = (struct drm_i915_gem_object *)GetPgAddr(bits);
}
else
91,21 → 97,19
{
obj = i915_gem_alloc_object(display->ddev, KMS_CURSOR_WIDTH*KMS_CURSOR_HEIGHT*4);
if (unlikely(obj == NULL))
return -ENOMEM;
{
ret = -ENOMEM;
goto unlock;
};
 
ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal, 128*1024, PIN_GLOBAL);
if (ret) {
drm_gem_object_unreference(&obj->base);
return ret;
}
if (ret)
goto unref;
 
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
{
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
return ret;
}
goto unpin;
 
/* You don't need to worry about fragmentation issues.
* GTT space is continuous. I guarantee it. */
 
114,13 → 118,14
 
if (unlikely(bits == NULL))
{
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
return -ENOMEM;
ret = -ENOMEM;
goto unpin;
};
cursor->cobj = obj;
};
 
mutex_unlock(&dev->struct_mutex);
 
src = cursor->data;
 
for(i = 0; i < 32; i++)
139,6 → 144,14
cursor->header.destroy = destroy_cursor;
 
return 0;
 
unpin:
i915_gem_object_ggtt_unpin(obj);
unref:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
 
void init_system_cursors(struct drm_device *dev)
150,7 → 163,6
display = GetDisplay();
 
mutex_init(&cursor_lock);
mutex_lock(&dev->struct_mutex);
 
ifl = safe_cli();
{
172,6 → 184,4
select_cursor_kms(display->cursor);
};
safe_sti(ifl);
 
mutex_unlock(&dev->struct_mutex);
}
/drivers/video/drm/i915/main.c
14,7 → 14,7
#include "bitmap.h"
#include "i915_kos32.h"
 
#define DRV_NAME "i915 v4.4.2-dbg2"
#define DRV_NAME "i915 v4.4.3"
 
#define I915_DEV_CLOSE 0
#define I915_DEV_INIT 1
60,7 → 60,6
};
 
dev_priv = main_device->dev_private;
cwq = dev_priv->wq;
 
asm volatile("int $0x40":"=a"(tmp):"a"(66),"b"(1),"c"(1));
asm volatile("int $0x40":"=a"(tmp):"a"(66),"b"(4),"c"(0x46),"d"(0x330));
92,9 → 91,9
else if(key.code == 0xC6)
dpms_lock = 0;
};
cwq = dev_priv->wq;
 
spin_lock_irqsave(&cwq->lock, irqflags);
 
while (!list_empty(&cwq->worklist))
{
struct work_struct *work = list_entry(cwq->worklist.next,
106,8 → 105,23
f(work);
spin_lock_irqsave(&cwq->lock, irqflags);
}
spin_unlock_irqrestore(&cwq->lock, irqflags);
 
cwq = dev_priv->hotplug.dp_wq;
 
spin_lock_irqsave(&cwq->lock, irqflags);
while (!list_empty(&cwq->worklist))
{
struct work_struct *work = list_entry(cwq->worklist.next,
struct work_struct, entry);
work_func_t f = work->func;
list_del_init(cwq->worklist.next);
 
spin_unlock_irqrestore(&cwq->lock, irqflags);
f(work);
spin_lock_irqsave(&cwq->lock, irqflags);
}
spin_unlock_irqrestore(&cwq->lock, irqflags);
 
delay(1);
};
/drivers/video/drm/i915/utils.c
250,240 → 250,6
}
 
 
//const char hex_asc[] = "0123456789abcdef";
 
/**
* hex_to_bin - convert a hex digit to its real value
* @ch: ascii character represents hex digit
*
* hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
* input.
*/
int hex_to_bin(char ch)
{
if ((ch >= '0') && (ch <= '9'))
return ch - '0';
ch = tolower(ch);
if ((ch >= 'a') && (ch <= 'f'))
return ch - 'a' + 10;
return -1;
}
EXPORT_SYMBOL(hex_to_bin);
 
/**
* hex2bin - convert an ascii hexadecimal string to its binary representation
* @dst: binary result
* @src: ascii hexadecimal string
* @count: result length
*
* Return 0 on success, -1 in case of bad input.
*/
int hex2bin(u8 *dst, const char *src, size_t count)
{
while (count--) {
int hi = hex_to_bin(*src++);
int lo = hex_to_bin(*src++);
 
if ((hi < 0) || (lo < 0))
return -1;
 
*dst++ = (hi << 4) | lo;
}
return 0;
}
EXPORT_SYMBOL(hex2bin);
 
/**
* hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
* @buf: data blob to dump
* @len: number of bytes in the @buf
* @rowsize: number of bytes to print per line; must be 16 or 32
* @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
* @linebuf: where to put the converted data
* @linebuflen: total size of @linebuf, including space for terminating NUL
* @ascii: include ASCII after the hex output
*
* hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
* 16 or 32 bytes of input data converted to hex + ASCII output.
*
* Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
* to a hex + ASCII dump at the supplied memory location.
* The converted output is always NUL-terminated.
*
* E.g.:
* hex_dump_to_buffer(frame->data, frame->len, 16, 1,
* linebuf, sizeof(linebuf), true);
*
* example output buffer:
* 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
*/
int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
char *linebuf, size_t linebuflen, bool ascii)
{
const u8 *ptr = buf;
int ngroups;
u8 ch;
int j, lx = 0;
int ascii_column;
int ret;
 
if (rowsize != 16 && rowsize != 32)
rowsize = 16;
 
if (len > rowsize) /* limit to one line at a time */
len = rowsize;
if (!is_power_of_2(groupsize) || groupsize > 8)
groupsize = 1;
if ((len % groupsize) != 0) /* no mixed size output */
groupsize = 1;
 
ngroups = len / groupsize;
ascii_column = rowsize * 2 + rowsize / groupsize + 1;
 
if (!linebuflen)
goto overflow1;
 
if (!len)
goto nil;
 
if (groupsize == 8) {
const u64 *ptr8 = buf;
 
for (j = 0; j < ngroups; j++) {
ret = snprintf(linebuf + lx, linebuflen - lx,
"%s%16.16llx", j ? " " : "",
(unsigned long long)*(ptr8 + j));
if (ret >= linebuflen - lx)
goto overflow1;
lx += ret;
}
} else if (groupsize == 4) {
const u32 *ptr4 = buf;
 
for (j = 0; j < ngroups; j++) {
ret = snprintf(linebuf + lx, linebuflen - lx,
"%s%8.8x", j ? " " : "",
*(ptr4 + j));
if (ret >= linebuflen - lx)
goto overflow1;
lx += ret;
}
} else if (groupsize == 2) {
const u16 *ptr2 = buf;
 
for (j = 0; j < ngroups; j++) {
ret = snprintf(linebuf + lx, linebuflen - lx,
"%s%4.4x", j ? " " : "",
*(ptr2 + j));
if (ret >= linebuflen - lx)
goto overflow1;
lx += ret;
}
} else {
for (j = 0; j < len; j++) {
if (linebuflen < lx + 3)
goto overflow2;
ch = ptr[j];
linebuf[lx++] = hex_asc_hi(ch);
linebuf[lx++] = hex_asc_lo(ch);
linebuf[lx++] = ' ';
}
if (j)
lx--;
}
if (!ascii)
goto nil;
 
while (lx < ascii_column) {
if (linebuflen < lx + 2)
goto overflow2;
linebuf[lx++] = ' ';
}
for (j = 0; j < len; j++) {
if (linebuflen < lx + 2)
goto overflow2;
ch = ptr[j];
linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
}
nil:
linebuf[lx] = '\0';
return lx;
overflow2:
linebuf[lx++] = '\0';
overflow1:
return ascii ? ascii_column + len : (groupsize * 2 + 1) * ngroups - 1;
}
/**
* print_hex_dump - print a text hex dump to syslog for a binary blob of data
* @level: kernel log level (e.g. KERN_DEBUG)
* @prefix_str: string to prefix each line with;
* caller supplies trailing spaces for alignment if desired
* @prefix_type: controls whether prefix of an offset, address, or none
* is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
* @rowsize: number of bytes to print per line; must be 16 or 32
* @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
* @buf: data blob to dump
* @len: number of bytes in the @buf
* @ascii: include ASCII after the hex output
*
* Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump
* to the kernel log at the specified kernel log level, with an optional
* leading prefix.
*
* print_hex_dump() works on one "line" of output at a time, i.e.,
* 16 or 32 bytes of input data converted to hex + ASCII output.
* print_hex_dump() iterates over the entire input @buf, breaking it into
* "line size" chunks to format and print.
*
* E.g.:
* print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
* 16, 1, frame->data, frame->len, true);
*
* Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
* 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
* Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode:
* ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~.
*/
void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
int rowsize, int groupsize,
const void *buf, size_t len, bool ascii)
{
const u8 *ptr = buf;
int i, linelen, remaining = len;
unsigned char linebuf[32 * 3 + 2 + 32 + 1];
 
if (rowsize != 16 && rowsize != 32)
rowsize = 16;
 
for (i = 0; i < len; i += rowsize) {
linelen = min(remaining, rowsize);
remaining -= rowsize;
 
hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
linebuf, sizeof(linebuf), ascii);
 
switch (prefix_type) {
case DUMP_PREFIX_ADDRESS:
printk("%s%s%p: %s\n",
level, prefix_str, ptr + i, linebuf);
break;
case DUMP_PREFIX_OFFSET:
printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
break;
default:
printk("%s%s%s\n", level, prefix_str, linebuf);
break;
}
}
}
 
void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
const void *buf, size_t len)
{
print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
buf, len, true);
}
 
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
void *p;
783,43 → 549,6
return 0;
}
 
 
signed long
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
{
signed long ret;
 
if (WARN_ON(timeout < 0))
return -EINVAL;
 
// trace_fence_wait_start(fence);
ret = fence->ops->wait(fence, intr, timeout);
// trace_fence_wait_end(fence);
return ret;
}
 
void fence_release(struct kref *kref)
{
struct fence *fence =
container_of(kref, struct fence, refcount);
 
// trace_fence_destroy(fence);
 
BUG_ON(!list_empty(&fence->cb_list));
 
if (fence->ops->release)
fence->ops->release(fence);
else
fence_free(fence);
}
 
void fence_free(struct fence *fence)
{
kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);
 
 
ktime_t ktime_get(void)
{
ktime_t t;
/drivers/video/drm/ttm/ttm_bo.c
153,8 → 153,8
void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
bool never_free)
{
// kref_sub(&bo->list_kref, count,
// (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
kref_sub(&bo->list_kref, count,
(never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}
 
void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
698,6 → 698,8
if (ret)
return ret;
man = &bdev->man[mem_type];
if (!man->has_type || !man->use_type)
continue;
 
type_ok = ttm_bo_mt_compatible(man, mem_type, place,
&cur_flags);
705,6 → 707,7
if (!type_ok)
continue;
 
type_found = true;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
/*
717,12 → 720,10
if (mem_type == TTM_PL_SYSTEM)
break;
 
if (man->has_type && man->use_type) {
type_found = true;
ret = (*man->func->get_node)(man, bo, place, mem);
if (unlikely(ret))
return ret;
}
if (mem->mm_node)
break;
}
733,9 → 734,6
return 0;
}
 
if (!type_found)
return -EINVAL;
 
for (i = 0; i < placement->num_busy_placement; ++i) {
const struct ttm_place *place = &placement->busy_placement[i];
 
743,11 → 741,12
if (ret)
return ret;
man = &bdev->man[mem_type];
if (!man->has_type)
if (!man->has_type || !man->use_type)
continue;
if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
continue;
 
type_found = true;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
/*
773,9 → 772,14
if (ret == -ERESTARTSYS)
has_erestartsys = true;
}
ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
return ret;
 
if (!type_found) {
printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
return -EINVAL;
}
 
return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
 
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1009,6 → 1013,61
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);
 
int ttm_bo_create(struct ttm_bo_device *bdev,
unsigned long size,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
bool interruptible,
struct file *persistent_swap_storage,
struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo;
size_t acc_size;
int ret;
 
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (unlikely(bo == NULL))
return -ENOMEM;
 
acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
interruptible, persistent_swap_storage, acc_size,
NULL, NULL, NULL);
if (likely(ret == 0))
*p_bo = bo;
 
return ret;
}
EXPORT_SYMBOL(ttm_bo_create);
 
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
unsigned mem_type, bool allow_errors)
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
int ret;
 
/*
* Can't use standard list traversal since we're unlocking.
*/
 
spin_lock(&glob->lru_lock);
while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
if (ret) {
if (allow_errors) {
return ret;
} else {
pr_err("Cleanup eviction failed\n");
}
}
spin_lock(&glob->lru_lock);
}
spin_unlock(&glob->lru_lock);
return 0;
}
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
unsigned long p_size)
{
1209,4 → 1268,51
}
EXPORT_SYMBOL(ttm_bo_wait);
 
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
int ret = 0;
 
/*
* Using ttm_bo_reserve makes sure the lru lists are updated.
*/
 
ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
if (unlikely(ret != 0))
return ret;
ret = ttm_bo_wait(bo, false, true, no_wait);
if (likely(ret == 0))
atomic_inc(&bo->cpu_writers);
ttm_bo_unreserve(bo);
return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
 
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
atomic_dec(&bo->cpu_writers);
}
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
int ret;
 
/*
* In the absence of a wait_unlocked API,
* Use the bo::wu_mutex to avoid triggering livelocks due to
* concurrent use of this function. Note that this use of
* bo::wu_mutex can go away if we change locking order to
* mmap_sem -> bo::reserve.
*/
ret = mutex_lock_interruptible(&bo->wu_mutex);
if (unlikely(ret != 0))
return -ERESTARTSYS;
if (!ww_mutex_is_locked(&bo->resv->lock))
goto out_unlock;
ret = __ttm_bo_reserve(bo, true, false, false, NULL);
if (unlikely(ret != 0))
goto out_unlock;
__ttm_bo_unreserve(bo);
 
out_unlock:
mutex_unlock(&bo->wu_mutex);
return ret;
}
/drivers/video/drm/ttm/ttm_bo_util.c
33,11 → 33,11
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
//#include <linux/io.h>
#include <linux/io.h>
//#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
//#include <linux/vmalloc.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
 
#define __pgprot(x) ((pgprot_t) { (x) } )
163,7 → 163,6
}
EXPORT_SYMBOL(ttm_mem_io_free);
 
#if 0
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
struct ttm_mem_reg *mem = &bo->mem;
183,7 → 182,6
}
return 0;
}
#endif
 
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
216,7 → 214,7
if (mem->placement & TTM_PL_FLAG_WC)
addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
if (!addr) {
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
483,7 → 481,7
map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
else
map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
}
return (!map->virtual) ? -ENOMEM : 0;
514,15 → 512,13
 
map->bo_kmap_type = ttm_bo_map_kmap;
map->page = ttm->pages[start_page];
map->virtual = (void*)MapIoMem(page_to_phys(map->page), 4096, PG_SW);
map->virtual = kmap(map->page);
} else {
/*
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
*/
prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
PAGE_KERNEL :
ttm_io_prot(mem->placement, PAGE_KERNEL);
prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
map->bo_kmap_type = ttm_bo_map_vmap;
map->virtual = vmap(ttm->pages + start_page, num_pages,
0, prot);
578,8 → 574,9
iounmap(map->virtual);
break;
case ttm_bo_map_vmap:
break;
case ttm_bo_map_kmap:
FreeKernelSpace(map->virtual);
kunmap(map->page);
break;
case ttm_bo_map_premapped:
break;
/drivers/video/drm/ttm/ttm_lock.c
28,10 → 28,9
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
 
#include <linux/mutex.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_module.h>
//#include <linux/atomic.h>
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched.h>
164,7 → 163,7
spin_unlock(&lock->lock);
}
} else
wait_event(lock->queue, __ttm_read_lock(lock));
wait_event(lock->queue, __ttm_write_lock(lock));
 
return ret;
}
/drivers/video/drm/ttm/ttm_memory.c
37,6 → 37,7
#include <linux/module.h>
#include <linux/slab.h>
 
#define TTM_MEMORY_ALLOC_RETRIES 4
 
 
int ttm_mem_global_init(struct ttm_mem_global *glob)
67,10 → 68,26
 
 
}
EXPORT_SYMBOL(ttm_mem_global_release);
 
void ttm_mem_global_free(struct ttm_mem_global *glob,
uint64_t amount)
{
 
}
EXPORT_SYMBOL(ttm_mem_global_free);
 
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
bool no_wait, bool interruptible)
{
/**
* Normal allocations of kernel memory are registered in
* all zones.
*/
 
return 0;
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
 
size_t ttm_round_pot(size_t size)
{
if ((size & (size - 1)) == 0)
/drivers/video/drm/ttm/ttm_object.c
58,8 → 58,6
 
#define pr_fmt(fmt) "[TTM] " fmt
 
#include <linux/mutex.h>
 
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
66,16 → 64,8
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
//#include <linux/atomic.h>
#include <linux/atomic.h>
 
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
return atomic_add_unless(&kref->refcount, 1, 0);
}
 
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 
struct ttm_object_file {
struct ttm_object_device *tdev;
spinlock_t lock;
101,6 → 91,9
struct drm_open_hash object_hash;
atomic_t object_count;
struct ttm_mem_global *mem_glob;
struct dma_buf_ops ops;
void (*dmabuf_release)(struct dma_buf *dma_buf);
size_t dma_buf_size;
};
 
/**
125,6 → 118,7
*/
 
struct ttm_ref_object {
struct rcu_head rcu_head;
struct drm_hash_item hash;
struct list_head head;
struct kref kref;
240,7 → 234,7
struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
int ret;
 
// rcu_read_lock();
rcu_read_lock();
ret = drm_ht_find_item_rcu(ht, key, &hash);
 
if (likely(ret == 0)) {
248,7 → 242,7
if (!kref_get_unless_zero(&base->refcount))
base = NULL;
}
// rcu_read_unlock();
rcu_read_unlock();
 
return base;
}
262,6 → 256,7
struct drm_open_hash *ht = &tdev->object_hash;
int ret;
 
rcu_read_lock();
ret = drm_ht_find_item_rcu(ht, key, &hash);
 
if (likely(ret == 0)) {
269,6 → 264,7
if (!kref_get_unless_zero(&base->refcount))
base = NULL;
}
rcu_read_unlock();
 
return base;
}
288,15 → 284,18
*existed = true;
 
while (ret == -EINVAL) {
rcu_read_lock();
ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
 
if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
if (!kref_get_unless_zero(&ref->kref)) {
if (kref_get_unless_zero(&ref->kref)) {
rcu_read_unlock();
break;
}
}
 
rcu_read_unlock();
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
false, false);
if (unlikely(ret != 0))
355,7 → 354,7
 
ttm_base_object_unref(&ref->obj);
ttm_mem_global_free(mem_glob, sizeof(*ref));
kfree(ref);
kfree_rcu(ref, rcu_head);
spin_lock(&tfile->lock);
}
 
462,11 → 461,11
if (ret != 0)
goto out_no_object_hash;
 
// tdev->ops = *ops;
// tdev->dmabuf_release = tdev->ops.release;
// tdev->ops.release = ttm_prime_dmabuf_release;
// tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
// ttm_round_pot(sizeof(struct file));
tdev->ops = *ops;
tdev->dmabuf_release = tdev->ops.release;
tdev->ops.release = ttm_prime_dmabuf_release;
tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
ttm_round_pot(sizeof(struct file));
return tdev;
 
out_no_object_hash:
488,3 → 487,230
kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);
 
/**
* get_dma_buf_unless_doomed - get a dma_buf reference if possible.
*
* @dma_buf: Non-refcounted pointer to a struct dma-buf.
*
* Obtain a file reference from a lookup structure that doesn't refcount
* the file, but synchronizes with its release method to make sure it has
* not been freed yet. See for example kref_get_unless_zero documentation.
* Returns true if refcounting succeeds, false otherwise.
*
* Nobody really wants this as a public API yet, so let it mature here
* for some time...
*/
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
}
 
/**
* ttm_prime_refcount_release - refcount release method for a prime object.
*
* @p_base: Pointer to ttm_base_object pointer.
*
* This is a wrapper that calls the refcount_release function of the
* underlying object. At the same time it cleans up the prime object.
* This function is called when all references to the base object we
* derive from are gone.
*/
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct ttm_prime_object *prime;
 
*p_base = NULL;
prime = container_of(base, struct ttm_prime_object, base);
BUG_ON(prime->dma_buf != NULL);
mutex_destroy(&prime->mutex);
if (prime->refcount_release)
prime->refcount_release(&base);
}
 
/**
* ttm_prime_dmabuf_release - Release method for the dma-bufs we export
*
* @dma_buf:
*
* This function first calls the dma_buf release method the driver
* provides. Then it cleans up our dma_buf pointer used for lookup,
* and finally releases the reference the dma_buf has on our base
* object.
*/
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
struct ttm_prime_object *prime =
(struct ttm_prime_object *) dma_buf->priv;
struct ttm_base_object *base = &prime->base;
struct ttm_object_device *tdev = base->tfile->tdev;
 
if (tdev->dmabuf_release)
tdev->dmabuf_release(dma_buf);
mutex_lock(&prime->mutex);
if (prime->dma_buf == dma_buf)
prime->dma_buf = NULL;
mutex_unlock(&prime->mutex);
ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
ttm_base_object_unref(&base);
}
 
/**
* ttm_prime_fd_to_handle - Get a base object handle from a prime fd
*
* @tfile: A struct ttm_object_file identifying the caller.
* @fd: The prime / dmabuf fd.
* @handle: The returned handle.
*
* This function returns a handle to an object that previously exported
* a dma-buf. Note that we don't handle imports yet, because we simply
* have no consumers of that implementation.
*/
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
int fd, u32 *handle)
{
struct ttm_object_device *tdev = tfile->tdev;
struct dma_buf *dma_buf;
struct ttm_prime_object *prime;
struct ttm_base_object *base;
int ret;
 
dma_buf = dma_buf_get(fd);
if (IS_ERR(dma_buf))
return PTR_ERR(dma_buf);
 
if (dma_buf->ops != &tdev->ops)
return -ENOSYS;
 
prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base;
*handle = base->hash.key;
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
 
dma_buf_put(dma_buf);
 
return ret;
}
EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
 
/**
* ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
*
* @tfile: Struct ttm_object_file identifying the caller.
* @handle: Handle to the object we're exporting from.
* @flags: flags for dma-buf creation. We just pass them on.
* @prime_fd: The returned file descriptor.
*
*/
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
uint32_t handle, uint32_t flags,
int *prime_fd)
{
struct ttm_object_device *tdev = tfile->tdev;
struct ttm_base_object *base;
struct dma_buf *dma_buf;
struct ttm_prime_object *prime;
int ret;
 
base = ttm_base_object_lookup(tfile, handle);
if (unlikely(base == NULL ||
base->object_type != ttm_prime_type)) {
ret = -ENOENT;
goto out_unref;
}
 
prime = container_of(base, struct ttm_prime_object, base);
if (unlikely(!base->shareable)) {
ret = -EPERM;
goto out_unref;
}
 
ret = mutex_lock_interruptible(&prime->mutex);
if (unlikely(ret != 0)) {
ret = -ERESTARTSYS;
goto out_unref;
}
 
dma_buf = prime->dma_buf;
if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
exp_info.ops = &tdev->ops;
exp_info.size = prime->size;
exp_info.flags = flags;
exp_info.priv = prime;
 
/*
* Need to create a new dma_buf, with memory accounting.
*/
ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
false, true);
if (unlikely(ret != 0)) {
mutex_unlock(&prime->mutex);
goto out_unref;
}
 
dma_buf = dma_buf_export(&exp_info);
if (IS_ERR(dma_buf)) {
ret = PTR_ERR(dma_buf);
ttm_mem_global_free(tdev->mem_glob,
tdev->dma_buf_size);
mutex_unlock(&prime->mutex);
goto out_unref;
}
 
/*
* dma_buf has taken the base object reference
*/
base = NULL;
prime->dma_buf = dma_buf;
}
mutex_unlock(&prime->mutex);
 
ret = dma_buf_fd(dma_buf, flags);
if (ret >= 0) {
*prime_fd = ret;
ret = 0;
} else
dma_buf_put(dma_buf);
 
out_unref:
if (base)
ttm_base_object_unref(&base);
return ret;
}
EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
 
/**
* ttm_prime_object_init - Initialize a ttm_prime_object
*
* @tfile: struct ttm_object_file identifying the caller
* @size: The size of the dma_bufs we export.
* @prime: The object to be initialized.
* @shareable: See ttm_base_object_init
* @type: See ttm_base_object_init
* @refcount_release: See ttm_base_object_init
* @ref_obj_release: See ttm_base_object_init
*
* Initializes an object which is compatible with the drm_prime model
* for data sharing between processes and devices.
*/
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
struct ttm_prime_object *prime, bool shareable,
enum ttm_object_type type,
void (*refcount_release) (struct ttm_base_object **),
void (*ref_obj_release) (struct ttm_base_object *,
enum ttm_ref_type ref_type))
{
mutex_init(&prime->mutex);
prime->size = PAGE_ALIGN(size);
prime->real_type = type;
prime->dma_buf = NULL;
prime->refcount_release = refcount_release;
return ttm_base_object_init(tfile, &prime->base, shareable,
ttm_prime_type,
ttm_prime_refcount_release,
ref_obj_release);
}
EXPORT_SYMBOL(ttm_prime_object_init);
/drivers/video/drm/ttm/ttm_page_alloc.c
41,7 → 41,7
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
//#include <linux/dma-mapping.h>
#include <linux/dma-mapping.h>
 
#include <linux/atomic.h>
 
122,6 → 122,12
};
};
 
static void ttm_pool_kobj_release(struct kobject *kobj)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
kfree(m);
}
 
static struct ttm_pool_manager *_manager;
 
/drivers/video/drm/vmwgfx/Makefile
1,27 → 1,30
 
 
CC = gcc
LD = ld
AS = as
CC = kos32-gcc
FASM = fasm.exe
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32
DEFINES = -DDRM_DEBUG_CODE=0 -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_PCI -DCONFIG_X86_CMPXCHG64
DEFINES += -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES+= -DKBUILD_MODNAME=\"vmwgfx.dll\"
 
DRV_TOPDIR = $(CURDIR)/../../..
DRV_INCLUDES = $(DRV_TOPDIR)/include
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRM_TOPDIR = $(CURDIR)/..
 
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
-I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/uapi
INCLUDES = -I$(DRV_INCLUDES) \
-I$(DRV_INCLUDES)/asm \
-I$(DRV_INCLUDES)/uapi \
-I$(DRV_INCLUDES)/drm -I./ -I$(DRV_INCLUDES)
 
CFLAGS = -c -Os $(INCLUDES) -fomit-frame-pointer -fno-builtin-printf
CFLAGS+= -mno-ms-bitfields
CFLAGS= -c -O2 $(INCLUDES) $(DEFINES) -march=i686 -fno-ident -msse2 -fomit-frame-pointer -fno-builtin-printf
CFLAGS+= -mno-stack-arg-probe -mno-ms-bitfields
 
LIBPATH:= $(DRV_TOPDIR)/ddk
LIBPATH:= $(DDK_TOPDIR)
 
LIBS:= -lddk -lcore -lgcc
 
LDFLAGS = -nostdlib -shared -s --image-base 0\
PE_FLAGS = --major-os-version 0 --minor-os-version 7 --major-subsystem-version 0 \
--minor-subsystem-version 5 --subsystem native
 
LDFLAGS = -nostdlib -shared -s $(PE_FLAGS) --image-base 0\
--file-alignment 512 --section-alignment 4096
 
 
38,13 → 41,16
$(DRV_INCLUDES)/drm/drm_mm.h
 
NAME_SRC= \
main.c \
pci.c \
vmwgfx_binding.c \
vmwgfx_buffer.c \
vmwgfx_cmdbuf.c \
vmwgfx_cmdbuf_res.c \
vmwgfx_context.c \
vmwgfx_cotable.c \
vmwgfx_dmabuf.c \
vmwgfx_drv.c \
vmwgfx_execbuf.c \
vmwgfx_fb.c \
vmwgfx_fence.c \
vmwgfx_fifo.c \
vmwgfx_gmr.c \
51,17 → 57,25
vmwgfx_gmrid_manager.c \
vmwgfx_irq.c \
vmwgfx_kms.c \
vmwgfx_ldu.c \
vmwgfx_marker.c \
vmwgfx_mob.c \
vmwgfx_overlay.c \
vmwgfx_prime.c \
vmwgfx_resource.c \
vmwgfx_scrn.c \
vmwgfx_shader.c \
vmwgfx_so.c \
vmwgfx_stdu.c \
vmwgfx_surface.c \
vmwgfx_ttm_glue.c \
main.c \
pci.c \
../hdmi.c \
../i2c/i2c-core.c \
../ttm/ttm_bo.c \
../ttm/ttm_bo_manager.c \
../ttm/ttm_bo_util.c \
../ttm/ttm_execbuf_util.c \
../ttm/ttm_lock.c \
../ttm/ttm_memory.c \
68,6 → 82,10
../ttm/ttm_object.c \
../ttm/ttm_page_alloc.c \
../ttm/ttm_tt.c \
../drm_atomic.c \
../drm_atomic_helper.c \
../drm_bridge.c \
../drm_cache.c \
../drm_crtc.c \
../drm_crtc_helper.c \
../drm_drv.c \
78,7 → 96,10
../drm_irq.c \
../drm_mm.c \
../drm_modes.c \
../drm_modeset_lock.c \
../drm_pci.c \
../drm_plane_helper.c \
../drm_rect.c \
../drm_stub.c \
../drm_vma_manager.c
 
92,7 → 113,7
 
all: $(NAME).dll
 
$(NAME).dll: $(NAME_OBJS) $(LIBPATH)/libcore.a $(LIBPATH)/libddk.a vmw.lds Makefile
$(NAME).dll: $(NAME_OBJS) vmw.lds Makefile
$(LD) -L$(LIBPATH) $(LDFLAGS) -T vmw.lds -o $@ $(NAME_OBJS) $(LIBS)
 
 
/drivers/video/drm/vmwgfx/device_include/includeCheck.h
0,0 → 1,3
/*
* Intentionally empty file.
*/
/drivers/video/drm/vmwgfx/device_include/svga3d_caps.h
0,0 → 1,110
/**********************************************************
* Copyright 2007-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
 
/*
* svga3d_caps.h --
*
* Definitions for SVGA3D hardware capabilities. Capabilities
* are used to query for optional rendering features during
* driver initialization. The capability data is stored as very
* basic key/value dictionary within the "FIFO register" memory
* area at the beginning of BAR2.
*
* Note that these definitions are only for 3D capabilities.
* The SVGA device also has "device capabilities" and "FIFO
* capabilities", which are non-3D-specific and are stored as
* bitfields rather than key/value pairs.
*/
 
#ifndef _SVGA3D_CAPS_H_
#define _SVGA3D_CAPS_H_
 
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_USERLEVEL
 
#include "includeCheck.h"
 
#include "svga_reg.h"
 
#define SVGA_FIFO_3D_CAPS_SIZE (SVGA_FIFO_3D_CAPS_LAST - \
SVGA_FIFO_3D_CAPS + 1)
 
 
/*
* SVGA3dCapsRecordType
*
* Record types that can be found in the caps block.
* Related record types are grouped together numerically so that
* SVGA3dCaps_FindRecord() can be applied on a range of record
* types.
*/
 
typedef enum {
SVGA3DCAPS_RECORD_UNKNOWN = 0,
SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
} SVGA3dCapsRecordType;
 
 
/*
* SVGA3dCapsRecordHeader
*
* Header field leading each caps block record. Contains the offset (in
* register words, NOT bytes) to the next caps block record (or the end
* of caps block records which will be a zero word) and the record type
* as defined above.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCapsRecordHeader {
uint32 length;
SVGA3dCapsRecordType type;
}
#include "vmware_pack_end.h"
SVGA3dCapsRecordHeader;
 
 
/*
* SVGA3dCapsRecord
*
* Caps block record; "data" is a placeholder for the actual data structure
* contained within the record;
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCapsRecord {
SVGA3dCapsRecordHeader header;
uint32 data[1];
}
#include "vmware_pack_end.h"
SVGA3dCapsRecord;
 
 
typedef uint32 SVGA3dCapPair[2];
 
#endif
/drivers/video/drm/vmwgfx/device_include/svga3d_cmd.h
0,0 → 1,2071
/**********************************************************
* Copyright 1998-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
 
/*
* svga3d_cmd.h --
*
* SVGA 3d hardware cmd definitions
*/
 
#ifndef _SVGA3D_CMD_H_
#define _SVGA3D_CMD_H_
 
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMCORE
 
#include "includeCheck.h"
#include "svga3d_types.h"
 
/*
* Identifiers for commands in the command FIFO.
*
* IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
* the SVGA3D protocol and remain reserved; they should not be used in the
* future.
*
* IDs between 1040 and 1999 (inclusive) are available for use by the
* current SVGA3D protocol.
*
* FIFO clients other than SVGA3D should stay below 1000, or at 2000
* and up.
*/
 
typedef enum {
SVGA_3D_CMD_LEGACY_BASE = 1000,
SVGA_3D_CMD_BASE = 1040,
 
SVGA_3D_CMD_SURFACE_DEFINE = 1040,
SVGA_3D_CMD_SURFACE_DESTROY = 1041,
SVGA_3D_CMD_SURFACE_COPY = 1042,
SVGA_3D_CMD_SURFACE_STRETCHBLT = 1043,
SVGA_3D_CMD_SURFACE_DMA = 1044,
SVGA_3D_CMD_CONTEXT_DEFINE = 1045,
SVGA_3D_CMD_CONTEXT_DESTROY = 1046,
SVGA_3D_CMD_SETTRANSFORM = 1047,
SVGA_3D_CMD_SETZRANGE = 1048,
SVGA_3D_CMD_SETRENDERSTATE = 1049,
SVGA_3D_CMD_SETRENDERTARGET = 1050,
SVGA_3D_CMD_SETTEXTURESTATE = 1051,
SVGA_3D_CMD_SETMATERIAL = 1052,
SVGA_3D_CMD_SETLIGHTDATA = 1053,
SVGA_3D_CMD_SETLIGHTENABLED = 1054,
SVGA_3D_CMD_SETVIEWPORT = 1055,
SVGA_3D_CMD_SETCLIPPLANE = 1056,
SVGA_3D_CMD_CLEAR = 1057,
SVGA_3D_CMD_PRESENT = 1058,
SVGA_3D_CMD_SHADER_DEFINE = 1059,
SVGA_3D_CMD_SHADER_DESTROY = 1060,
SVGA_3D_CMD_SET_SHADER = 1061,
SVGA_3D_CMD_SET_SHADER_CONST = 1062,
SVGA_3D_CMD_DRAW_PRIMITIVES = 1063,
SVGA_3D_CMD_SETSCISSORRECT = 1064,
SVGA_3D_CMD_BEGIN_QUERY = 1065,
SVGA_3D_CMD_END_QUERY = 1066,
SVGA_3D_CMD_WAIT_FOR_QUERY = 1067,
SVGA_3D_CMD_PRESENT_READBACK = 1068,
SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN = 1069,
SVGA_3D_CMD_SURFACE_DEFINE_V2 = 1070,
SVGA_3D_CMD_GENERATE_MIPMAPS = 1071,
SVGA_3D_CMD_VIDEO_CREATE_DECODER = 1072,
SVGA_3D_CMD_VIDEO_DESTROY_DECODER = 1073,
SVGA_3D_CMD_VIDEO_CREATE_PROCESSOR = 1074,
SVGA_3D_CMD_VIDEO_DESTROY_PROCESSOR = 1075,
SVGA_3D_CMD_VIDEO_DECODE_START_FRAME = 1076,
SVGA_3D_CMD_VIDEO_DECODE_RENDER = 1077,
SVGA_3D_CMD_VIDEO_DECODE_END_FRAME = 1078,
SVGA_3D_CMD_VIDEO_PROCESS_FRAME = 1079,
SVGA_3D_CMD_ACTIVATE_SURFACE = 1080,
SVGA_3D_CMD_DEACTIVATE_SURFACE = 1081,
SVGA_3D_CMD_SCREEN_DMA = 1082,
SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE = 1083,
SVGA_3D_CMD_OPEN_CONTEXT_SURFACE = 1084,
 
SVGA_3D_CMD_LOGICOPS_BITBLT = 1085,
SVGA_3D_CMD_LOGICOPS_TRANSBLT = 1086,
SVGA_3D_CMD_LOGICOPS_STRETCHBLT = 1087,
SVGA_3D_CMD_LOGICOPS_COLORFILL = 1088,
SVGA_3D_CMD_LOGICOPS_ALPHABLEND = 1089,
SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND = 1090,
 
SVGA_3D_CMD_SET_OTABLE_BASE = 1091,
SVGA_3D_CMD_READBACK_OTABLE = 1092,
 
SVGA_3D_CMD_DEFINE_GB_MOB = 1093,
SVGA_3D_CMD_DESTROY_GB_MOB = 1094,
SVGA_3D_CMD_DEAD3 = 1095,
SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING = 1096,
 
SVGA_3D_CMD_DEFINE_GB_SURFACE = 1097,
SVGA_3D_CMD_DESTROY_GB_SURFACE = 1098,
SVGA_3D_CMD_BIND_GB_SURFACE = 1099,
SVGA_3D_CMD_COND_BIND_GB_SURFACE = 1100,
SVGA_3D_CMD_UPDATE_GB_IMAGE = 1101,
SVGA_3D_CMD_UPDATE_GB_SURFACE = 1102,
SVGA_3D_CMD_READBACK_GB_IMAGE = 1103,
SVGA_3D_CMD_READBACK_GB_SURFACE = 1104,
SVGA_3D_CMD_INVALIDATE_GB_IMAGE = 1105,
SVGA_3D_CMD_INVALIDATE_GB_SURFACE = 1106,
 
SVGA_3D_CMD_DEFINE_GB_CONTEXT = 1107,
SVGA_3D_CMD_DESTROY_GB_CONTEXT = 1108,
SVGA_3D_CMD_BIND_GB_CONTEXT = 1109,
SVGA_3D_CMD_READBACK_GB_CONTEXT = 1110,
SVGA_3D_CMD_INVALIDATE_GB_CONTEXT = 1111,
 
SVGA_3D_CMD_DEFINE_GB_SHADER = 1112,
SVGA_3D_CMD_DESTROY_GB_SHADER = 1113,
SVGA_3D_CMD_BIND_GB_SHADER = 1114,
 
SVGA_3D_CMD_SET_OTABLE_BASE64 = 1115,
 
SVGA_3D_CMD_BEGIN_GB_QUERY = 1116,
SVGA_3D_CMD_END_GB_QUERY = 1117,
SVGA_3D_CMD_WAIT_FOR_GB_QUERY = 1118,
 
SVGA_3D_CMD_NOP = 1119,
 
SVGA_3D_CMD_ENABLE_GART = 1120,
SVGA_3D_CMD_DISABLE_GART = 1121,
SVGA_3D_CMD_MAP_MOB_INTO_GART = 1122,
SVGA_3D_CMD_UNMAP_GART_RANGE = 1123,
 
SVGA_3D_CMD_DEFINE_GB_SCREENTARGET = 1124,
SVGA_3D_CMD_DESTROY_GB_SCREENTARGET = 1125,
SVGA_3D_CMD_BIND_GB_SCREENTARGET = 1126,
SVGA_3D_CMD_UPDATE_GB_SCREENTARGET = 1127,
 
SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL = 1128,
SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL = 1129,
 
SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE = 1130,
 
SVGA_3D_CMD_GB_SCREEN_DMA = 1131,
SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH = 1132,
SVGA_3D_CMD_GB_MOB_FENCE = 1133,
SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 = 1134,
SVGA_3D_CMD_DEFINE_GB_MOB64 = 1135,
SVGA_3D_CMD_REDEFINE_GB_MOB64 = 1136,
SVGA_3D_CMD_NOP_ERROR = 1137,
 
SVGA_3D_CMD_SET_VERTEX_STREAMS = 1138,
SVGA_3D_CMD_SET_VERTEX_DECLS = 1139,
SVGA_3D_CMD_SET_VERTEX_DIVISORS = 1140,
SVGA_3D_CMD_DRAW = 1141,
SVGA_3D_CMD_DRAW_INDEXED = 1142,
 
/*
* DX10 Commands
*/
SVGA_3D_CMD_DX_MIN = 1143,
SVGA_3D_CMD_DX_DEFINE_CONTEXT = 1143,
SVGA_3D_CMD_DX_DESTROY_CONTEXT = 1144,
SVGA_3D_CMD_DX_BIND_CONTEXT = 1145,
SVGA_3D_CMD_DX_READBACK_CONTEXT = 1146,
SVGA_3D_CMD_DX_INVALIDATE_CONTEXT = 1147,
SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER = 1148,
SVGA_3D_CMD_DX_SET_SHADER_RESOURCES = 1149,
SVGA_3D_CMD_DX_SET_SHADER = 1150,
SVGA_3D_CMD_DX_SET_SAMPLERS = 1151,
SVGA_3D_CMD_DX_DRAW = 1152,
SVGA_3D_CMD_DX_DRAW_INDEXED = 1153,
SVGA_3D_CMD_DX_DRAW_INSTANCED = 1154,
SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED = 1155,
SVGA_3D_CMD_DX_DRAW_AUTO = 1156,
SVGA_3D_CMD_DX_SET_INPUT_LAYOUT = 1157,
SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS = 1158,
SVGA_3D_CMD_DX_SET_INDEX_BUFFER = 1159,
SVGA_3D_CMD_DX_SET_TOPOLOGY = 1160,
SVGA_3D_CMD_DX_SET_RENDERTARGETS = 1161,
SVGA_3D_CMD_DX_SET_BLEND_STATE = 1162,
SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE = 1163,
SVGA_3D_CMD_DX_SET_RASTERIZER_STATE = 1164,
SVGA_3D_CMD_DX_DEFINE_QUERY = 1165,
SVGA_3D_CMD_DX_DESTROY_QUERY = 1166,
SVGA_3D_CMD_DX_BIND_QUERY = 1167,
SVGA_3D_CMD_DX_SET_QUERY_OFFSET = 1168,
SVGA_3D_CMD_DX_BEGIN_QUERY = 1169,
SVGA_3D_CMD_DX_END_QUERY = 1170,
SVGA_3D_CMD_DX_READBACK_QUERY = 1171,
SVGA_3D_CMD_DX_SET_PREDICATION = 1172,
SVGA_3D_CMD_DX_SET_SOTARGETS = 1173,
SVGA_3D_CMD_DX_SET_VIEWPORTS = 1174,
SVGA_3D_CMD_DX_SET_SCISSORRECTS = 1175,
SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW = 1176,
SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW = 1177,
SVGA_3D_CMD_DX_PRED_COPY_REGION = 1178,
SVGA_3D_CMD_DX_PRED_COPY = 1179,
SVGA_3D_CMD_DX_STRETCHBLT = 1180,
SVGA_3D_CMD_DX_GENMIPS = 1181,
SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE = 1182,
SVGA_3D_CMD_DX_READBACK_SUBRESOURCE = 1183,
SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE = 1184,
SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW = 1185,
SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW = 1186,
SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW = 1187,
SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW = 1188,
SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW = 1189,
SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW = 1190,
SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT = 1191,
SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT = 1192,
SVGA_3D_CMD_DX_DEFINE_BLEND_STATE = 1193,
SVGA_3D_CMD_DX_DESTROY_BLEND_STATE = 1194,
SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE = 1195,
SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE = 1196,
SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE = 1197,
SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE = 1198,
SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE = 1199,
SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE = 1200,
SVGA_3D_CMD_DX_DEFINE_SHADER = 1201,
SVGA_3D_CMD_DX_DESTROY_SHADER = 1202,
SVGA_3D_CMD_DX_BIND_SHADER = 1203,
SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT = 1204,
SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT = 1205,
SVGA_3D_CMD_DX_SET_STREAMOUTPUT = 1206,
SVGA_3D_CMD_DX_SET_COTABLE = 1207,
SVGA_3D_CMD_DX_READBACK_COTABLE = 1208,
SVGA_3D_CMD_DX_BUFFER_COPY = 1209,
SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER = 1210,
SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK = 1211,
SVGA_3D_CMD_DX_MOVE_QUERY = 1212,
SVGA_3D_CMD_DX_BIND_ALL_QUERY = 1213,
SVGA_3D_CMD_DX_READBACK_ALL_QUERY = 1214,
SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER = 1215,
SVGA_3D_CMD_DX_MOB_FENCE_64 = 1216,
SVGA_3D_CMD_DX_BIND_SHADER_ON_CONTEXT = 1217,
SVGA_3D_CMD_DX_HINT = 1218,
SVGA_3D_CMD_DX_BUFFER_UPDATE = 1219,
SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET = 1220,
SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET = 1221,
SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET = 1222,
 
/*
* Reserve some IDs to be used for the DX11 shader types.
*/
SVGA_3D_CMD_DX_RESERVED1 = 1223,
SVGA_3D_CMD_DX_RESERVED2 = 1224,
SVGA_3D_CMD_DX_RESERVED3 = 1225,
 
SVGA_3D_CMD_DX_MAX = 1226,
SVGA_3D_CMD_MAX = 1226,
SVGA_3D_CMD_FUTURE_MAX = 3000
} SVGAFifo3dCmdId;
 
/*
* FIFO command format definitions:
*/
 
/*
* The data size header following cmdNum for every 3d command
*/
typedef
#include "vmware_pack_begin.h"
struct {
uint32 id;
uint32 size;
}
#include "vmware_pack_end.h"
SVGA3dCmdHeader;
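 
/*
* Example: every 3D command in the FIFO is an SVGA3dCmdHeader immediately
* followed by 'size' bytes of command-specific payload. A minimal emission
* sketch (editor's illustration; fifo_reserve()/fifo_commit() are
* hypothetical placeholders for the driver's FIFO primitives, and
* SVGA3dCmdUpdateGBSurface is defined later in this header):
*
*   SVGA3dCmdHeader *header;
*   SVGA3dCmdUpdateGBSurface *body;
*
*   header = fifo_reserve(sizeof(*header) + sizeof(*body));
*   header->id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
*   header->size = sizeof(*body);
*   body = (SVGA3dCmdUpdateGBSurface *)(header + 1);
*   body->sid = surface_id;
*   fifo_commit(sizeof(*header) + sizeof(*body));
*/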
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 numMipLevels;
}
#include "vmware_pack_end.h"
SVGA3dSurfaceFace;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 sid;
SVGA3dSurfaceFlags surfaceFlags;
SVGA3dSurfaceFormat format;
/*
* If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
* structures must have the same value of numMipLevels field.
* Otherwise, all but the first SVGA3dSurfaceFace structures must have the
* numMipLevels set to 0.
*/
SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
/*
* Followed by an SVGA3dSize structure for each mip level in each face.
*
* A note on surface sizes: Sizes are always specified in pixels,
* even if the true surface size is not a multiple of the minimum
* block size of the surface's format. For example, a 3x3x1 DXT1
* compressed texture would actually be stored as a 4x4x1 image in
* memory.
*/
}
#include "vmware_pack_end.h"
SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 sid;
SVGA3dSurfaceFlags surfaceFlags;
SVGA3dSurfaceFormat format;
/*
* If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
* structures must have the same value of numMipLevels field.
* Otherwise, all but the first SVGA3dSurfaceFace structures must have the
* numMipLevels set to 0.
*/
SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
uint32 multisampleCount;
SVGA3dTextureFilter autogenFilter;
/*
* Followed by an SVGA3dSize structure for each mip level in each face.
*
* A note on surface sizes: Sizes are always specified in pixels,
* even if the true surface size is not a multiple of the minimum
* block size of the surface's format. For example, a 3x3x1 DXT1
* compressed texture would actually be stored as a 4x4x1 image in
* memory.
*/
}
#include "vmware_pack_end.h"
SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 sid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
SVGA3dClearFlag clearFlag;
uint32 color;
float depth;
uint32 stencil;
/* Followed by variable number of SVGA3dRect structures */
}
#include "vmware_pack_end.h"
SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dLightType type;
SVGA3dBool inWorldSpace;
float diffuse[4];
float specular[4];
float ambient[4];
float position[4];
float direction[4];
float range;
float falloff;
float attenuation0;
float attenuation1;
float attenuation2;
float theta;
float phi;
}
#include "vmware_pack_end.h"
SVGA3dLightData;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 sid;
/* Followed by variable number of SVGA3dCopyRect structures */
}
#include "vmware_pack_end.h"
SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dRenderStateName state;
union {
uint32 uintValue;
float floatValue;
};
}
#include "vmware_pack_end.h"
SVGA3dRenderState;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
/* Followed by variable number of SVGA3dRenderState structures */
}
#include "vmware_pack_end.h"
SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
SVGA3dRenderTargetType type;
SVGA3dSurfaceImageId target;
}
#include "vmware_pack_end.h"
SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dSurfaceImageId src;
SVGA3dSurfaceImageId dest;
/* Followed by variable number of SVGA3dCopyBox structures */
}
#include "vmware_pack_end.h"
SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dSurfaceImageId src;
SVGA3dSurfaceImageId dest;
SVGA3dBox boxSrc;
SVGA3dBox boxDest;
SVGA3dStretchBltMode mode;
}
#include "vmware_pack_end.h"
SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
 
typedef
#include "vmware_pack_begin.h"
struct {
/*
* If the discard flag is present in a surface DMA operation, the host may
* discard the contents of the current mipmap level and face of the target
* surface before applying the surface DMA contents.
*/
uint32 discard : 1;
 
/*
* If the unsynchronized flag is present, the host may perform this upload
* without syncing to pending reads on this surface.
*/
uint32 unsynchronized : 1;
 
/*
* Guests *MUST* set the reserved bits to 0 before submitting the command
* suffix as future flags may occupy these bits.
*/
uint32 reserved : 30;
}
#include "vmware_pack_end.h"
SVGA3dSurfaceDMAFlags;
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGAGuestImage guest;
SVGA3dSurfaceImageId host;
SVGA3dTransferType transfer;
/*
* Followed by variable number of SVGA3dCopyBox structures. For consistency
* in all clipping logic and coordinate translation, we define the
* "source" in each copyBox as the guest image and the
* "destination" as the host image, regardless of transfer
* direction.
*
* For efficiency, the SVGA3D device is free to copy more data than
* specified. For example, it may round copy boxes outwards such
* that they lie on particular alignment boundaries.
*/
}
#include "vmware_pack_end.h"
SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */
 
/*
* SVGA3dCmdSurfaceDMASuffix --
*
* This is a command suffix that will appear after a SurfaceDMA command in
* the FIFO. It contains some extra information that hosts may use to
* optimize performance or protect the guest. This suffix exists to preserve
* backwards compatibility while also allowing for new functionality to be
* implemented.
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 suffixSize;
 
/*
* The maximum offset is used to determine the maximum offset from the
* guestPtr base address that will be accessed or written to during this
* surfaceDMA. If the suffix is supported, the host will respect this
* boundary while performing surface DMAs.
*
* Defaults to MAX_UINT32
*/
uint32 maximumOffset;
 
/*
* A set of flags that describes optimizations that the host may perform
* while performing this surface DMA operation. The guest should never rely
* on behaviour that is different when these flags are set for correctness.
*
* Defaults to 0
*/
SVGA3dSurfaceDMAFlags flags;
}
#include "vmware_pack_end.h"
SVGA3dCmdSurfaceDMASuffix;
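 
/*
* Example layout of a complete SURFACE_DMA packet (editor's sketch): an
* SVGA3dCmdHeader, the SVGA3dCmdSurfaceDMA body, 'numBoxes' SVGA3dCopyBox
* structures, then the SVGA3dCmdSurfaceDMASuffix. The header 'size' covers
* everything after the header, including the suffix;
* fifo_reserve()/fifo_commit() are hypothetical driver primitives.
*
*   uint32 bodySize = sizeof(SVGA3dCmdSurfaceDMA) +
*                     numBoxes * sizeof(SVGA3dCopyBox) +
*                     sizeof(SVGA3dCmdSurfaceDMASuffix);
*   SVGA3dCmdHeader *header = fifo_reserve(sizeof(*header) + bodySize);
*   SVGA3dCmdSurfaceDMA *dma = (SVGA3dCmdSurfaceDMA *)(header + 1);
*   SVGA3dCopyBox *boxes = (SVGA3dCopyBox *)(dma + 1);
*   SVGA3dCmdSurfaceDMASuffix *suffix =
*      (SVGA3dCmdSurfaceDMASuffix *)(boxes + numBoxes);
*
*   header->id = SVGA_3D_CMD_SURFACE_DMA;
*   header->size = bodySize;
*   // ... fill dma->guest, dma->host, dma->transfer and boxes[] ...
*   suffix->suffixSize = sizeof(*suffix);
*   suffix->maximumOffset = guest_buffer_size;   // defaults to MAX_UINT32
*   memset(&suffix->flags, 0, sizeof(suffix->flags));
*   fifo_commit(sizeof(*header) + bodySize);
*/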
 
/*
* SVGA_3D_CMD_DRAW_PRIMITIVES --
*
* This command is the SVGA3D device's generic drawing entry point.
* It can draw multiple ranges of primitives, optionally using an
* index buffer, using an arbitrary collection of vertex buffers.
*
* Each SVGA3dVertexDecl defines a distinct vertex array to bind
* during this draw call. The declarations specify which surface
* the vertex data lives in, what that vertex data is used for,
* and how to interpret it.
*
* Each SVGA3dPrimitiveRange defines a collection of primitives
* to render using the same vertex arrays. An index buffer is
* optional.
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
/*
* A range hint is an optional specification for the range of indices
* in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
* that the entire array will be used.
*
* These are only hints. The SVGA3D device may use them for
* performance optimization if possible, but it's also allowed to
* ignore these values.
*/
uint32 first;
uint32 last;
}
#include "vmware_pack_end.h"
SVGA3dArrayRangeHint;
 
typedef
#include "vmware_pack_begin.h"
struct {
/*
* Define the origin and shape of a vertex or index array. Both
* 'offset' and 'stride' are in bytes. The provided surface will be
* reinterpreted as a flat array of bytes in the same format used
* by surface DMA operations. To avoid unnecessary conversions, the
* surface should be created with the SVGA3D_BUFFER format.
*
* Index 0 in the array starts 'offset' bytes into the surface.
* Index 1 begins at byte 'offset + stride', etc. Array indices may
* not be negative.
*/
uint32 surfaceId;
uint32 offset;
uint32 stride;
}
#include "vmware_pack_end.h"
SVGA3dArray;
 
typedef
#include "vmware_pack_begin.h"
struct {
/*
* Describe a vertex array's data type, and define how it is to be
* used by the fixed function pipeline or the vertex shader. It
* isn't useful to have two VertexDecls with the same
* VertexArrayIdentity in one draw call.
*/
SVGA3dDeclType type;
SVGA3dDeclMethod method;
SVGA3dDeclUsage usage;
uint32 usageIndex;
}
#include "vmware_pack_end.h"
SVGA3dVertexArrayIdentity;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dVertexDecl {
SVGA3dVertexArrayIdentity identity;
SVGA3dArray array;
SVGA3dArrayRangeHint rangeHint;
}
#include "vmware_pack_end.h"
SVGA3dVertexDecl;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dPrimitiveRange {
/*
* Define a group of primitives to render, from sequential indices.
*
* The values of 'primType' and 'primitiveCount' imply the
* total number of vertices that will be rendered.
*/
SVGA3dPrimitiveType primType;
uint32 primitiveCount;
 
/*
* Optional index buffer. If indexArray.surfaceId is
* SVGA3D_INVALID_ID, we render without an index buffer. Rendering
* without an index buffer is identical to rendering with an index
* buffer containing the sequence [0, 1, 2, 3, ...].
*
* If an index buffer is in use, indexWidth specifies the width in
* bytes of each index value. It must be less than or equal to
* indexArray.stride.
*
* (Currently, the SVGA3D device requires index buffers to be tightly
* packed. In other words, indexWidth == indexArray.stride)
*/
SVGA3dArray indexArray;
uint32 indexWidth;
 
/*
* Optional index bias. This number is added to all indices from
* indexArray before they are used as vertex array indices. This
* can be used in multiple ways:
*
* - When not using an indexArray, this bias can be used to
* specify where in the vertex arrays to begin rendering.
*
* - A positive number here is equivalent to increasing the
* offset in each vertex array.
*
* - A negative number can be used to render using a small
* vertex array and an index buffer that contains large
* values. This may be used by some applications that
* crop a vertex buffer without modifying their index
* buffer.
*
* Note that rendering with a negative bias value may be slower and
* use more memory than rendering with a positive or zero bias.
*/
int32 indexBias;
}
#include "vmware_pack_end.h"
SVGA3dPrimitiveRange;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
uint32 numVertexDecls;
uint32 numRanges;
 
/*
* There are two (optionally three) variable-size arrays after the
* SVGA3dCmdDrawPrimitives structure. In order,
* they are:
*
* 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
* SVGA3D_MAX_VERTEX_ARRAYS;
* 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
* SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
* 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
* the frequency divisor for the corresponding vertex decl).
*/
}
#include "vmware_pack_end.h"
SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAW_PRIMITIVES */
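 
/*
* Example layout of a DRAW_PRIMITIVES packet (editor's sketch, again using
* hypothetical fifo_reserve()/fifo_commit() helpers): the fixed structure is
* followed first by numVertexDecls SVGA3dVertexDecl entries and then by
* numRanges SVGA3dPrimitiveRange entries.
*
*   uint32 bodySize = sizeof(SVGA3dCmdDrawPrimitives) +
*                     numDecls * sizeof(SVGA3dVertexDecl) +
*                     numRanges * sizeof(SVGA3dPrimitiveRange);
*   SVGA3dCmdHeader *header = fifo_reserve(sizeof(*header) + bodySize);
*   SVGA3dCmdDrawPrimitives *cmd = (SVGA3dCmdDrawPrimitives *)(header + 1);
*   SVGA3dVertexDecl *decls = (SVGA3dVertexDecl *)(cmd + 1);
*   SVGA3dPrimitiveRange *ranges = (SVGA3dPrimitiveRange *)(decls + numDecls);
*
*   header->id = SVGA_3D_CMD_DRAW_PRIMITIVES;
*   header->size = bodySize;
*   cmd->cid = context_id;
*   cmd->numVertexDecls = numDecls;
*   cmd->numRanges = numRanges;
*   // ... fill decls[] and ranges[] ...
*   fifo_commit(sizeof(*header) + bodySize);
*/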
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
 
uint32 primitiveCount; /* How many primitives to render */
uint32 startVertexLocation; /* Which vertex do we start rendering at. */
 
uint8 primitiveType; /* SVGA3dPrimitiveType */
uint8 padding[3];
}
#include "vmware_pack_end.h"
SVGA3dCmdDraw;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
 
uint8 primitiveType; /* SVGA3dPrimitiveType */
 
uint32 indexBufferSid; /* Valid index buffer sid. */
uint32 indexBufferOffset; /* Byte offset into the vertex buffer, almost */
/* always 0 for DX9 guests, non-zero for OpenGL */
/* guests. We can't represent non-multiple of */
/* stride offsets in D3D9Renderer... */
uint8 indexBufferStride; /* Allowable values = 1, 2, or 4 */
 
int32 baseVertexLocation; /* Bias applied to the index when selecting a */
/* vertex from the streams, may be negative */
 
uint32 primitiveCount; /* How many primitives to render */
uint32 pad0;
uint16 pad1;
}
#include "vmware_pack_end.h"
SVGA3dCmdDrawIndexed;
 
typedef
#include "vmware_pack_begin.h"
struct {
/*
* Describe a vertex array's data type, and define how it is to be
* used by the fixed function pipeline or the vertex shader. It
* isn't useful to have two VertexDecls with the same
* VertexArrayIdentity in one draw call.
*/
uint16 streamOffset;
uint8 stream;
uint8 type; /* SVGA3dDeclType */
uint8 method; /* SVGA3dDeclMethod */
uint8 usage; /* SVGA3dDeclUsage */
uint8 usageIndex;
uint8 padding;
 
}
#include "vmware_pack_end.h"
SVGA3dVertexElement;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
 
uint32 numElements;
 
/*
* Followed by numElements SVGA3dVertexElement structures.
*
* If numElements < SVGA3D_MAX_VERTEX_ARRAYS, the remaining elements
* are cleared and will not be used by following draws.
*/
}
#include "vmware_pack_end.h"
SVGA3dCmdSetVertexDecls;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 sid;
uint32 stride;
uint32 offset;
}
#include "vmware_pack_end.h"
SVGA3dVertexStream;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
 
uint32 numStreams;
/*
* Followed by numStreams SVGA3dVertexStream structures.
*
* If numStreams < SVGA3D_MAX_VERTEX_ARRAYS, the remaining streams
* are cleared and will not be used by following draws.
*/
}
#include "vmware_pack_end.h"
SVGA3dCmdSetVertexStreams;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
uint32 numDivisors;
}
#include "vmware_pack_end.h"
SVGA3dCmdSetVertexDivisors;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 stage;
SVGA3dTextureStateName name;
union {
uint32 value;
float floatValue;
};
}
#include "vmware_pack_end.h"
SVGA3dTextureState;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
/* Followed by variable number of SVGA3dTextureState structures */
}
#include "vmware_pack_end.h"
SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
SVGA3dTransformType type;
float matrix[16];
}
#include "vmware_pack_end.h"
SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */
 
typedef
#include "vmware_pack_begin.h"
struct {
float min;
float max;
}
#include "vmware_pack_end.h"
SVGA3dZRange;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
SVGA3dZRange zRange;
}
#include "vmware_pack_end.h"
SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */
 
typedef
#include "vmware_pack_begin.h"
struct {
float diffuse[4];
float ambient[4];
float specular[4];
float emissive[4];
float shininess;
}
#include "vmware_pack_end.h"
SVGA3dMaterial;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
SVGA3dFace face;
SVGA3dMaterial material;
}
#include "vmware_pack_end.h"
SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
uint32 index;
SVGA3dLightData data;
}
#include "vmware_pack_end.h"
SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
uint32 index;
uint32 enabled;
}
#include "vmware_pack_end.h"
SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
SVGA3dRect rect;
}
#include "vmware_pack_end.h"
SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
SVGA3dRect rect;
}
#include "vmware_pack_end.h"
SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
uint32 index;
float plane[4];
}
#include "vmware_pack_end.h"
SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
uint32 shid;
SVGA3dShaderType type;
/* Followed by variable number of DWORDs for shader bytecode */
}
#include "vmware_pack_end.h"
SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
uint32 shid;
SVGA3dShaderType type;
}
#include "vmware_pack_end.h"
SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
uint32 reg; /* register number */
SVGA3dShaderType type;
SVGA3dShaderConstType ctype;
uint32 values[4];
 
/*
* Followed by a variable number of additional values.
*/
}
#include "vmware_pack_end.h"
SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
SVGA3dShaderType type;
uint32 shid;
}
#include "vmware_pack_end.h"
SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
SVGA3dQueryType type;
}
#include "vmware_pack_end.h"
SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
SVGA3dQueryType type;
SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */
}
#include "vmware_pack_end.h"
SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */
 
 
/*
* SVGA3D_CMD_WAIT_FOR_QUERY --
*
* Will read the SVGA3dQueryResult structure pointed to by guestResult,
* and if the state member is set to anything other than
* SVGA3D_QUERYSTATE_PENDING, this command will always be a no-op.
*
* Otherwise, in addition to the query explicitly waited for,
* all queries with the same type and issued with the same cid, for which
* an SVGA_3D_CMD_END_QUERY command has previously been sent, will
* be finished after execution of this command.
*
* A query will be identified by the gmrId and offset of the guestResult
* member. If the device can't find an SVGA_3D_CMD_END_QUERY that has
* been sent previously with an identical gmrId and offset, it will
* effectively end all queries with an identical type issued with the
* same cid, and the SVGA3dQueryResult structure pointed to by
* guestResult will not be written to. This property can be used to
* implement a query barrier for a given cid and query type.
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid; /* Same parameters passed to END_QUERY */
SVGA3dQueryType type;
SVGAGuestPtr guestResult;
}
#include "vmware_pack_end.h"
SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 totalSize; /* Set by guest before query is ended. */
SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */
union { /* Set by host on exit from PENDING state */
uint32 result32;
uint32 queryCookie; /* May be used to identify which QueryGetData this
result corresponds to. */
};
}
#include "vmware_pack_end.h"
SVGA3dQueryResult;
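 
/*
* Example of the guest-side query handshake (editor's sketch): the guest
* initializes the result structure to SVGA3D_QUERYSTATE_PENDING before
* sending SVGA_3D_CMD_END_QUERY, then either polls the state or emits
* SVGA_3D_CMD_WAIT_FOR_QUERY plus a fence and sleeps on the fence. The
* SVGA3D_QUERYSTATE_* values are the SVGA3dQueryState enumerators from
* svga3d_types.h; query_result_mapping and cpu_relax() are placeholders.
*
*   volatile SVGA3dQueryResult *result = query_result_mapping;
*
*   result->totalSize = sizeof(*result);
*   result->state = SVGA3D_QUERYSTATE_PENDING;
*   // ... emit SVGA_3D_CMD_END_QUERY with guestResult pointing at 'result' ...
*   while (result->state == SVGA3D_QUERYSTATE_PENDING)
*      cpu_relax();
*   if (result->state == SVGA3D_QUERYSTATE_SUCCEEDED)
*      samples = result->result32;
*/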
 
 
/*
* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
*
* This is a blit from an SVGA3D surface to a Screen Object.
* This blit must be directed at a specific screen.
*
* The blit copies from a rectangular region of an SVGA3D surface
* image to a rectangular region of a screen.
*
* This command takes an optional variable-length list of clipping
* rectangles after the body of the command. If no rectangles are
* specified, there is no clipping region. The entire destRect is
* drawn to. If one or more rectangles are included, they describe
* a clipping region. The clip rectangle coordinates are measured
* relative to the top-left corner of destRect.
*
* The srcImage must be from mip=0 face=0.
*
* This supports scaling if the src and dest are of different sizes.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dSurfaceImageId srcImage;
SVGASignedRect srcRect;
uint32 destScreenId; /* Screen Object ID */
SVGASignedRect destRect;
/* Clipping: zero or more SVGASignedRects follow */
}
#include "vmware_pack_end.h"
SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 sid;
SVGA3dTextureFilter filter;
}
#include "vmware_pack_end.h"
SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
 
 
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 sid;
}
#include "vmware_pack_end.h"
SVGA3dCmdActivateSurface; /* SVGA_3D_CMD_ACTIVATE_SURFACE */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 sid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDeactivateSurface; /* SVGA_3D_CMD_DEACTIVATE_SURFACE */
 
/*
* Screen DMA command
*
* Available with SVGA_FIFO_CAP_SCREEN_OBJECT_2. The SVGA_CAP_3D device
* cap bit is not required.
*
* - refBuffer and destBuffer are 32-bit BGRX; refBuffer and destBuffer may
* be different, but the guest must ensure that refBuffer holds exactly the
* contents that were in place when the host received the previous screen
* DMA command.
*
* - changeMap is generated by lib/blit and contains at least the changes
* since the last screen DMA that the host received.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdScreenDMA {
uint32 screenId;
SVGAGuestImage refBuffer;
SVGAGuestImage destBuffer;
SVGAGuestImage changeMap;
}
#include "vmware_pack_end.h"
SVGA3dCmdScreenDMA; /* SVGA_3D_CMD_SCREEN_DMA */
 
/*
* Set Unity Surface Cookie
*
* Associates the supplied cookie with the surface id for use with
* Unity. This cookie is a hint from guest to host; there is no way
* for the guest to read back the cookie, and the host is free to drop
* the cookie association at will. The default value for the cookie
* on all surfaces is 0.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdSetUnitySurfaceCookie {
uint32 sid;
uint64 cookie;
}
#include "vmware_pack_end.h"
SVGA3dCmdSetUnitySurfaceCookie; /* SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE */
 
/*
* Open a context-specific surface in a non-context-specific manner.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdOpenContextSurface {
uint32 sid;
}
#include "vmware_pack_end.h"
SVGA3dCmdOpenContextSurface; /* SVGA_3D_CMD_OPEN_CONTEXT_SURFACE */
 
 
/*
* Logic ops
*/
 
#define SVGA3D_LOTRANSBLT_HONORALPHA (0x01)
#define SVGA3D_LOSTRETCHBLT_MIRRORX (0x01)
#define SVGA3D_LOSTRETCHBLT_MIRRORY (0x02)
#define SVGA3D_LOALPHABLEND_SRCHASALPHA (0x01)
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdLogicOpsBitBlt {
/*
* All LogicOps surfaces are one-level
* surfaces so mipmap & face should always
* be zero.
*/
SVGA3dSurfaceImageId src;
SVGA3dSurfaceImageId dst;
SVGA3dLogicOp logicOp;
/* Followed by variable number of SVGA3dCopyBox structures */
}
#include "vmware_pack_end.h"
SVGA3dCmdLogicOpsBitBlt; /* SVGA_3D_CMD_LOGICOPS_BITBLT */
 
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdLogicOpsTransBlt {
/*
* All LogicOps surfaces are one-level
* surfaces so mipmap & face should always
* be zero.
*/
SVGA3dSurfaceImageId src;
SVGA3dSurfaceImageId dst;
uint32 color;
uint32 flags;
SVGA3dBox srcBox;
SVGA3dBox dstBox;
}
#include "vmware_pack_end.h"
SVGA3dCmdLogicOpsTransBlt; /* SVGA_3D_CMD_LOGICOPS_TRANSBLT */
 
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdLogicOpsStretchBlt {
/*
* All LogicOps surfaces are one-level
* surfaces so mipmap & face should always
* be zero.
*/
SVGA3dSurfaceImageId src;
SVGA3dSurfaceImageId dst;
uint16 mode;
uint16 flags;
SVGA3dBox srcBox;
SVGA3dBox dstBox;
}
#include "vmware_pack_end.h"
SVGA3dCmdLogicOpsStretchBlt; /* SVGA_3D_CMD_LOGICOPS_STRETCHBLT */
 
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdLogicOpsColorFill {
/*
* All LogicOps surfaces are one-level
* surfaces so mipmap & face should always
* be zero.
*/
SVGA3dSurfaceImageId dst;
uint32 color;
SVGA3dLogicOp logicOp;
/* Followed by variable number of SVGA3dRect structures. */
}
#include "vmware_pack_end.h"
SVGA3dCmdLogicOpsColorFill; /* SVGA_3D_CMD_LOGICOPS_COLORFILL */
 
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdLogicOpsAlphaBlend {
/*
* All LogicOps surfaces are one-level
* surfaces so mipmap & face should always
* be zero.
*/
SVGA3dSurfaceImageId src;
SVGA3dSurfaceImageId dst;
uint32 alphaVal;
uint32 flags;
SVGA3dBox srcBox;
SVGA3dBox dstBox;
}
#include "vmware_pack_end.h"
SVGA3dCmdLogicOpsAlphaBlend; /* SVGA_3D_CMD_LOGICOPS_ALPHABLEND */
 
#define SVGA3D_CLEARTYPE_INVALID_GAMMA_INDEX 0xFFFFFFFF
 
#define SVGA3D_CLEARTYPE_GAMMA_WIDTH 512
#define SVGA3D_CLEARTYPE_GAMMA_HEIGHT 16
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdLogicOpsClearTypeBlend {
/*
* All LogicOps surfaces are one-level
* surfaces so mipmap & face should always
* be zero.
*/
SVGA3dSurfaceImageId tmp;
SVGA3dSurfaceImageId dst;
SVGA3dSurfaceImageId gammaSurf;
SVGA3dSurfaceImageId alphaSurf;
uint32 gamma;
uint32 color;
uint32 color2;
int32 alphaOffsetX;
int32 alphaOffsetY;
/* Followed by variable number of SVGA3dBox structures */
}
#include "vmware_pack_end.h"
SVGA3dCmdLogicOpsClearTypeBlend; /* SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND */
 
 
/*
* Guest-backed objects definitions.
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGAMobFormat ptDepth;
uint32 sizeInBytes;
PPN64 base;
}
#include "vmware_pack_end.h"
SVGAOTableMobEntry;
#define SVGA3D_OTABLE_MOB_ENTRY_SIZE (sizeof(SVGAOTableMobEntry))
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dSurfaceFormat format;
SVGA3dSurfaceFlags surfaceFlags;
uint32 numMipLevels;
uint32 multisampleCount;
SVGA3dTextureFilter autogenFilter;
SVGA3dSize size;
SVGAMobId mobid;
uint32 arraySize;
uint32 mobPitch;
uint32 pad[5];
}
#include "vmware_pack_end.h"
SVGAOTableSurfaceEntry;
#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE (sizeof(SVGAOTableSurfaceEntry))
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
SVGAMobId mobid;
}
#include "vmware_pack_end.h"
SVGAOTableContextEntry;
#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE (sizeof(SVGAOTableContextEntry))
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dShaderType type;
uint32 sizeInBytes;
uint32 offsetInBytes;
SVGAMobId mobid;
}
#include "vmware_pack_end.h"
SVGAOTableShaderEntry;
#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE (sizeof(SVGAOTableShaderEntry))
 
#define SVGA_STFLAG_PRIMARY (1 << 0)
typedef uint32 SVGAScreenTargetFlags;
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dSurfaceImageId image;
uint32 width;
uint32 height;
int32 xRoot;
int32 yRoot;
SVGAScreenTargetFlags flags;
uint32 dpi;
uint32 pad[7];
}
#include "vmware_pack_end.h"
SVGAOTableScreenTargetEntry;
#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE \
(sizeof(SVGAOTableScreenTargetEntry))
 
typedef
#include "vmware_pack_begin.h"
struct {
float value[4];
}
#include "vmware_pack_end.h"
SVGA3dShaderConstFloat;
 
typedef
#include "vmware_pack_begin.h"
struct {
int32 value[4];
}
#include "vmware_pack_end.h"
SVGA3dShaderConstInt;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 value;
}
#include "vmware_pack_end.h"
SVGA3dShaderConstBool;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint16 streamOffset;
uint8 stream;
uint8 type;
uint8 methodUsage;
uint8 usageIndex;
}
#include "vmware_pack_end.h"
SVGAGBVertexElement;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 sid;
uint16 stride;
uint32 offset;
}
#include "vmware_pack_end.h"
SVGAGBVertexStream;
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dRect viewport;
SVGA3dRect scissorRect;
SVGA3dZRange zRange;
 
SVGA3dSurfaceImageId renderTargets[SVGA3D_RT_MAX];
SVGAGBVertexElement decl1[4];
 
uint32 renderStates[SVGA3D_RS_MAX];
SVGAGBVertexElement decl2[18];
uint32 pad0[2];
 
struct {
SVGA3dFace face;
SVGA3dMaterial material;
} material;
 
float clipPlanes[SVGA3D_NUM_CLIPPLANES][4];
float matrices[SVGA3D_TRANSFORM_MAX][16];
 
SVGA3dBool lightEnabled[SVGA3D_NUM_LIGHTS];
SVGA3dLightData lightData[SVGA3D_NUM_LIGHTS];
 
/*
* Shaders currently bound
*/
uint32 shaders[SVGA3D_NUM_SHADERTYPE_PREDX];
SVGAGBVertexElement decl3[10];
uint32 pad1[3];
 
uint32 occQueryActive;
uint32 occQueryValue;
 
/*
* Int/Bool Shader constants
*/
SVGA3dShaderConstInt pShaderIValues[SVGA3D_CONSTINTREG_MAX];
SVGA3dShaderConstInt vShaderIValues[SVGA3D_CONSTINTREG_MAX];
uint16 pShaderBValues;
uint16 vShaderBValues;
 
 
SVGAGBVertexStream streams[SVGA3D_MAX_VERTEX_ARRAYS];
SVGA3dVertexDivisor divisors[SVGA3D_MAX_VERTEX_ARRAYS];
uint32 numVertexDecls;
uint32 numVertexStreams;
uint32 numVertexDivisors;
uint32 pad2[30];
 
/*
* Texture Stages
*
* SVGA3D_TS_INVALID through SVGA3D_TS_CONSTANT are in the
* textureStages array.
* SVGA3D_TS_COLOR_KEY is in tsColorKey.
*/
uint32 tsColorKey[SVGA3D_NUM_TEXTURE_UNITS];
uint32 textureStages[SVGA3D_NUM_TEXTURE_UNITS][SVGA3D_TS_CONSTANT + 1];
uint32 tsColorKeyEnable[SVGA3D_NUM_TEXTURE_UNITS];
 
/*
* Float Shader constants.
*/
SVGA3dShaderConstFloat pShaderFValues[SVGA3D_CONSTREG_MAX];
SVGA3dShaderConstFloat vShaderFValues[SVGA3D_CONSTREG_MAX];
}
#include "vmware_pack_end.h"
SVGAGBContextData;
#define SVGA3D_CONTEXT_DATA_SIZE (sizeof(SVGAGBContextData))
 
/*
* SVGA3dCmdSetOTableBase --
*
* This command allows the guest to specify the base PPN of the
* specified object table.
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGAOTableType type;
PPN baseAddress;
uint32 sizeInBytes;
uint32 validSizeInBytes;
SVGAMobFormat ptDepth;
}
#include "vmware_pack_end.h"
SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGAOTableType type;
PPN64 baseAddress;
uint32 sizeInBytes;
uint32 validSizeInBytes;
SVGAMobFormat ptDepth;
}
#include "vmware_pack_end.h"
SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
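 
/*
* Example of sizing and setting an object table (editor's sketch): the
* backing memory holds one fixed-size entry per object id, so a mob table
* covering maxMobs ids needs maxMobs * SVGA3D_OTABLE_MOB_ENTRY_SIZE bytes.
* SVGA_OTABLE_MOB and SVGA3D_MOBFMT_PTDEPTH64_1 are SVGAOTableType and
* SVGAMobFormat enumerators from svga3d_types.h; the field values below
* are illustrative.
*
*   SVGA3dCmdSetOTableBase64 cmd;
*
*   cmd.type = SVGA_OTABLE_MOB;
*   cmd.baseAddress = otable_first_ppn;       // PPN64 of the backing pages
*   cmd.sizeInBytes = maxMobs * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
*   cmd.validSizeInBytes = 0;                 // no entries are valid yet
*   cmd.ptDepth = SVGA3D_MOBFMT_PTDEPTH64_1;  // one page-table level
*   // ... wrap in an SVGA3dCmdHeader with id SVGA_3D_CMD_SET_OTABLE_BASE64 ...
*/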
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGAOTableType type;
}
#include "vmware_pack_end.h"
SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
 
/*
* Define a memory object (Mob) in the OTable.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDefineGBMob {
SVGAMobId mobid;
SVGAMobFormat ptDepth;
PPN base;
uint32 sizeInBytes;
}
#include "vmware_pack_end.h"
SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
 
 
/*
* Destroys an object in the OTable.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDestroyGBMob {
SVGAMobId mobid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
 
 
/*
* Define a memory object (Mob) in the OTable with a PPN64 base.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDefineGBMob64 {
SVGAMobId mobid;
SVGAMobFormat ptDepth;
PPN64 base;
uint32 sizeInBytes;
}
#include "vmware_pack_end.h"
SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
 
/*
* Redefine an object in the OTable with PPN64 base.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdRedefineGBMob64 {
SVGAMobId mobid;
SVGAMobFormat ptDepth;
PPN64 base;
uint32 sizeInBytes;
}
#include "vmware_pack_end.h"
SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
 
/*
* Notification that the page tables have been modified.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdUpdateGBMobMapping {
SVGAMobId mobid;
}
#include "vmware_pack_end.h"
SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
 
/*
* Define a guest-backed surface.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDefineGBSurface {
uint32 sid;
SVGA3dSurfaceFlags surfaceFlags;
SVGA3dSurfaceFormat format;
uint32 numMipLevels;
uint32 multisampleCount;
SVGA3dTextureFilter autogenFilter;
SVGA3dSize size;
}
#include "vmware_pack_end.h"
SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
 
/*
* Destroy a guest-backed surface.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDestroyGBSurface {
uint32 sid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
 
/*
* Bind a guest-backed surface to a mob.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdBindGBSurface {
uint32 sid;
SVGAMobId mobid;
}
#include "vmware_pack_end.h"
SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
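 
/*
* Example guest-backed surface lifecycle (editor's sketch): define a mob
* over the backing pages, define the surface, bind the two together, then
* use UPDATE_GB_* after guest writes and READBACK_GB_* before guest reads.
* The bind step itself is tiny; each command is wrapped in its own
* SVGA3dCmdHeader as usual.
*
*   SVGA3dCmdBindGBSurface bind;
*
*   bind.sid = surface_id;   // from a prior SVGA_3D_CMD_DEFINE_GB_SURFACE
*   bind.mobid = mob_id;     // from a prior SVGA_3D_CMD_DEFINE_GB_MOB64
*   // emit with header id SVGA_3D_CMD_BIND_GB_SURFACE, size sizeof(bind)
*/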
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdBindGBSurfaceWithPitch {
uint32 sid;
SVGAMobId mobid;
uint32 baseLevelPitch;
}
#include "vmware_pack_end.h"
SVGA3dCmdBindGBSurfaceWithPitch; /* SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH */
 
/*
* Conditionally bind a mob to a guest-backed surface if testMobid
* matches the currently bound mob. Optionally issue a
* readback/update on the surface while it is still bound to the old
* mobid if the mobid is changed by this command.
*/
 
#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_UPDATE (1 << 1)
 
typedef
#include "vmware_pack_begin.h"
struct{
uint32 sid;
SVGAMobId testMobid;
SVGAMobId mobid;
uint32 flags;
}
#include "vmware_pack_end.h"
SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
 
/*
* Update an image in a guest-backed surface.
* (Inform the device that the guest-contents have been updated.)
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdUpdateGBImage {
SVGA3dSurfaceImageId image;
SVGA3dBox box;
}
#include "vmware_pack_end.h"
SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
 
/*
* Update an entire guest-backed surface.
* (Inform the device that the guest-contents have been updated.)
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdUpdateGBSurface {
uint32 sid;
}
#include "vmware_pack_end.h"
SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
 
/*
* Readback an image in a guest-backed surface.
* (Request the device to flush the dirty contents into the guest.)
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdReadbackGBImage {
SVGA3dSurfaceImageId image;
}
#include "vmware_pack_end.h"
SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE */
 
/*
* Readback an entire guest-backed surface.
* (Request the device to flush the dirty contents into the guest.)
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdReadbackGBSurface {
uint32 sid;
}
#include "vmware_pack_end.h"
SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
 
/*
* Readback a sub rect of an image in a guest-backed surface. After
* issuing this command, the driver is required to issue an update call
* of the same region before issuing any other commands that reference
* this surface; otherwise rendering is not guaranteed.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdReadbackGBImagePartial {
SVGA3dSurfaceImageId image;
SVGA3dBox box;
uint32 invertBox;
}
#include "vmware_pack_end.h"
SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
 
 
/*
* Invalidate an image in a guest-backed surface.
* (Notify the device that the contents can be lost.)
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdInvalidateGBImage {
SVGA3dSurfaceImageId image;
}
#include "vmware_pack_end.h"
SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
 
/*
* Invalidate an entire guest-backed surface.
* (Notify the device that the contents of all images can be lost.)
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdInvalidateGBSurface {
uint32 sid;
}
#include "vmware_pack_end.h"
SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
 
/*
* Invalidate a sub rect of an image in a guest-backed surface. After
* issuing this command, the driver is required to issue an update call
* of the same region before issuing any other commands that reference
* this surface; otherwise rendering is not guaranteed.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdInvalidateGBImagePartial {
SVGA3dSurfaceImageId image;
SVGA3dBox box;
uint32 invertBox;
}
#include "vmware_pack_end.h"
SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
 
 
/*
* Define a guest-backed context.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDefineGBContext {
uint32 cid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
 
/*
* Destroy a guest-backed context.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDestroyGBContext {
uint32 cid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
 
/*
* Bind a guest-backed context.
*
* validContents should be set to 0 for new contexts,
* and 1 if this is an old context which is getting paged
* back onto the device.
*
* For new contexts, it is recommended that the driver
* issue commands to initialize all interesting state
* prior to rendering.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdBindGBContext {
uint32 cid;
SVGAMobId mobid;
uint32 validContents;
}
#include "vmware_pack_end.h"
SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
 
/*
* Readback a guest-backed context.
* (Request that the device flush the contents back into guest memory.)
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdReadbackGBContext {
uint32 cid;
}
#include "vmware_pack_end.h"
SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
 
/*
* Invalidate a guest-backed context.
*/
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdInvalidateGBContext {
uint32 cid;
}
#include "vmware_pack_end.h"
SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
 
/*
* Define a guest-backed shader.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDefineGBShader {
uint32 shid;
SVGA3dShaderType type;
uint32 sizeInBytes;
}
#include "vmware_pack_end.h"
SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
 
/*
* Bind a guest-backed shader.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdBindGBShader {
uint32 shid;
SVGAMobId mobid;
uint32 offsetInBytes;
}
#include "vmware_pack_end.h"
SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
 
/*
* Destroy a guest-backed shader.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDestroyGBShader {
uint32 shid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
uint32 regStart;
SVGA3dShaderType shaderType;
SVGA3dShaderConstType constType;
 
/*
* Followed by a variable number of shader constants.
*
* Note that FLOAT and INT constants are 4-dwords in length, while
* BOOL constants are 1-dword in length.
*/
}
#include "vmware_pack_end.h"
SVGA3dCmdSetGBShaderConstInline; /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
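 
/*
* Example of sizing the inline constant payload (editor's sketch): FLOAT and
* INT constants occupy four dwords each, BOOL constants one dword, so the
* command body is the fixed structure plus numConsts entries of the
* appropriate width. SVGA3D_CONST_TYPE_BOOL is an SVGA3dShaderConstType
* enumerator from svga3d_types.h.
*
*   uint32 dwordsPerConst = (constType == SVGA3D_CONST_TYPE_BOOL) ? 1 : 4;
*   uint32 bodySize = sizeof(SVGA3dCmdSetGBShaderConstInline) +
*                     numConsts * dwordsPerConst * sizeof(uint32);
*   // reserve bodySize (plus a header) in the FIFO and append the raw dwords
*/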
 
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
SVGA3dQueryType type;
}
#include "vmware_pack_end.h"
SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
SVGA3dQueryType type;
SVGAMobId mobid;
uint32 offset;
}
#include "vmware_pack_end.h"
SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
 
 
/*
* SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
*
* The semantics of this command are identical to the
* SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
* to a Mob instead of a GMR.
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
SVGA3dQueryType type;
SVGAMobId mobid;
uint32 offset;
}
#include "vmware_pack_end.h"
SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
 
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGAMobId mobid;
uint32 mustBeZero;
uint32 initialized;
}
#include "vmware_pack_end.h"
SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGAMobId mobid;
uint32 gartOffset;
}
#include "vmware_pack_end.h"
SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
 
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 gartOffset;
uint32 numPages;
}
#include "vmware_pack_end.h"
SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
 
 
/*
* Screen Targets
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 stid;
uint32 width;
uint32 height;
int32 xRoot;
int32 yRoot;
SVGAScreenTargetFlags flags;
 
/*
* The physical DPI at which the guest expects this screen to be displayed.
*
* Guests which are not DPI-aware should set this to zero.
*/
uint32 dpi;
}
#include "vmware_pack_end.h"
SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 stid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 stid;
SVGA3dSurfaceImageId image;
}
#include "vmware_pack_end.h"
SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 stid;
SVGA3dRect rect;
}
#include "vmware_pack_end.h"
SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
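 
/*
* Example screen-target bring-up (editor's sketch): a screen target is
* defined with its size and placement, bound to a guest-backed surface that
* serves as its framebuffer, and then refreshed with UPDATE commands for
* each dirty rectangle. The stid, fb_sid and geometry values below are
* illustrative; each struct is sent wrapped in its own SVGA3dCmdHeader.
*
*   SVGA3dCmdDefineGBScreenTarget define = {
*      .stid = 0, .width = 1024, .height = 768,
*      .xRoot = 0, .yRoot = 0,
*      .flags = SVGA_STFLAG_PRIMARY, .dpi = 0,
*   };
*   SVGA3dCmdBindGBScreenTarget bind = {
*      .stid = 0, .image = { .sid = fb_sid, .face = 0, .mipmap = 0 },
*   };
*   SVGA3dCmdUpdateGBScreenTarget update = {
*      .stid = 0, .rect = { .x = 0, .y = 0, .w = 1024, .h = 768 },
*   };
*/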
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdGBScreenDMA {
uint32 screenId;
uint32 dead;
SVGAMobId destMobID;
uint32 destPitch;
SVGAMobId changeMapMobID;
}
#include "vmware_pack_end.h"
SVGA3dCmdGBScreenDMA; /* SVGA_3D_CMD_GB_SCREEN_DMA */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 value;
uint32 mobId;
uint32 mobOffset;
}
#include "vmware_pack_end.h"
SVGA3dCmdGBMobFence; /* SVGA_3D_CMD_GB_MOB_FENCE*/
 
#endif /* _SVGA3D_CMD_H_ */
/drivers/video/drm/vmwgfx/device_include/svga3d_devcaps.h
0,0 → 1,457
/**********************************************************
* Copyright 1998-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
 
/*
* svga3d_devcaps.h --
*
* SVGA 3d caps definitions
*/
 
#ifndef _SVGA3D_DEVCAPS_H_
#define _SVGA3D_DEVCAPS_H_
 
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMCORE
 
#include "includeCheck.h"
 
/*
* 3D Hardware Version
*
* The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
* register. It is set by the host and read by the guest. This lets
* us make new guest drivers which are backwards-compatible with old
* SVGA hardware revisions. It does not let us support old guest
* drivers. Good enough for now.
*
*/
 
#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
 
typedef enum {
SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1),
SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2),
SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3),
SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1),
SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1,
} SVGA3dHardwareVersion;
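 
/*
* Example version check (editor's sketch): the guest reads the host's 3D
* hardware version from the SVGA_FIFO_3D_HWVERSION FIFO register (declared
* in svga_reg.h) and refuses or restricts 3D if it is too old. Because the
* encoding is (major << 16) | minor, the values compare monotonically.
* The 'fifo' mapping and the fallback path are placeholders.
*
*   uint32 hwversion = fifo[SVGA_FIFO_3D_HWVERSION];
*
*   if (hwversion == 0)
*      return false;                       // host exposes no 3D support
*   if (hwversion < SVGA3D_HWVERSION_WS8_B1)
*      use_legacy_feature_set();           // hypothetical reduced path
*/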
 
/*
* DevCap indexes.
*/
 
typedef enum {
SVGA3D_DEVCAP_INVALID = ((uint32)-1),
SVGA3D_DEVCAP_3D = 0,
SVGA3D_DEVCAP_MAX_LIGHTS = 1,
 
/*
* SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
* fixed-function texture units available. These units
* work in both FFP and Shader modes, and they support texture
* transforms and texture coordinates. The host may have additional
* texture image units that are only usable with shaders.
*/
SVGA3D_DEVCAP_MAX_TEXTURES = 2,
SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
SVGA3D_DEVCAP_VERTEX_SHADER = 5,
SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12,
SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13,
SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14,
SVGA3D_DEVCAP_QUERY_TYPES = 15,
SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
SVGA3D_DEVCAP_TEXTURE_OPS = 31,
SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
 
/*
* There is a hole in our devcap definitions for
* historical reasons.
*
* Define a constant just for completeness.
*/
SVGA3D_DEVCAP_MISSING62 = 62,
 
SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
 
/*
* Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
* render targets. This does not include the depth or stencil targets.
*/
SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
 
SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70,
SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71,
SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72,
SVGA3D_DEVCAP_SUPERSAMPLE = 73,
SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74,
SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75,
SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76,
 
/*
* This is the maximum number of SVGA context IDs that the guest
* can define using SVGA_3D_CMD_CONTEXT_DEFINE.
*/
SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77,
 
/*
* This is the maximum number of SVGA surface IDs that the guest
* can define using SVGA_3D_CMD_SURFACE_DEFINE*.
*/
SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78,
 
SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79,
SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80,
SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81,
 
SVGA3D_DEVCAP_SURFACEFMT_ATI1 = 82,
SVGA3D_DEVCAP_SURFACEFMT_ATI2 = 83,
 
/*
* Deprecated.
*/
SVGA3D_DEVCAP_DEAD1 = 84,
 
/*
* This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
* ORed together, one for every type of video decoding supported.
*/
SVGA3D_DEVCAP_VIDEO_DECODE = 85,
 
/*
* This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
* ORed together, one for every type of video processing supported.
*/
SVGA3D_DEVCAP_VIDEO_PROCESS = 86,
 
SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */
SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */
SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */
SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */
 
SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91,
 
/*
* Does the host support the SVGA logic ops commands?
*/
SVGA3D_DEVCAP_LOGICOPS = 92,
 
/*
* Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported?
*/
SVGA3D_DEVCAP_TS_COLOR_KEY = 93, /* boolean */
 
/*
* Deprecated.
*/
SVGA3D_DEVCAP_DEAD2 = 94,
 
/*
* Does the device support the DX commands?
*/
SVGA3D_DEVCAP_DX = 95,
 
/*
* What is the maximum size of a texture array?
*
* (Even if this cap is zero, cubemaps are still allowed.)
*/
SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE = 96,
 
/*
* What is the maximum number of vertex buffers that can
* be used in the DXContext inputAssembly?
*/
SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS = 97,
 
/*
* What is the maximum number of constant buffers
* that can be expected to work correctly with a
* DX context?
*/
SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS = 98,
 
/*
* Does the device support provoking vertex control?
* If zero, the first vertex will always be the provoking vertex.
*/
SVGA3D_DEVCAP_DX_PROVOKING_VERTEX = 99,
 
SVGA3D_DEVCAP_DXFMT_X8R8G8B8 = 100,
SVGA3D_DEVCAP_DXFMT_A8R8G8B8 = 101,
SVGA3D_DEVCAP_DXFMT_R5G6B5 = 102,
SVGA3D_DEVCAP_DXFMT_X1R5G5B5 = 103,
SVGA3D_DEVCAP_DXFMT_A1R5G5B5 = 104,
SVGA3D_DEVCAP_DXFMT_A4R4G4B4 = 105,
SVGA3D_DEVCAP_DXFMT_Z_D32 = 106,
SVGA3D_DEVCAP_DXFMT_Z_D16 = 107,
SVGA3D_DEVCAP_DXFMT_Z_D24S8 = 108,
SVGA3D_DEVCAP_DXFMT_Z_D15S1 = 109,
SVGA3D_DEVCAP_DXFMT_LUMINANCE8 = 110,
SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4 = 111,
SVGA3D_DEVCAP_DXFMT_LUMINANCE16 = 112,
SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8 = 113,
SVGA3D_DEVCAP_DXFMT_DXT1 = 114,
SVGA3D_DEVCAP_DXFMT_DXT2 = 115,
SVGA3D_DEVCAP_DXFMT_DXT3 = 116,
SVGA3D_DEVCAP_DXFMT_DXT4 = 117,
SVGA3D_DEVCAP_DXFMT_DXT5 = 118,
SVGA3D_DEVCAP_DXFMT_BUMPU8V8 = 119,
SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 = 120,
SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 = 121,
SVGA3D_DEVCAP_DXFMT_BUMPL8V8U8 = 122,
SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 = 123,
SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 = 124,
SVGA3D_DEVCAP_DXFMT_A2R10G10B10 = 125,
SVGA3D_DEVCAP_DXFMT_V8U8 = 126,
SVGA3D_DEVCAP_DXFMT_Q8W8V8U8 = 127,
SVGA3D_DEVCAP_DXFMT_CxV8U8 = 128,
SVGA3D_DEVCAP_DXFMT_X8L8V8U8 = 129,
SVGA3D_DEVCAP_DXFMT_A2W10V10U10 = 130,
SVGA3D_DEVCAP_DXFMT_ALPHA8 = 131,
SVGA3D_DEVCAP_DXFMT_R_S10E5 = 132,
SVGA3D_DEVCAP_DXFMT_R_S23E8 = 133,
SVGA3D_DEVCAP_DXFMT_RG_S10E5 = 134,
SVGA3D_DEVCAP_DXFMT_RG_S23E8 = 135,
SVGA3D_DEVCAP_DXFMT_BUFFER = 136,
SVGA3D_DEVCAP_DXFMT_Z_D24X8 = 137,
SVGA3D_DEVCAP_DXFMT_V16U16 = 138,
SVGA3D_DEVCAP_DXFMT_G16R16 = 139,
SVGA3D_DEVCAP_DXFMT_A16B16G16R16 = 140,
SVGA3D_DEVCAP_DXFMT_UYVY = 141,
SVGA3D_DEVCAP_DXFMT_YUY2 = 142,
SVGA3D_DEVCAP_DXFMT_NV12 = 143,
SVGA3D_DEVCAP_DXFMT_AYUV = 144,
SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS = 145,
SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT = 146,
SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT = 147,
SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS = 148,
SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT = 149,
SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT = 150,
SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT = 151,
SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS = 152,
SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT = 153,
SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM = 154,
SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT = 155,
SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS = 156,
SVGA3D_DEVCAP_DXFMT_R32G32_UINT = 157,
SVGA3D_DEVCAP_DXFMT_R32G32_SINT = 158,
SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS = 159,
SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT = 160,
SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24_TYPELESS = 161,
SVGA3D_DEVCAP_DXFMT_X32_TYPELESS_G8X24_UINT = 162,
SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS = 163,
SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT = 164,
SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT = 165,
SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS = 166,
SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM = 167,
SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB = 168,
SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT = 169,
SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT = 170,
SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS = 171,
SVGA3D_DEVCAP_DXFMT_R16G16_UINT = 172,
SVGA3D_DEVCAP_DXFMT_R16G16_SINT = 173,
SVGA3D_DEVCAP_DXFMT_R32_TYPELESS = 174,
SVGA3D_DEVCAP_DXFMT_D32_FLOAT = 175,
SVGA3D_DEVCAP_DXFMT_R32_UINT = 176,
SVGA3D_DEVCAP_DXFMT_R32_SINT = 177,
SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS = 178,
SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT = 179,
SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8_TYPELESS = 180,
SVGA3D_DEVCAP_DXFMT_X24_TYPELESS_G8_UINT = 181,
SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS = 182,
SVGA3D_DEVCAP_DXFMT_R8G8_UNORM = 183,
SVGA3D_DEVCAP_DXFMT_R8G8_UINT = 184,
SVGA3D_DEVCAP_DXFMT_R8G8_SINT = 185,
SVGA3D_DEVCAP_DXFMT_R16_TYPELESS = 186,
SVGA3D_DEVCAP_DXFMT_R16_UNORM = 187,
SVGA3D_DEVCAP_DXFMT_R16_UINT = 188,
SVGA3D_DEVCAP_DXFMT_R16_SNORM = 189,
SVGA3D_DEVCAP_DXFMT_R16_SINT = 190,
SVGA3D_DEVCAP_DXFMT_R8_TYPELESS = 191,
SVGA3D_DEVCAP_DXFMT_R8_UNORM = 192,
SVGA3D_DEVCAP_DXFMT_R8_UINT = 193,
SVGA3D_DEVCAP_DXFMT_R8_SNORM = 194,
SVGA3D_DEVCAP_DXFMT_R8_SINT = 195,
SVGA3D_DEVCAP_DXFMT_P8 = 196,
SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP = 197,
SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM = 198,
SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM = 199,
SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS = 200,
SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB = 201,
SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS = 202,
SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB = 203,
SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS = 204,
SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB = 205,
SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS = 206,
SVGA3D_DEVCAP_DXFMT_ATI1 = 207,
SVGA3D_DEVCAP_DXFMT_BC4_SNORM = 208,
SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS = 209,
SVGA3D_DEVCAP_DXFMT_ATI2 = 210,
SVGA3D_DEVCAP_DXFMT_BC5_SNORM = 211,
SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM = 212,
SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS = 213,
SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB = 214,
SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS = 215,
SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB = 216,
SVGA3D_DEVCAP_DXFMT_Z_DF16 = 217,
SVGA3D_DEVCAP_DXFMT_Z_DF24 = 218,
SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT = 219,
SVGA3D_DEVCAP_DXFMT_YV12 = 220,
SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT = 221,
SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT = 222,
SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM = 223,
SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT = 224,
SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM = 225,
SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM = 226,
SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT = 227,
SVGA3D_DEVCAP_DXFMT_R16G16_UNORM = 228,
SVGA3D_DEVCAP_DXFMT_R16G16_SNORM = 229,
SVGA3D_DEVCAP_DXFMT_R32_FLOAT = 230,
SVGA3D_DEVCAP_DXFMT_R8G8_SNORM = 231,
SVGA3D_DEVCAP_DXFMT_R16_FLOAT = 232,
SVGA3D_DEVCAP_DXFMT_D16_UNORM = 233,
SVGA3D_DEVCAP_DXFMT_A8_UNORM = 234,
SVGA3D_DEVCAP_DXFMT_BC1_UNORM = 235,
SVGA3D_DEVCAP_DXFMT_BC2_UNORM = 236,
SVGA3D_DEVCAP_DXFMT_BC3_UNORM = 237,
SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM = 238,
SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM = 239,
SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM = 240,
SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM = 241,
SVGA3D_DEVCAP_DXFMT_BC4_UNORM = 242,
SVGA3D_DEVCAP_DXFMT_BC5_UNORM = 243,
 
SVGA3D_DEVCAP_MAX /* This must be the last index. */
} SVGA3dDevCapIndex;
 
/*
* Bit definitions for DXFMT devcaps
*
* SUPPORTED: Can the format be defined?
* SHADER_SAMPLE: Can the format be sampled from a shader?
* COLOR_RENDERTARGET: Can the format be a color render target?
* DEPTH_RENDERTARGET: Can the format be a depth render target?
* BLENDABLE: Is the format blendable?
* MIPS: Does the format support mip levels?
* ARRAY: Does the format support texture arrays?
* VOLUME: Does the format support having volume?
* MULTISAMPLE_2: Does the format support 2x multisample?
* MULTISAMPLE_4: Does the format support 4x multisample?
* MULTISAMPLE_8: Does the format support 8x multisample?
*/
#define SVGA3D_DXFMT_SUPPORTED (1 << 0)
#define SVGA3D_DXFMT_SHADER_SAMPLE (1 << 1)
#define SVGA3D_DXFMT_COLOR_RENDERTARGET (1 << 2)
#define SVGA3D_DXFMT_DEPTH_RENDERTARGET (1 << 3)
#define SVGA3D_DXFMT_BLENDABLE (1 << 4)
#define SVGA3D_DXFMT_MIPS (1 << 5)
#define SVGA3D_DXFMT_ARRAY (1 << 6)
#define SVGA3D_DXFMT_VOLUME (1 << 7)
#define SVGA3D_DXFMT_DX_VERTEX_BUFFER (1 << 8)
#define SVGADX_DXFMT_MULTISAMPLE_2 (1 << 9)
#define SVGADX_DXFMT_MULTISAMPLE_4 (1 << 10)
#define SVGADX_DXFMT_MULTISAMPLE_8 (1 << 11)
#define SVGADX_DXFMT_MAX (1 << 12)
 
/*
* Convenience mask for any multisample capability.
*
* The multisample bits imply both load and render capability.
*/
#define SVGA3D_DXFMT_MULTISAMPLE ( \
SVGADX_DXFMT_MULTISAMPLE_2 | \
SVGADX_DXFMT_MULTISAMPLE_4 | \
SVGADX_DXFMT_MULTISAMPLE_8 )
 
typedef union {
Bool b;
uint32 u;
int32 i;
float f;
} SVGA3dDevCapResult;
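 
/*
 * Illustrative sketch, not part of the device headers: one way a driver
 * could interpret a DXFMT devcap value using the bit definitions above.
 * The helper name is an assumption for illustration only; only the bit
 * tests themselves come from this file.
 */
static inline uint32
SVGA3dDXFmtIsColorRenderable(SVGA3dDevCapResult result)
{
   return (result.u & SVGA3D_DXFMT_SUPPORTED) &&
          (result.u & SVGA3D_DXFMT_COLOR_RENDERTARGET);
}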
 
#endif /* _SVGA3D_DEVCAPS_H_ */
/drivers/video/drm/vmwgfx/device_include/svga3d_dx.h
0,0 → 1,1487
/**********************************************************
* Copyright 2012-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
 
/*
* svga3d_dx.h --
*
* SVGA 3d hardware definitions for DX10 support.
*/
 
#ifndef _SVGA3D_DX_H_
#define _SVGA3D_DX_H_
 
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMCORE
#include "includeCheck.h"
 
#include "svga3d_limits.h"
 
#define SVGA3D_INPUT_MIN 0
#define SVGA3D_INPUT_PER_VERTEX_DATA 0
#define SVGA3D_INPUT_PER_INSTANCE_DATA 1
#define SVGA3D_INPUT_MAX 2
typedef uint32 SVGA3dInputClassification;
 
#define SVGA3D_RESOURCE_TYPE_MIN 1
#define SVGA3D_RESOURCE_BUFFER 1
#define SVGA3D_RESOURCE_TEXTURE1D 2
#define SVGA3D_RESOURCE_TEXTURE2D 3
#define SVGA3D_RESOURCE_TEXTURE3D 4
#define SVGA3D_RESOURCE_TEXTURECUBE 5
#define SVGA3D_RESOURCE_TYPE_DX10_MAX 6
#define SVGA3D_RESOURCE_BUFFEREX 6
#define SVGA3D_RESOURCE_TYPE_MAX 7
typedef uint32 SVGA3dResourceType;
 
#define SVGA3D_DEPTH_WRITE_MASK_ZERO 0
#define SVGA3D_DEPTH_WRITE_MASK_ALL 1
typedef uint8 SVGA3dDepthWriteMask;
 
#define SVGA3D_FILTER_MIP_LINEAR (1 << 0)
#define SVGA3D_FILTER_MAG_LINEAR (1 << 2)
#define SVGA3D_FILTER_MIN_LINEAR (1 << 4)
#define SVGA3D_FILTER_ANISOTROPIC (1 << 6)
#define SVGA3D_FILTER_COMPARE (1 << 7)
typedef uint32 SVGA3dFilter;
 
#define SVGA3D_CULL_INVALID 0
#define SVGA3D_CULL_MIN 1
#define SVGA3D_CULL_NONE 1
#define SVGA3D_CULL_FRONT 2
#define SVGA3D_CULL_BACK 3
#define SVGA3D_CULL_MAX 4
typedef uint8 SVGA3dCullMode;
 
#define SVGA3D_COMPARISON_INVALID 0
#define SVGA3D_COMPARISON_MIN 1
#define SVGA3D_COMPARISON_NEVER 1
#define SVGA3D_COMPARISON_LESS 2
#define SVGA3D_COMPARISON_EQUAL 3
#define SVGA3D_COMPARISON_LESS_EQUAL 4
#define SVGA3D_COMPARISON_GREATER 5
#define SVGA3D_COMPARISON_NOT_EQUAL 6
#define SVGA3D_COMPARISON_GREATER_EQUAL 7
#define SVGA3D_COMPARISON_ALWAYS 8
#define SVGA3D_COMPARISON_MAX 9
typedef uint8 SVGA3dComparisonFunc;
 
#define SVGA3D_DX_MAX_VERTEXBUFFERS 32
#define SVGA3D_DX_MAX_SOTARGETS 4
#define SVGA3D_DX_MAX_SRVIEWS 128
#define SVGA3D_DX_MAX_CONSTBUFFERS 16
#define SVGA3D_DX_MAX_SAMPLERS 16
 
/* Id limits */
static const uint32 SVGA3dBlendObjectCountPerContext = 4096;
static const uint32 SVGA3dDepthStencilObjectCountPerContext = 4096;
 
typedef uint32 SVGA3dSurfaceId;
typedef uint32 SVGA3dShaderResourceViewId;
typedef uint32 SVGA3dRenderTargetViewId;
typedef uint32 SVGA3dDepthStencilViewId;
 
typedef uint32 SVGA3dShaderId;
typedef uint32 SVGA3dElementLayoutId;
typedef uint32 SVGA3dSamplerId;
typedef uint32 SVGA3dBlendStateId;
typedef uint32 SVGA3dDepthStencilStateId;
typedef uint32 SVGA3dRasterizerStateId;
typedef uint32 SVGA3dQueryId;
typedef uint32 SVGA3dStreamOutputId;
 
typedef union {
struct {
float r;
float g;
float b;
float a;
};
 
float value[4];
} SVGA3dRGBAFloat;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 cid;
SVGAMobId mobid;
}
#include "vmware_pack_end.h"
SVGAOTableDXContextEntry;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineContext {
uint32 cid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDefineContext; /* SVGA_3D_CMD_DX_DEFINE_CONTEXT */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDestroyContext {
uint32 cid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroyContext; /* SVGA_3D_CMD_DX_DESTROY_CONTEXT */
 
/*
* Bind a DX context.
*
* validContents should be set to 0 for new contexts,
* and 1 if this is an old context that is being paged
* back onto the device.
*
* For new contexts, it is recommended that the driver
* issue commands to initialize all interesting state
* prior to rendering.
*/
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXBindContext {
uint32 cid;
SVGAMobId mobid;
uint32 validContents;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXBindContext; /* SVGA_3D_CMD_DX_BIND_CONTEXT */
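 
/*
 * Minimal sketch, not part of the device headers: filling in a bind
 * command as described above. The helper name is an assumption; the
 * command still has to be wrapped in a command header and submitted
 * through the command buffer, which is outside the scope of this file.
 */
static inline void
SVGA3dDXInitBindContext(SVGA3dCmdDXBindContext *cmd,
                        uint32 cid, SVGAMobId mobid, uint32 validContents)
{
   cmd->cid           = cid;
   cmd->mobid         = mobid;
   cmd->validContents = validContents; /* 0: new context, 1: paged back */
}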
 
/*
* Readback a DX context.
* (Request that the device flush the contents back into guest memory.)
*/
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXReadbackContext {
uint32 cid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXReadbackContext; /* SVGA_3D_CMD_DX_READBACK_CONTEXT */
 
/*
* Invalidate a guest-backed context.
*/
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXInvalidateContext {
uint32 cid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXInvalidateContext; /* SVGA_3D_CMD_DX_INVALIDATE_CONTEXT */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dReplyFormatData {
uint32 formatSupport;
uint32 msaa2xQualityLevels:5;
uint32 msaa4xQualityLevels:5;
uint32 msaa8xQualityLevels:5;
uint32 msaa16xQualityLevels:5;
uint32 msaa32xQualityLevels:5;
uint32 pad:7;
}
#include "vmware_pack_end.h"
SVGA3dReplyFormatData;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetSingleConstantBuffer {
uint32 slot;
SVGA3dShaderType type;
SVGA3dSurfaceId sid;
uint32 offsetInBytes;
uint32 sizeInBytes;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetSingleConstantBuffer;
/* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetShaderResources {
uint32 startView;
SVGA3dShaderType type;
 
/*
* Followed by a variable number of SVGA3dShaderResourceViewId's.
*/
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetShaderResources; /* SVGA_3D_CMD_DX_SET_SHADER_RESOURCES */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetShader {
SVGA3dShaderId shaderId;
SVGA3dShaderType type;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetShader; /* SVGA_3D_CMD_DX_SET_SHADER */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetSamplers {
uint32 startSampler;
SVGA3dShaderType type;
 
/*
* Followed by a variable number of SVGA3dSamplerId's.
*/
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetSamplers; /* SVGA_3D_CMD_DX_SET_SAMPLERS */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDraw {
uint32 vertexCount;
uint32 startVertexLocation;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDraw; /* SVGA_3D_CMD_DX_DRAW */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDrawIndexed {
uint32 indexCount;
uint32 startIndexLocation;
int32 baseVertexLocation;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDrawIndexed; /* SVGA_3D_CMD_DX_DRAW_INDEXED */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDrawInstanced {
uint32 vertexCountPerInstance;
uint32 instanceCount;
uint32 startVertexLocation;
uint32 startInstanceLocation;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDrawInstanced; /* SVGA_3D_CMD_DX_DRAW_INSTANCED */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDrawIndexedInstanced {
uint32 indexCountPerInstance;
uint32 instanceCount;
uint32 startIndexLocation;
int32 baseVertexLocation;
uint32 startInstanceLocation;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDrawIndexedInstanced; /* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDrawAuto {
uint32 pad0;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDrawAuto; /* SVGA_3D_CMD_DX_DRAW_AUTO */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetInputLayout {
SVGA3dElementLayoutId elementLayoutId;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetInputLayout; /* SVGA_3D_CMD_DX_SET_INPUT_LAYOUT */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dVertexBuffer {
SVGA3dSurfaceId sid;
uint32 stride;
uint32 offset;
}
#include "vmware_pack_end.h"
SVGA3dVertexBuffer;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetVertexBuffers {
uint32 startBuffer;
/* Followed by a variable number of SVGA3dVertexBuffer's. */
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetVertexBuffers; /* SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetIndexBuffer {
SVGA3dSurfaceId sid;
SVGA3dSurfaceFormat format;
uint32 offset;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetIndexBuffer; /* SVGA_3D_CMD_DX_SET_INDEX_BUFFER */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetTopology {
SVGA3dPrimitiveType topology;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetTopology; /* SVGA_3D_CMD_DX_SET_TOPOLOGY */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetRenderTargets {
SVGA3dDepthStencilViewId depthStencilViewId;
/* Followed by a variable number of SVGA3dRenderTargetViewId's. */
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetRenderTargets; /* SVGA_3D_CMD_DX_SET_RENDERTARGETS */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetBlendState {
SVGA3dBlendStateId blendId;
float blendFactor[4];
uint32 sampleMask;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetBlendState; /* SVGA_3D_CMD_DX_SET_BLEND_STATE */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetDepthStencilState {
SVGA3dDepthStencilStateId depthStencilId;
uint32 stencilRef;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetDepthStencilState; /* SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetRasterizerState {
SVGA3dRasterizerStateId rasterizerId;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetRasterizerState; /* SVGA_3D_CMD_DX_SET_RASTERIZER_STATE */
 
#define SVGA3D_DXQUERY_FLAG_PREDICATEHINT (1 << 0)
typedef uint32 SVGA3dDXQueryFlags;
 
/*
* The SVGADXQueryDeviceState and SVGADXQueryDeviceBits are used by the device
* to track query state transitions, but are not intended to be used by the
* driver.
*/
#define SVGADX_QDSTATE_INVALID ((uint8)-1) /* Query has no state */
#define SVGADX_QDSTATE_MIN 0
#define SVGADX_QDSTATE_IDLE 0 /* Query hasn't started yet */
#define SVGADX_QDSTATE_ACTIVE 1 /* Query is actively gathering data */
#define SVGADX_QDSTATE_PENDING 2 /* Query is waiting for results */
#define SVGADX_QDSTATE_FINISHED 3 /* Query has completed */
#define SVGADX_QDSTATE_MAX 4
typedef uint8 SVGADXQueryDeviceState;
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dQueryTypeUint8 type;
uint16 pad0;
SVGADXQueryDeviceState state;
SVGA3dDXQueryFlags flags;
SVGAMobId mobid;
uint32 offset;
}
#include "vmware_pack_end.h"
SVGACOTableDXQueryEntry;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineQuery {
SVGA3dQueryId queryId;
SVGA3dQueryType type;
SVGA3dDXQueryFlags flags;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDefineQuery; /* SVGA_3D_CMD_DX_DEFINE_QUERY */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDestroyQuery {
SVGA3dQueryId queryId;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroyQuery; /* SVGA_3D_CMD_DX_DESTROY_QUERY */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXBindQuery {
SVGA3dQueryId queryId;
SVGAMobId mobid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXBindQuery; /* SVGA_3D_CMD_DX_BIND_QUERY */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetQueryOffset {
SVGA3dQueryId queryId;
uint32 mobOffset;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetQueryOffset; /* SVGA_3D_CMD_DX_SET_QUERY_OFFSET */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXBeginQuery {
SVGA3dQueryId queryId;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXBeginQuery; /* SVGA_3D_CMD_DX_QUERY_BEGIN */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXEndQuery {
SVGA3dQueryId queryId;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXEndQuery; /* SVGA_3D_CMD_DX_QUERY_END */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXReadbackQuery {
SVGA3dQueryId queryId;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXReadbackQuery; /* SVGA_3D_CMD_DX_READBACK_QUERY */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXMoveQuery {
SVGA3dQueryId queryId;
SVGAMobId mobid;
uint32 mobOffset;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXMoveQuery; /* SVGA_3D_CMD_DX_MOVE_QUERY */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXBindAllQuery {
uint32 cid;
SVGAMobId mobid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXBindAllQuery; /* SVGA_3D_CMD_DX_BIND_ALL_QUERY */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXReadbackAllQuery {
uint32 cid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXReadbackAllQuery; /* SVGA_3D_CMD_DX_READBACK_ALL_QUERY */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetPredication {
SVGA3dQueryId queryId;
uint32 predicateValue;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetPredication; /* SVGA_3D_CMD_DX_SET_PREDICATION */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dDXSOState {
uint32 offset; /* Starting offset */
uint32 intOffset; /* Internal offset */
uint32 vertexCount; /* vertices written */
uint32 sizeInBytes; /* max bytes to write */
}
#include "vmware_pack_end.h"
SVGA3dDXSOState;
 
/* Set the offset field to this value to append SO values to the buffer */
#define SVGA3D_DX_SO_OFFSET_APPEND ((uint32) ~0u)
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dSoTarget {
SVGA3dSurfaceId sid;
uint32 offset;
uint32 sizeInBytes;
}
#include "vmware_pack_end.h"
SVGA3dSoTarget;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetSOTargets {
uint32 pad0;
/* Followed by a variable number of SVGA3dSoTarget's. */
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetSOTargets; /* SVGA_3D_CMD_DX_SET_SOTARGETS */
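 
/*
 * Illustrative sketch, not part of the device headers: the SET_SOTARGETS
 * body above is followed by up to SVGA3D_DX_MAX_SOTARGETS SVGA3dSoTarget
 * entries. Setting a target's offset to SVGA3D_DX_SO_OFFSET_APPEND makes
 * the device keep appending stream-output data instead of restarting at a
 * fixed offset. The helper name is an assumption for illustration only.
 */
static inline void
SVGA3dDXInitSoTargetAppend(SVGA3dSoTarget *target,
                           SVGA3dSurfaceId sid, uint32 sizeInBytes)
{
   target->sid         = sid;
   target->offset      = SVGA3D_DX_SO_OFFSET_APPEND;
   target->sizeInBytes = sizeInBytes;
}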
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dViewport
{
float x;
float y;
float width;
float height;
float minDepth;
float maxDepth;
}
#include "vmware_pack_end.h"
SVGA3dViewport;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetViewports {
uint32 pad0;
/* Followed by a variable number of SVGA3dViewport's. */
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetViewports; /* SVGA_3D_CMD_DX_SET_VIEWPORTS */
 
#define SVGA3D_DX_MAX_VIEWPORTS 16
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetScissorRects {
uint32 pad0;
/* Followed by a variable number of SVGASignedRect's. */
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetScissorRects; /* SVGA_3D_CMD_DX_SET_SCISSORRECTS */
 
#define SVGA3D_DX_MAX_SCISSORRECTS 16
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXClearRenderTargetView {
SVGA3dRenderTargetViewId renderTargetViewId;
SVGA3dRGBAFloat rgba;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXClearRenderTargetView; /* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXClearDepthStencilView {
uint16 flags;
uint16 stencil;
SVGA3dDepthStencilViewId depthStencilViewId;
float depth;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXClearDepthStencilView; /* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXPredCopyRegion {
SVGA3dSurfaceId dstSid;
uint32 dstSubResource;
SVGA3dSurfaceId srcSid;
uint32 srcSubResource;
SVGA3dCopyBox box;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXPredCopyRegion;
/* SVGA_3D_CMD_DX_PRED_COPY_REGION */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXPredCopy {
SVGA3dSurfaceId dstSid;
SVGA3dSurfaceId srcSid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXPredCopy; /* SVGA_3D_CMD_DX_PRED_COPY */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXBufferCopy {
SVGA3dSurfaceId dest;
SVGA3dSurfaceId src;
uint32 destX;
uint32 srcX;
uint32 width;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXBufferCopy;
/* SVGA_3D_CMD_DX_BUFFER_COPY */
 
typedef uint32 SVGA3dDXStretchBltMode;
#define SVGADX_STRETCHBLT_LINEAR (1 << 0)
#define SVGADX_STRETCHBLT_FORCE_SRC_SRGB (1 << 1)
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXStretchBlt {
SVGA3dSurfaceId srcSid;
uint32 srcSubResource;
SVGA3dSurfaceId dstSid;
uint32 destSubResource;
SVGA3dBox boxSrc;
SVGA3dBox boxDest;
SVGA3dDXStretchBltMode mode;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXStretchBlt; /* SVGA_3D_CMD_DX_STRETCHBLT */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXGenMips {
SVGA3dShaderResourceViewId shaderResourceViewId;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXGenMips; /* SVGA_3D_CMD_DX_GENMIPS */
 
/*
* Defines a resource/DX surface. Resources share the surfaceId namespace.
*
*/
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDefineGBSurface_v2 {
uint32 sid;
SVGA3dSurfaceFlags surfaceFlags;
SVGA3dSurfaceFormat format;
uint32 numMipLevels;
uint32 multisampleCount;
SVGA3dTextureFilter autogenFilter;
SVGA3dSize size;
uint32 arraySize;
uint32 pad;
}
#include "vmware_pack_end.h"
SVGA3dCmdDefineGBSurface_v2; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
 
/*
* Update a sub-resource in a guest-backed resource.
* (Inform the device that the guest-contents have been updated.)
*/
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXUpdateSubResource {
SVGA3dSurfaceId sid;
uint32 subResource;
SVGA3dBox box;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXUpdateSubResource; /* SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE */
 
/*
* Readback a subresource in a guest-backed resource.
* (Request the device to flush the dirty contents into the guest.)
*/
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXReadbackSubResource {
SVGA3dSurfaceId sid;
uint32 subResource;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXReadbackSubResource; /* SVGA_3D_CMD_DX_READBACK_SUBRESOURCE */
 
/*
* Invalidate an image in a guest-backed surface.
* (Notify the device that the contents can be lost.)
*/
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXInvalidateSubResource {
SVGA3dSurfaceId sid;
uint32 subResource;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXInvalidateSubResource; /* SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE */
 
 
/*
* Raw byte wise transfer from a buffer surface into another surface
* of the requested box.
*/
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXTransferFromBuffer {
SVGA3dSurfaceId srcSid;
uint32 srcOffset;
uint32 srcPitch;
uint32 srcSlicePitch;
SVGA3dSurfaceId destSid;
uint32 destSubResource;
SVGA3dBox destBox;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXTransferFromBuffer; /* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER */
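 
/*
 * Minimal sketch, not part of the device headers: staging a raw
 * buffer-to-surface transfer. srcPitch and srcSlicePitch are taken here
 * to be the byte strides between consecutive rows and slices of the
 * source data; computing them from the destination format and box is
 * left to the caller. The helper name is an assumption.
 */
static inline void
SVGA3dDXInitTransferFromBuffer(SVGA3dCmdDXTransferFromBuffer *cmd,
                               SVGA3dSurfaceId srcSid, uint32 srcOffset,
                               uint32 srcPitch, uint32 srcSlicePitch,
                               SVGA3dSurfaceId destSid,
                               uint32 destSubResource,
                               const SVGA3dBox *destBox)
{
   cmd->srcSid          = srcSid;
   cmd->srcOffset       = srcOffset;
   cmd->srcPitch        = srcPitch;
   cmd->srcSlicePitch   = srcSlicePitch;
   cmd->destSid         = destSid;
   cmd->destSubResource = destSubResource;
   cmd->destBox         = *destBox;
}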
 
 
/*
* Raw byte wise transfer from a buffer surface into another surface
* of the requested box. Supported if SVGA3D_DEVCAP_DXCONTEXT is set.
* The context is implied from the command buffer header.
*/
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXPredTransferFromBuffer {
SVGA3dSurfaceId srcSid;
uint32 srcOffset;
uint32 srcPitch;
uint32 srcSlicePitch;
SVGA3dSurfaceId destSid;
uint32 destSubResource;
SVGA3dBox destBox;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXPredTransferFromBuffer;
/* SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER */
 
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSurfaceCopyAndReadback {
SVGA3dSurfaceId srcSid;
SVGA3dSurfaceId destSid;
SVGA3dCopyBox box;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSurfaceCopyAndReadback;
/* SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK */
 
 
typedef
#include "vmware_pack_begin.h"
struct {
union {
struct {
uint32 firstElement;
uint32 numElements;
uint32 pad0;
uint32 pad1;
} buffer;
struct {
uint32 mostDetailedMip;
uint32 firstArraySlice;
uint32 mipLevels;
uint32 arraySize;
} tex;
struct {
uint32 firstElement;
uint32 numElements;
uint32 flags;
uint32 pad0;
} bufferex;
};
}
#include "vmware_pack_end.h"
SVGA3dShaderResourceViewDesc;
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dSurfaceId sid;
SVGA3dSurfaceFormat format;
SVGA3dResourceType resourceDimension;
SVGA3dShaderResourceViewDesc desc;
uint32 pad;
}
#include "vmware_pack_end.h"
SVGACOTableDXSRViewEntry;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineShaderResourceView {
SVGA3dShaderResourceViewId shaderResourceViewId;
 
SVGA3dSurfaceId sid;
SVGA3dSurfaceFormat format;
SVGA3dResourceType resourceDimension;
 
SVGA3dShaderResourceViewDesc desc;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDefineShaderResourceView;
/* SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDestroyShaderResourceView {
SVGA3dShaderResourceViewId shaderResourceViewId;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroyShaderResourceView;
/* SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dRenderTargetViewDesc {
union {
struct {
uint32 firstElement;
uint32 numElements;
} buffer;
struct {
uint32 mipSlice;
uint32 firstArraySlice;
uint32 arraySize;
} tex; /* 1d, 2d, cube */
struct {
uint32 mipSlice;
uint32 firstW;
uint32 wSize;
} tex3D;
};
}
#include "vmware_pack_end.h"
SVGA3dRenderTargetViewDesc;
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dSurfaceId sid;
SVGA3dSurfaceFormat format;
SVGA3dResourceType resourceDimension;
SVGA3dRenderTargetViewDesc desc;
uint32 pad[2];
}
#include "vmware_pack_end.h"
SVGACOTableDXRTViewEntry;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineRenderTargetView {
SVGA3dRenderTargetViewId renderTargetViewId;
 
SVGA3dSurfaceId sid;
SVGA3dSurfaceFormat format;
SVGA3dResourceType resourceDimension;
 
SVGA3dRenderTargetViewDesc desc;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDefineRenderTargetView;
/* SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDestroyRenderTargetView {
SVGA3dRenderTargetViewId renderTargetViewId;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroyRenderTargetView;
/* SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW */
 
/*
*/
#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_DEPTH 0x01
#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_STENCIL 0x02
#define SVGA3D_DXDSVIEW_CREATE_FLAG_MASK 0x03
typedef uint8 SVGA3DCreateDSViewFlags;
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dSurfaceId sid;
SVGA3dSurfaceFormat format;
SVGA3dResourceType resourceDimension;
uint32 mipSlice;
uint32 firstArraySlice;
uint32 arraySize;
SVGA3DCreateDSViewFlags flags;
uint8 pad0;
uint16 pad1;
uint32 pad2;
}
#include "vmware_pack_end.h"
SVGACOTableDXDSViewEntry;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineDepthStencilView {
SVGA3dDepthStencilViewId depthStencilViewId;
 
SVGA3dSurfaceId sid;
SVGA3dSurfaceFormat format;
SVGA3dResourceType resourceDimension;
uint32 mipSlice;
uint32 firstArraySlice;
uint32 arraySize;
SVGA3DCreateDSViewFlags flags;
uint8 pad0;
uint16 pad1;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDefineDepthStencilView;
/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDestroyDepthStencilView {
SVGA3dDepthStencilViewId depthStencilViewId;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroyDepthStencilView;
/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dInputElementDesc {
uint32 inputSlot;
uint32 alignedByteOffset;
SVGA3dSurfaceFormat format;
SVGA3dInputClassification inputSlotClass;
uint32 instanceDataStepRate;
uint32 inputRegister;
}
#include "vmware_pack_end.h"
SVGA3dInputElementDesc;
 
typedef
#include "vmware_pack_begin.h"
struct {
/*
* XXX: How many of these can there be?
*/
uint32 elid;
uint32 numDescs;
SVGA3dInputElementDesc desc[32];
uint32 pad[62];
}
#include "vmware_pack_end.h"
SVGACOTableDXElementLayoutEntry;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineElementLayout {
SVGA3dElementLayoutId elementLayoutId;
/* Followed by a variable number of SVGA3dInputElementDesc's. */
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDefineElementLayout;
/* SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDestroyElementLayout {
SVGA3dElementLayoutId elementLayoutId;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroyElementLayout;
/* SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT */
 
 
#define SVGA3D_DX_MAX_RENDER_TARGETS 8
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dDXBlendStatePerRT {
uint8 blendEnable;
uint8 srcBlend;
uint8 destBlend;
uint8 blendOp;
uint8 srcBlendAlpha;
uint8 destBlendAlpha;
uint8 blendOpAlpha;
uint8 renderTargetWriteMask;
uint8 logicOpEnable;
uint8 logicOp;
uint16 pad0;
}
#include "vmware_pack_end.h"
SVGA3dDXBlendStatePerRT;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint8 alphaToCoverageEnable;
uint8 independentBlendEnable;
uint16 pad0;
SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
uint32 pad1[7];
}
#include "vmware_pack_end.h"
SVGACOTableDXBlendStateEntry;
 
/*
*/
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineBlendState {
SVGA3dBlendStateId blendId;
uint8 alphaToCoverageEnable;
uint8 independentBlendEnable;
uint16 pad0;
SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDefineBlendState; /* SVGA_3D_CMD_DX_DEFINE_BLEND_STATE */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDestroyBlendState {
SVGA3dBlendStateId blendId;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroyBlendState; /* SVGA_3D_CMD_DX_DESTROY_BLEND_STATE */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint8 depthEnable;
SVGA3dDepthWriteMask depthWriteMask;
SVGA3dComparisonFunc depthFunc;
uint8 stencilEnable;
uint8 frontEnable;
uint8 backEnable;
uint8 stencilReadMask;
uint8 stencilWriteMask;
 
uint8 frontStencilFailOp;
uint8 frontStencilDepthFailOp;
uint8 frontStencilPassOp;
SVGA3dComparisonFunc frontStencilFunc;
 
uint8 backStencilFailOp;
uint8 backStencilDepthFailOp;
uint8 backStencilPassOp;
SVGA3dComparisonFunc backStencilFunc;
}
#include "vmware_pack_end.h"
SVGACOTableDXDepthStencilEntry;
 
/*
*/
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineDepthStencilState {
SVGA3dDepthStencilStateId depthStencilId;
 
uint8 depthEnable;
SVGA3dDepthWriteMask depthWriteMask;
SVGA3dComparisonFunc depthFunc;
uint8 stencilEnable;
uint8 frontEnable;
uint8 backEnable;
uint8 stencilReadMask;
uint8 stencilWriteMask;
 
uint8 frontStencilFailOp;
uint8 frontStencilDepthFailOp;
uint8 frontStencilPassOp;
SVGA3dComparisonFunc frontStencilFunc;
 
uint8 backStencilFailOp;
uint8 backStencilDepthFailOp;
uint8 backStencilPassOp;
SVGA3dComparisonFunc backStencilFunc;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDefineDepthStencilState;
/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDestroyDepthStencilState {
SVGA3dDepthStencilStateId depthStencilId;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroyDepthStencilState;
/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint8 fillMode;
SVGA3dCullMode cullMode;
uint8 frontCounterClockwise;
uint8 provokingVertexLast;
int32 depthBias;
float depthBiasClamp;
float slopeScaledDepthBias;
uint8 depthClipEnable;
uint8 scissorEnable;
uint8 multisampleEnable;
uint8 antialiasedLineEnable;
float lineWidth;
uint8 lineStippleEnable;
uint8 lineStippleFactor;
uint16 lineStipplePattern;
uint32 forcedSampleCount;
}
#include "vmware_pack_end.h"
SVGACOTableDXRasterizerStateEntry;
 
/*
*/
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineRasterizerState {
SVGA3dRasterizerStateId rasterizerId;
 
uint8 fillMode;
SVGA3dCullMode cullMode;
uint8 frontCounterClockwise;
uint8 provokingVertexLast;
int32 depthBias;
float depthBiasClamp;
float slopeScaledDepthBias;
uint8 depthClipEnable;
uint8 scissorEnable;
uint8 multisampleEnable;
uint8 antialiasedLineEnable;
float lineWidth;
uint8 lineStippleEnable;
uint8 lineStippleFactor;
uint16 lineStipplePattern;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDefineRasterizerState;
/* SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDestroyRasterizerState {
SVGA3dRasterizerStateId rasterizerId;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroyRasterizerState;
/* SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE */
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGA3dFilter filter;
uint8 addressU;
uint8 addressV;
uint8 addressW;
uint8 pad0;
float mipLODBias;
uint8 maxAnisotropy;
SVGA3dComparisonFunc comparisonFunc;
uint16 pad1;
SVGA3dRGBAFloat borderColor;
float minLOD;
float maxLOD;
uint32 pad2[6];
}
#include "vmware_pack_end.h"
SVGACOTableDXSamplerEntry;
 
/*
*/
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineSamplerState {
SVGA3dSamplerId samplerId;
SVGA3dFilter filter;
uint8 addressU;
uint8 addressV;
uint8 addressW;
uint8 pad0;
float mipLODBias;
uint8 maxAnisotropy;
SVGA3dComparisonFunc comparisonFunc;
uint16 pad1;
SVGA3dRGBAFloat borderColor;
float minLOD;
float maxLOD;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDefineSamplerState; /* SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDestroySamplerState {
SVGA3dSamplerId samplerId;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */
 
/*
*/
typedef
#include "vmware_pack_begin.h"
struct SVGA3dSignatureEntry {
uint8 systemValue;
uint8 reg; /* register is a reserved word */
uint16 mask;
uint8 registerComponentType;
uint8 minPrecision;
uint16 pad0;
}
#include "vmware_pack_end.h"
SVGA3dSignatureEntry;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineShader {
SVGA3dShaderId shaderId;
SVGA3dShaderType type;
uint32 sizeInBytes; /* Number of bytes of shader text. */
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDefineShader; /* SVGA_3D_CMD_DX_DEFINE_SHADER */
 
typedef
#include "vmware_pack_begin.h"
struct SVGACOTableDXShaderEntry {
SVGA3dShaderType type;
uint32 sizeInBytes;
uint32 offsetInBytes;
SVGAMobId mobid;
uint32 numInputSignatureEntries;
uint32 numOutputSignatureEntries;
 
uint32 numPatchConstantSignatureEntries;
 
uint32 pad;
}
#include "vmware_pack_end.h"
SVGACOTableDXShaderEntry;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDestroyShader {
SVGA3dShaderId shaderId;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroyShader; /* SVGA_3D_CMD_DX_DESTROY_SHADER */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXBindShader {
uint32 cid;
uint32 shid;
SVGAMobId mobid;
uint32 offsetInBytes;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXBindShader; /* SVGA_3D_CMD_DX_BIND_SHADER */
 
/*
* The maximum number of streamout declarations in each streamout entry.
*/
#define SVGA3D_MAX_STREAMOUT_DECLS 64
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dStreamOutputDeclarationEntry {
uint32 outputSlot;
uint32 registerIndex;
uint8 registerMask;
uint8 pad0;
uint16 pad1;
uint32 stream;
}
#include "vmware_pack_end.h"
SVGA3dStreamOutputDeclarationEntry;
 
typedef
#include "vmware_pack_begin.h"
struct SVGACOTableDXStreamOutputEntry {
uint32 numOutputStreamEntries;
SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
uint32 rasterizedStream;
uint32 pad[250];
}
#include "vmware_pack_end.h"
SVGACOTableDXStreamOutputEntry;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineStreamOutput {
SVGA3dStreamOutputId soid;
uint32 numOutputStreamEntries;
SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
uint32 rasterizedStream;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDefineStreamOutput; /* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDestroyStreamOutput {
SVGA3dStreamOutputId soid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroyStreamOutput; /* SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetStreamOutput {
SVGA3dStreamOutputId soid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetStreamOutput; /* SVGA_3D_CMD_DX_SET_STREAMOUTPUT */
 
typedef
#include "vmware_pack_begin.h"
struct {
uint64 value;
uint32 mobId;
uint32 mobOffset;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXMobFence64; /* SVGA_3D_CMD_DX_MOB_FENCE_64 */
 
/*
* SVGA3dCmdSetCOTable --
*
* This command allows the guest to bind a mob to a context-object table.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetCOTable {
uint32 cid;
uint32 mobid;
SVGACOTableType type;
uint32 validSizeInBytes;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXSetCOTable; /* SVGA_3D_CMD_DX_SET_COTABLE */
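 
/*
 * Minimal sketch, not part of the device headers: binding a mob as the
 * backing store of one context-object table, as described above.
 * validSizeInBytes is assumed here to be the number of bytes in the mob
 * that already hold valid entries (0 for a freshly allocated table).
 * The helper name is an assumption for illustration only.
 */
static inline void
SVGA3dDXInitSetCOTable(SVGA3dCmdDXSetCOTable *cmd,
                       uint32 cid, uint32 mobid,
                       SVGACOTableType type, uint32 validSizeInBytes)
{
   cmd->cid              = cid;
   cmd->mobid            = mobid;
   cmd->type             = type;
   cmd->validSizeInBytes = validSizeInBytes;
}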
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXReadbackCOTable {
uint32 cid;
SVGACOTableType type;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXReadbackCOTable; /* SVGA_3D_CMD_DX_READBACK_COTABLE */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCOTableData {
uint32 mobid;
}
#include "vmware_pack_end.h"
SVGA3dCOTableData;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dBufferBinding {
uint32 bufferId;
uint32 stride;
uint32 offset;
}
#include "vmware_pack_end.h"
SVGA3dBufferBinding;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dConstantBufferBinding {
uint32 sid;
uint32 offsetInBytes;
uint32 sizeInBytes;
}
#include "vmware_pack_end.h"
SVGA3dConstantBufferBinding;
 
typedef
#include "vmware_pack_begin.h"
struct SVGADXInputAssemblyMobFormat {
uint32 layoutId;
SVGA3dBufferBinding vertexBuffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
uint32 indexBufferSid;
uint32 pad;
uint32 indexBufferOffset;
uint32 indexBufferFormat;
uint32 topology;
}
#include "vmware_pack_end.h"
SVGADXInputAssemblyMobFormat;
 
typedef
#include "vmware_pack_begin.h"
struct SVGADXContextMobFormat {
SVGADXInputAssemblyMobFormat inputAssembly;
 
struct {
uint32 blendStateId;
uint32 blendFactor[4];
uint32 sampleMask;
uint32 depthStencilStateId;
uint32 stencilRef;
uint32 rasterizerStateId;
uint32 depthStencilViewId;
uint32 renderTargetViewIds[SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS];
uint32 unorderedAccessViewIds[SVGA3D_MAX_UAVIEWS];
} renderState;
 
struct {
uint32 targets[SVGA3D_DX_MAX_SOTARGETS];
uint32 soid;
} streamOut;
uint32 pad0[11];
 
uint8 numViewports;
uint8 numScissorRects;
uint16 pad1[1];
 
uint32 pad2[3];
 
SVGA3dViewport viewports[SVGA3D_DX_MAX_VIEWPORTS];
uint32 pad3[32];
 
SVGASignedRect scissorRects[SVGA3D_DX_MAX_SCISSORRECTS];
uint32 pad4[64];
 
struct {
uint32 queryID;
uint32 value;
} predication;
uint32 pad5[2];
 
struct {
uint32 shaderId;
SVGA3dConstantBufferBinding constantBuffers[SVGA3D_DX_MAX_CONSTBUFFERS];
uint32 shaderResources[SVGA3D_DX_MAX_SRVIEWS];
uint32 samplers[SVGA3D_DX_MAX_SAMPLERS];
} shaderState[SVGA3D_NUM_SHADERTYPE];
uint32 pad6[26];
 
SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];
 
SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
uint32 pad7[381];
}
#include "vmware_pack_end.h"
SVGADXContextMobFormat;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXTempSetContext {
uint32 dxcid;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXTempSetContext; /* SVGA_3D_CMD_DX_TEMP_SET_CONTEXT */
 
#endif /* _SVGA3D_DX_H_ */
/drivers/video/drm/vmwgfx/device_include/svga3d_limits.h
0,0 → 1,99
/**********************************************************
* Copyright 2007-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
 
/*
* svga3d_limits.h --
*
* SVGA 3d hardware limits
*/
 
#ifndef _SVGA3D_LIMITS_H_
#define _SVGA3D_LIMITS_H_
 
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMCORE
 
#include "includeCheck.h"
 
#define SVGA3D_NUM_CLIPPLANES 6
#define SVGA3D_MAX_RENDER_TARGETS 8
#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS (SVGA3D_MAX_RENDER_TARGETS)
#define SVGA3D_MAX_UAVIEWS 8
#define SVGA3D_MAX_CONTEXT_IDS 256
#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
 
/*
* Maximum ID a shader can be assigned on a given context.
*/
#define SVGA3D_MAX_SHADERIDS 5000
/*
* Maximum number of shaders of a given type that can be defined
* (including all contexts).
*/
#define SVGA3D_MAX_SIMULTANEOUS_SHADERS 20000
 
#define SVGA3D_NUM_TEXTURE_UNITS 32
#define SVGA3D_NUM_LIGHTS 8
 
/*
* Maximum size in dwords of shader text the SVGA device will allow.
* Currently 8 MB.
*/
#define SVGA3D_MAX_SHADER_MEMORY (8 * 1024 * 1024 / sizeof(uint32))
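 
/*
 * Illustrative sketch, not part of the device headers: the limit above is
 * expressed in dwords, so a byte count (e.g. the sizeInBytes field of a
 * shader-define command) has to be compared against the limit scaled back
 * to bytes. Assumes uint32 is in scope, as the macro itself already does.
 * The helper name is an assumption; it returns 0 or 1.
 */
static inline uint32
SVGA3dShaderSizeWithinLimit(uint32 sizeInBytes)
{
   return sizeInBytes <= SVGA3D_MAX_SHADER_MEMORY * sizeof(uint32);
}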
 
#define SVGA3D_MAX_CLIP_PLANES 6
 
/*
* This is the limit to the number of fixed-function texture
* transforms and texture coordinates we can support. It does *not*
* correspond to the number of texture image units (samplers) we
* support!
*/
#define SVGA3D_MAX_TEXTURE_COORDS 8
 
/*
* Number of faces in a cubemap.
*/
#define SVGA3D_MAX_SURFACE_FACES 6
 
/*
* Maximum number of array indexes in a GB surface (with DX enabled).
*/
#define SVGA3D_MAX_SURFACE_ARRAYSIZE 512
 
/*
* The maximum number of vertex arrays we're guaranteed to support in
* SVGA_3D_CMD_DRAWPRIMITIVES.
*/
#define SVGA3D_MAX_VERTEX_ARRAYS 32
 
/*
* The maximum number of primitive ranges we're guaranteed to support
* in SVGA_3D_CMD_DRAWPRIMITIVES.
*/
#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
 
#endif /* _SVGA3D_LIMITS_H_ */
/drivers/video/drm/vmwgfx/device_include/svga3d_reg.h
0,0 → 1,50
/**********************************************************
* Copyright 1998-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
 
/*
* svga3d_reg.h --
*
* SVGA 3d hardware definitions
*/
 
#ifndef _SVGA3D_REG_H_
#define _SVGA3D_REG_H_
 
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMCORE
 
#include "includeCheck.h"
 
#include "svga_reg.h"
 
#include "svga3d_types.h"
#include "svga3d_limits.h"
#include "svga3d_cmd.h"
#include "svga3d_dx.h"
#include "svga3d_devcaps.h"
 
 
#endif /* _SVGA3D_REG_H_ */
/drivers/video/drm/vmwgfx/device_include/svga3d_surfacedefs.h
0,0 → 1,1204
/**************************************************************************
*
* Copyright © 2008-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#ifdef __KERNEL__
 
#include <drm/vmwgfx_drm.h>
#define surf_size_struct struct drm_vmw_size
 
#else /* __KERNEL__ */
 
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
#endif /* ARRAY_SIZE */
 
#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
#define surf_size_struct SVGA3dSize
#define u32 uint32
 
#endif /* __KERNEL__ */
 
#include "svga3d_reg.h"
 
/*
* enum svga3d_block_desc describes the active data channels in a block.
*
* There can be at most four active channels in a block:
* 1. Blue and bump U are stored in the first channel.
* 2. Green, bump V and stencil are stored in the second channel.
* 3. Red, bump W, luminance and depth are stored in the third channel.
* 4. Alpha and bump Q are stored in the fourth channel.
*
* Block channels can be used to store compressed and buffer data:
* 1. For compressed formats, only the data channel is used and its size
* is equal to that of a singular block in the compression scheme.
* 2. For buffer formats, only the data channel is used and its size is
* exactly one byte in length.
* 3. In each case the bit depth represents the size of a singular block.
*
* Note: Compressed and IEEE formats do not use the bitMask structure.
*/
 
enum svga3d_block_desc {
SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with blue channel data */
SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
data */
SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
U and V */
SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
data */
SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
data */
SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
channel */
SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with red channel data */
SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
data */
SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
data */
SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
data */
SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
channel */
SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
data */
SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
data */
SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
data depending on the
compression method used */
SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
floating point
representation in
all channels */
SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
data. */
SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
e.g., NV12. */
SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
Y, U, V, e.g., YV12. */
 
SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
SVGA3DBLOCKDESC_GREEN,
SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
SVGA3DBLOCKDESC_BLUE,
SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
SVGA3DBLOCKDESC_SRGB,
SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
SVGA3DBLOCKDESC_ALPHA,
SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
SVGA3DBLOCKDESC_SRGB,
SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
SVGA3DBLOCKDESC_V,
SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
SVGA3DBLOCKDESC_LUMINANCE,
SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
SVGA3DBLOCKDESC_W,
SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
SVGA3DBLOCKDESC_ALPHA,
SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
SVGA3DBLOCKDESC_V |
SVGA3DBLOCKDESC_W |
SVGA3DBLOCKDESC_Q,
SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
SVGA3DBLOCKDESC_ALPHA,
SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
SVGA3DBLOCKDESC_IEEE_FP,
SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
SVGA3DBLOCKDESC_GREEN,
SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
SVGA3DBLOCKDESC_BLUE,
SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
SVGA3DBLOCKDESC_ALPHA,
SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
SVGA3DBLOCKDESC_STENCIL,
SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
SVGA3DBLOCKDESC_Y,
SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
SVGA3DBLOCKDESC_Y |
SVGA3DBLOCKDESC_U_VIDEO |
SVGA3DBLOCKDESC_V_VIDEO,
SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
SVGA3DBLOCKDESC_EXP,
SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
SVGA3DBLOCKDESC_SRGB,
SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
SVGA3DBLOCKDESC_2PLANAR_YUV,
SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
SVGA3DBLOCKDESC_3PLANAR_YUV,
};
 
/*
* SVGA3dSurfaceDesc describes the actual pixel data.
*
* This structure provides the following information:
* 1. Block description.
* 2. Dimensions of a block in the surface.
* 3. Size of block in bytes.
* 4. Bit depth of the pixel data.
* 5. Channel bit depths and masks (if applicable).
*/
struct svga3d_channel_def {
union {
u8 blue;
u8 u;
u8 uv_video;
u8 u_video;
};
union {
u8 green;
u8 v;
u8 stencil;
u8 v_video;
};
union {
u8 red;
u8 w;
u8 luminance;
u8 y;
u8 depth;
u8 data;
};
union {
u8 alpha;
u8 q;
u8 exp;
};
};
 
struct svga3d_surface_desc {
SVGA3dSurfaceFormat format;
enum svga3d_block_desc block_desc;
surf_size_struct block_size;
u32 bytes_per_block;
u32 pitch_bytes_per_block;
 
u32 total_bit_depth;
struct svga3d_channel_def bit_depth;
struct svga3d_channel_def bit_offset;
};
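 
/*
 * Minimal sketch, not part of the original header: computing the byte
 * size of a single mip level from a descriptor, for the common case in
 * which bytes_per_block equals pitch_bytes_per_block (i.e. ignoring the
 * planar YUV formats, which need extra handling). The helper name is an
 * assumption for illustration only.
 */
static inline u32
svga3d_simple_mip_size(const struct svga3d_surface_desc *desc,
                       surf_size_struct size)
{
   u32 blocks_x = DIV_ROUND_UP(size.width,  desc->block_size.width);
   u32 blocks_y = DIV_ROUND_UP(size.height, desc->block_size.height);
   u32 blocks_z = DIV_ROUND_UP(size.depth,  desc->block_size.depth);

   return blocks_x * blocks_y * blocks_z * desc->bytes_per_block;
}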
 
static const struct svga3d_surface_desc svga3d_surface_descs[] = {
{SVGA3D_FORMAT_INVALID, SVGA3DBLOCKDESC_NONE,
{1, 1, 1}, 0, 0,
0, {{0}, {0}, {0}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB,
{1, 1, 1}, 4, 4,
24, {{8}, {8}, {8}, {0}},
{{0}, {8}, {16}, {24}}},
 
{SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4,
32, {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
 
{SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB,
{1, 1, 1}, 2, 2,
16, {{5}, {6}, {5}, {0}},
{{0}, {5}, {11}, {0}}},
 
{SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB,
{1, 1, 1}, 2, 2,
15, {{5}, {5}, {5}, {0}},
{{0}, {5}, {10}, {0}}},
 
{SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 2, 2,
16, {{5}, {5}, {5}, {1}},
{{0}, {5}, {10}, {15}}},
 
{SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 2, 2,
16, {{4}, {4}, {4}, {4}},
{{0}, {4}, {8}, {12}}},
 
{SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH,
{1, 1, 1}, 4, 4,
32, {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH,
{1, 1, 1}, 2, 2,
16, {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS,
{1, 1, 1}, 4, 4,
32, {{0}, {8}, {24}, {0}},
{{0}, {24}, {0}, {0}}},
 
{SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS,
{1, 1, 1}, 2, 2,
16, {{0}, {1}, {15}, {0}},
{{0}, {15}, {0}, {0}}},
 
{SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_LUMINANCE,
{1, 1, 1}, 1, 1,
8, {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA,
{1 , 1, 1}, 1, 1,
8, {{0}, {0}, {4}, {4}},
{{0}, {0}, {0}, {4}}},
 
{SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_LUMINANCE,
{1, 1, 1}, 2, 2,
16, {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA,
{1, 1, 1}, 2, 2,
16, {{0}, {0}, {8}, {8}},
{{0}, {0}, {0}, {8}}},
 
{SVGA3D_DXT1, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 8, 8,
64, {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_DXT2, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16,
128, {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_DXT3, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16,
128, {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_DXT4, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16,
128, {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_DXT5, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16,
128, {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV,
{1, 1, 1}, 2, 2,
16, {{0}, {0}, {8}, {8}},
{{0}, {0}, {0}, {8}}},
 
{SVGA3D_BUMPL6V5U5, SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 2, 2,
16, {{5}, {5}, {6}, {0}},
{{11}, {6}, {0}, {0}}},
 
{SVGA3D_BUMPX8L8V8U8, SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 4, 4,
32, {{8}, {8}, {8}, {0}},
{{16}, {8}, {0}, {0}}},
 
{SVGA3D_BUMPL8V8U8, SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 3, 3,
24, {{8}, {8}, {8}, {0}},
{{16}, {8}, {0}, {0}}},
 
{SVGA3D_ARGB_S10E5, SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 8, 8,
64, {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
 
{SVGA3D_ARGB_S23E8, SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 16, 16,
128, {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
 
{SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4,
32, {{10}, {10}, {10}, {2}},
{{0}, {10}, {20}, {30}}},
 
{SVGA3D_V8U8, SVGA3DBLOCKDESC_UV,
{1, 1, 1}, 2, 2,
16, {{8}, {8}, {0}, {0}},
{{8}, {0}, {0}, {0}}},
 
{SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ,
{1, 1, 1}, 4, 4,
32, {{8}, {8}, {8}, {8}},
{{24}, {16}, {8}, {0}}},
 
{SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UV,
{1, 1, 1}, 2, 2,
16, {{8}, {8}, {0}, {0}},
{{8}, {0}, {0}, {0}}},
 
{SVGA3D_X8L8V8U8, SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 4, 4,
24, {{8}, {8}, {8}, {0}},
{{16}, {8}, {0}, {0}}},
 
{SVGA3D_A2W10V10U10, SVGA3DBLOCKDESC_UVWA,
{1, 1, 1}, 4, 4,
32, {{10}, {10}, {10}, {2}},
{{0}, {10}, {20}, {30}}},
 
{SVGA3D_ALPHA8, SVGA3DBLOCKDESC_ALPHA,
{1, 1, 1}, 1, 1,
8, {{0}, {0}, {0}, {8}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R_S10E5, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 2, 2,
16, {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R_S23E8, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 4, 4,
32, {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_RG_S10E5, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 4, 4,
32, {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
 
{SVGA3D_RG_S23E8, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 8, 8,
64, {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
 
{SVGA3D_BUFFER, SVGA3DBLOCKDESC_BUFFER,
{1, 1, 1}, 1, 1,
8, {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH,
{1, 1, 1}, 4, 4,
32, {{0}, {0}, {24}, {0}},
{{0}, {24}, {0}, {0}}},
 
{SVGA3D_V16U16, SVGA3DBLOCKDESC_UV,
{1, 1, 1}, 4, 4,
32, {{16}, {16}, {0}, {0}},
{{16}, {0}, {0}, {0}}},
 
{SVGA3D_G16R16, SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 4, 4,
32, {{0}, {16}, {16}, {0}},
{{0}, {0}, {16}, {0}}},
 
{SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 8, 8,
64, {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
 
{SVGA3D_UYVY, SVGA3DBLOCKDESC_YUV,
{1, 1, 1}, 2, 2,
16, {{8}, {0}, {8}, {0}},
{{0}, {0}, {8}, {0}}},
 
{SVGA3D_YUY2, SVGA3DBLOCKDESC_YUV,
{1, 1, 1}, 2, 2,
16, {{8}, {0}, {8}, {0}},
{{8}, {0}, {0}, {0}}},
 
{SVGA3D_NV12, SVGA3DBLOCKDESC_NV12,
{2, 2, 1}, 6, 2,
48, {{0}, {0}, {48}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
{1, 1, 1}, 4, 4,
32, {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
 
{SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 16, 16,
128, {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
 
{SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 16, 16,
128, {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
 
{SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_UVWQ,
{1, 1, 1}, 16, 16,
128, {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
 
{SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_RGB,
{1, 1, 1}, 12, 12,
96, {{32}, {32}, {32}, {0}},
{{64}, {32}, {0}, {0}}},
 
{SVGA3D_R32G32B32_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
{1, 1, 1}, 12, 12,
96, {{32}, {32}, {32}, {0}},
{{64}, {32}, {0}, {0}}},
 
{SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB,
{1, 1, 1}, 12, 12,
96, {{32}, {32}, {32}, {0}},
{{64}, {32}, {0}, {0}}},
 
{SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_UVW,
{1, 1, 1}, 12, 12,
96, {{32}, {32}, {32}, {0}},
{{64}, {32}, {0}, {0}}},
 
{SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 8, 8,
64, {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
 
{SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 8, 8,
64, {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
 
{SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_UVWQ,
{1, 1, 1}, 8, 8,
64, {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
 
{SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_UVWQ,
{1, 1, 1}, 8, 8,
64, {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
 
{SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 8, 8,
64, {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
 
{SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 8, 8,
64, {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
 
{SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_UV,
{1, 1, 1}, 8, 8,
64, {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
 
{SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 8, 8,
64, {{0}, {8}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
 
{SVGA3D_D32_FLOAT_S8X24_UINT, SVGA3DBLOCKDESC_DS,
{1, 1, 1}, 8, 8,
64, {{0}, {8}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
 
{SVGA3D_R32_FLOAT_X8X24_TYPELESS, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 8, 8,
64, {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_X32_TYPELESS_G8X24_UINT, SVGA3DBLOCKDESC_GREEN,
{1, 1, 1}, 8, 8,
64, {{0}, {8}, {0}, {0}},
{{0}, {32}, {0}, {0}}},
 
{SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4,
32, {{10}, {10}, {10}, {2}},
{{0}, {10}, {20}, {30}}},
 
{SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4,
32, {{10}, {10}, {10}, {2}},
{{0}, {10}, {20}, {30}}},
 
{SVGA3D_R11G11B10_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
{1, 1, 1}, 4, 4,
32, {{10}, {11}, {11}, {0}},
{{0}, {10}, {21}, {0}}},
 
{SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4,
32, {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
 
{SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4,
32, {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
 
{SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
{1, 1, 1}, 4, 4,
32, {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
 
{SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4,
32, {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
 
{SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4,
32, {{8}, {8}, {8}, {8}},
{{16}, {8}, {0}, {24}}},
 
{SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 4, 4,
32, {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
 
{SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 4, 4,
32, {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
 
{SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_UV,
{1, 1, 1}, 4, 4,
32, {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
 
{SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 4, 4,
32, {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH,
{1, 1, 1}, 4, 4,
32, {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R32_UINT, SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 4, 4,
32, {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R32_SINT, SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 4, 4,
32, {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 4, 4,
32, {{0}, {8}, {24}, {0}},
{{0}, {24}, {0}, {0}}},
 
{SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS,
{1, 1, 1}, 4, 4,
32, {{0}, {8}, {24}, {0}},
{{0}, {24}, {0}, {0}}},
 
{SVGA3D_R24_UNORM_X8_TYPELESS, SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 4, 4,
32, {{0}, {0}, {24}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_X24_TYPELESS_G8_UINT, SVGA3DBLOCKDESC_GREEN,
{1, 1, 1}, 4, 4,
32, {{0}, {8}, {0}, {0}},
{{0}, {24}, {0}, {0}}},
 
{SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 2, 2,
16, {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
 
{SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 2, 2,
16, {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
 
{SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 2, 2,
16, {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
 
{SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_UV,
{1, 1, 1}, 2, 2,
16, {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
 
{SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 2, 2,
16, {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 2, 2,
16, {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R16_UINT, SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 2, 2,
16, {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_U,
{1, 1, 1}, 2, 2,
16, {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R16_SINT, SVGA3DBLOCKDESC_U,
{1, 1, 1}, 2, 2,
16, {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 1, 1,
8, {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 1, 1,
8, {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R8_UINT, SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 1, 1,
8, {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_U,
{1, 1, 1}, 1, 1,
8, {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R8_SINT, SVGA3DBLOCKDESC_U,
{1, 1, 1}, 1, 1,
8, {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_P8, SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 1, 1,
8, {{0}, {0}, {8}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGBE,
{1, 1, 1}, 4, 4,
32, {{9}, {9}, {9}, {5}},
{{18}, {9}, {0}, {27}}},
 
{SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 2, 2,
16, {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
 
{SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 2, 2,
16, {{0}, {8}, {8}, {0}},
{{0}, {8}, {0}, {0}}},
 
{SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 8, 8,
64, {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
{4, 4, 1}, 8, 8,
64, {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16,
128, {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
{4, 4, 1}, 16, 16,
128, {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16,
128, {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
{4, 4, 1}, 16, 16,
128, {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 8, 8,
64, {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_ATI1, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 8, 8,
64, {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 8, 8,
64, {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16,
128, {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_ATI2, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16,
128, {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16,
128, {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4,
32, {{10}, {10}, {10}, {2}},
{{0}, {10}, {20}, {30}}},
 
{SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4,
32, {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
 
{SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
{1, 1, 1}, 4, 4,
32, {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
 
{SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_RGB,
{1, 1, 1}, 4, 4,
24, {{8}, {8}, {8}, {0}},
{{0}, {8}, {16}, {24}}},
 
{SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_SRGB,
{1, 1, 1}, 4, 4,
24, {{8}, {8}, {8}, {0}},
{{0}, {8}, {16}, {24}}},
 
{SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH,
{1, 1, 1}, 2, 2,
16, {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH,
{1, 1, 1}, 4, 4,
32, {{0}, {8}, {24}, {0}},
{{0}, {24}, {0}, {0}}},
 
{SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS,
{1, 1, 1}, 4, 4,
32, {{0}, {8}, {24}, {0}},
{{0}, {24}, {0}, {0}}},
 
{SVGA3D_YV12, SVGA3DBLOCKDESC_YV12,
{2, 2, 1}, 6, 2,
48, {{0}, {0}, {48}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R32G32B32A32_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 16, 16,
128, {{32}, {32}, {32}, {32}},
{{64}, {32}, {0}, {96}}},
 
{SVGA3D_R16G16B16A16_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 8, 8,
64, {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
 
{SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 8, 8,
64, {{16}, {16}, {16}, {16}},
{{32}, {16}, {0}, {48}}},
 
{SVGA3D_R32G32_FLOAT, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 8, 8,
64, {{0}, {32}, {32}, {0}},
{{0}, {32}, {0}, {0}}},
 
{SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4,
32, {{10}, {10}, {10}, {2}},
{{0}, {10}, {20}, {30}}},
 
{SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4,
32, {{8}, {8}, {8}, {8}},
{{24}, {16}, {8}, {0}}},
 
{SVGA3D_R16G16_FLOAT, SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 4, 4,
32, {{0}, {16}, {16}, {0}},
{{0}, {16}, {0}, {0}}},
 
{SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 4, 4,
32, {{0}, {16}, {16}, {0}},
{{0}, {0}, {16}, {0}}},
 
{SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 4, 4,
32, {{16}, {16}, {0}, {0}},
{{16}, {0}, {0}, {0}}},
 
{SVGA3D_R32_FLOAT, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 4, 4,
32, {{0}, {0}, {32}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 2, 2,
16, {{8}, {8}, {0}, {0}},
{{8}, {0}, {0}, {0}}},
 
{SVGA3D_R16_FLOAT, SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 2, 2,
16, {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH,
{1, 1, 1}, 2, 2,
16, {{0}, {0}, {16}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_ALPHA,
{1, 1, 1}, 1, 1,
8, {{0}, {0}, {0}, {8}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 8, 8,
64, {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16,
128, {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16,
128, {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB,
{1, 1, 1}, 2, 2,
16, {{5}, {6}, {5}, {0}},
{{0}, {5}, {11}, {0}}},
 
{SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 2, 2,
16, {{5}, {5}, {5}, {1}},
{{0}, {5}, {10}, {15}}},
 
{SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4,
32, {{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
 
{SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB,
{1, 1, 1}, 4, 4,
24, {{8}, {8}, {8}, {0}},
{{0}, {8}, {16}, {24}}},
 
{SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 8, 8,
64, {{0}, {0}, {64}, {0}},
{{0}, {0}, {0}, {0}}},
 
{SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16,
128, {{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
 
};
 
static inline u32 clamped_umul32(u32 a, u32 b)
{
uint64_t tmp = (uint64_t) a*b;
return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
}
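 
/*
 * Illustrative sketch, not part of the original header (the
 * svga3d_example_* name is hypothetical): clamped_umul32() saturates at
 * 0xffffffff instead of wrapping on overflow.
 */
static inline u32
svga3d_example_clamped_umul32_saturates(void)
{
	/* 0x10000 * 0x10000 does not fit in 32 bits, so the result clamps. */
	return clamped_umul32(0x10000, 0x10000); /* == 0xffffffff */
}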
 
static inline const struct svga3d_surface_desc *
svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
{
if (format < ARRAY_SIZE(svga3d_surface_descs))
return &svga3d_surface_descs[format];
 
return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
}
 
/*
*----------------------------------------------------------------------
*
* svga3dsurface_get_mip_size --
*
* Given a base level size and the mip level, compute the size of
* the mip level.
*
* Results:
* See above.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
 
static inline surf_size_struct
svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
{
surf_size_struct size;
 
size.width = max_t(u32, base_level.width >> mip_level, 1);
size.height = max_t(u32, base_level.height >> mip_level, 1);
size.depth = max_t(u32, base_level.depth >> mip_level, 1);
return size;
}
 
static inline void
svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
const surf_size_struct *pixel_size,
surf_size_struct *block_size)
{
block_size->width = DIV_ROUND_UP(pixel_size->width,
desc->block_size.width);
block_size->height = DIV_ROUND_UP(pixel_size->height,
desc->block_size.height);
block_size->depth = DIV_ROUND_UP(pixel_size->depth,
desc->block_size.depth);
}
 
static inline bool
svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
{
return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
}
 
static inline u32
svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
const surf_size_struct *size)
{
u32 pitch;
surf_size_struct blocks;
 
svga3dsurface_get_size_in_blocks(desc, size, &blocks);
 
pitch = blocks.width * desc->pitch_bytes_per_block;
 
return pitch;
}
 
/*
*-----------------------------------------------------------------------------
*
* svga3dsurface_get_image_buffer_size --
*
* Return the number of bytes of buffer space required to store
* one image of a surface, optionally using the specified pitch.
*
* If pitch is zero, it is assumed that rows are tightly packed.
*
* This function is overflow-safe. If the result would have
* overflowed, instead we return MAX_UINT32.
*
* Results:
* Byte count.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
 
static inline u32
svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
const surf_size_struct *size,
u32 pitch)
{
surf_size_struct image_blocks;
u32 slice_size, total_size;
 
svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
 
if (svga3dsurface_is_planar_surface(desc)) {
total_size = clamped_umul32(image_blocks.width,
image_blocks.height);
total_size = clamped_umul32(total_size, image_blocks.depth);
total_size = clamped_umul32(total_size, desc->bytes_per_block);
return total_size;
}
 
if (pitch == 0)
pitch = svga3dsurface_calculate_pitch(desc, size);
 
slice_size = clamped_umul32(image_blocks.height, pitch);
total_size = clamped_umul32(slice_size, image_blocks.depth);
 
return total_size;
}
 
static inline u32
svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
surf_size_struct base_level_size,
u32 num_mip_levels,
u32 num_layers)
{
const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
u32 total_size = 0;
u32 mip;
 
for (mip = 0; mip < num_mip_levels; mip++) {
surf_size_struct size =
svga3dsurface_get_mip_size(base_level_size, mip);
total_size += svga3dsurface_get_image_buffer_size(desc,
&size, 0);
}
 
return total_size * num_layers;
}
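 
/*
 * Illustrative sketch, not part of the original header (the
 * svga3dsurface_example_* name is hypothetical): sizing the backing store
 * for a 256x256 SVGA3D_R8G8B8A8_UNORM texture with a full mip chain and a
 * single array layer, using the helper above.
 */
static inline u32
svga3dsurface_example_serialized_size(void)
{
	surf_size_struct base_size;

	base_size.width = 256;
	base_size.height = 256;
	base_size.depth = 1;

	/* 256x256 down to 1x1 is 9 mip levels; one layer. */
	return svga3dsurface_get_serialized_size(SVGA3D_R8G8B8A8_UNORM,
						 base_size, 9, 1);
}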
 
 
/**
* svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
* in an image (or volume).
*
* @width: The image width in pixels.
* @height: The image height in pixels.
* @format: The surface format of the image.
* @x: The pixel x coordinate.
* @y: The pixel y coordinate.
* @z: The pixel z coordinate (slice, for volumes).
*/
static inline u32
svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
u32 width, u32 height,
u32 x, u32 y, u32 z)
{
const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
const u32 bw = desc->block_size.width, bh = desc->block_size.height;
const u32 bd = desc->block_size.depth;
const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
const u32 offset = (z / bd * imgstride +
y / bh * rowstride +
x / bw * desc->bytes_per_block);
return offset;
}
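 
/*
 * Illustrative sketch, not part of the original header (the
 * svga3dsurface_example_* name is hypothetical): for a block-compressed
 * format such as SVGA3D_DXT1, the helper above returns the byte offset of
 * the 4x4 block containing (x, y), because the coordinates are divided by
 * the block dimensions before being scaled by bytes_per_block.
 */
static inline u32
svga3dsurface_example_dxt1_block_offset(u32 width, u32 height, u32 x, u32 y)
{
	return svga3dsurface_get_pixel_offset(SVGA3D_DXT1, width, height,
					      x, y, 0);
}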
 
 
static inline u32
svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
surf_size_struct baseLevelSize,
u32 numMipLevels,
u32 face,
u32 mip)
 
{
u32 offset;
u32 mipChainBytes;
u32 mipChainBytesToLevel;
u32 i;
const struct svga3d_surface_desc *desc;
surf_size_struct mipSize;
u32 bytes;
 
desc = svga3dsurface_get_desc(format);
 
mipChainBytes = 0;
mipChainBytesToLevel = 0;
for (i = 0; i < numMipLevels; i++) {
mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
mipChainBytes += bytes;
if (i < mip)
mipChainBytesToLevel += bytes;
}
 
offset = mipChainBytes * face + mipChainBytesToLevel;
 
return offset;
}
 
 
/**
* svga3dsurface_is_gb_screen_target_format - Is the specified format usable as
* a ScreenTarget?
* (with just the GBObjects cap-bit
* set)
* @format: format to be queried
*
* RETURNS:
* true if queried format is valid for screen targets
*/
static inline bool
svga3dsurface_is_gb_screen_target_format(SVGA3dSurfaceFormat format)
{
return (format == SVGA3D_X8R8G8B8 ||
format == SVGA3D_A8R8G8B8 ||
format == SVGA3D_R5G6B5 ||
format == SVGA3D_X1R5G5B5 ||
format == SVGA3D_A1R5G5B5 ||
format == SVGA3D_P8);
}
 
 
/**
* svga3dsurface_is_dx_screen_target_format - Is the specified format usable as
* a ScreenTarget?
* (with DX10 enabled)
*
* @format: format to be queried
*
* Results:
* true if queried format is valid for screen targets
*/
static inline bool
svga3dsurface_is_dx_screen_target_format(SVGA3dSurfaceFormat format)
{
return (format == SVGA3D_R8G8B8A8_UNORM ||
format == SVGA3D_B8G8R8A8_UNORM ||
format == SVGA3D_B8G8R8X8_UNORM);
}
 
 
/**
* svga3dsurface_is_screen_target_format - Is the specified format usable as a
* ScreenTarget?
* (for some combination of caps)
*
* @format: format to be queried
*
* Results:
* true if queried format is valid for screen targets
*/
static inline bool
svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
{
if (svga3dsurface_is_gb_screen_target_format(format)) {
return true;
}
return svga3dsurface_is_dx_screen_target_format(format);
}
/drivers/video/drm/vmwgfx/device_include/svga3d_types.h
0,0 → 1,1633
/**********************************************************
* Copyright 2012-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
 
/*
* svga3d_types.h --
*
* SVGA 3d hardware definitions for basic types
*/
 
#ifndef _SVGA3D_TYPES_H_
#define _SVGA3D_TYPES_H_
 
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_VMCORE
 
#include "includeCheck.h"
 
/*
* Generic Types
*/
 
#define SVGA3D_INVALID_ID ((uint32)-1)
 
typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
typedef uint32 SVGA3dColor; /* a, r, g, b */
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCopyRect {
uint32 x;
uint32 y;
uint32 w;
uint32 h;
uint32 srcx;
uint32 srcy;
}
#include "vmware_pack_end.h"
SVGA3dCopyRect;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCopyBox {
uint32 x;
uint32 y;
uint32 z;
uint32 w;
uint32 h;
uint32 d;
uint32 srcx;
uint32 srcy;
uint32 srcz;
}
#include "vmware_pack_end.h"
SVGA3dCopyBox;
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dRect {
uint32 x;
uint32 y;
uint32 w;
uint32 h;
}
#include "vmware_pack_end.h"
SVGA3dRect;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 x;
uint32 y;
uint32 z;
uint32 w;
uint32 h;
uint32 d;
}
#include "vmware_pack_end.h"
SVGA3dBox;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 x;
uint32 y;
uint32 z;
}
#include "vmware_pack_end.h"
SVGA3dPoint;
 
/*
* Surface formats.
*/
typedef enum SVGA3dSurfaceFormat {
SVGA3D_FORMAT_INVALID = 0,
 
SVGA3D_X8R8G8B8 = 1,
SVGA3D_FORMAT_MIN = 1,
 
SVGA3D_A8R8G8B8 = 2,
 
SVGA3D_R5G6B5 = 3,
SVGA3D_X1R5G5B5 = 4,
SVGA3D_A1R5G5B5 = 5,
SVGA3D_A4R4G4B4 = 6,
 
SVGA3D_Z_D32 = 7,
SVGA3D_Z_D16 = 8,
SVGA3D_Z_D24S8 = 9,
SVGA3D_Z_D15S1 = 10,
 
SVGA3D_LUMINANCE8 = 11,
SVGA3D_LUMINANCE4_ALPHA4 = 12,
SVGA3D_LUMINANCE16 = 13,
SVGA3D_LUMINANCE8_ALPHA8 = 14,
 
SVGA3D_DXT1 = 15,
SVGA3D_DXT2 = 16,
SVGA3D_DXT3 = 17,
SVGA3D_DXT4 = 18,
SVGA3D_DXT5 = 19,
 
SVGA3D_BUMPU8V8 = 20,
SVGA3D_BUMPL6V5U5 = 21,
SVGA3D_BUMPX8L8V8U8 = 22,
SVGA3D_BUMPL8V8U8 = 23,
 
SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
 
SVGA3D_A2R10G10B10 = 26,
 
/* signed formats */
SVGA3D_V8U8 = 27,
SVGA3D_Q8W8V8U8 = 28,
SVGA3D_CxV8U8 = 29,
 
/* mixed formats */
SVGA3D_X8L8V8U8 = 30,
SVGA3D_A2W10V10U10 = 31,
 
SVGA3D_ALPHA8 = 32,
 
/* Single- and dual-component floating point formats */
SVGA3D_R_S10E5 = 33,
SVGA3D_R_S23E8 = 34,
SVGA3D_RG_S10E5 = 35,
SVGA3D_RG_S23E8 = 36,
 
SVGA3D_BUFFER = 37,
 
SVGA3D_Z_D24X8 = 38,
 
SVGA3D_V16U16 = 39,
 
SVGA3D_G16R16 = 40,
SVGA3D_A16B16G16R16 = 41,
 
/* Packed Video formats */
SVGA3D_UYVY = 42,
SVGA3D_YUY2 = 43,
 
/* Planar video formats */
SVGA3D_NV12 = 44,
 
/* Video format with alpha */
SVGA3D_AYUV = 45,
 
SVGA3D_R32G32B32A32_TYPELESS = 46,
SVGA3D_R32G32B32A32_UINT = 47,
SVGA3D_R32G32B32A32_SINT = 48,
SVGA3D_R32G32B32_TYPELESS = 49,
SVGA3D_R32G32B32_FLOAT = 50,
SVGA3D_R32G32B32_UINT = 51,
SVGA3D_R32G32B32_SINT = 52,
SVGA3D_R16G16B16A16_TYPELESS = 53,
SVGA3D_R16G16B16A16_UINT = 54,
SVGA3D_R16G16B16A16_SNORM = 55,
SVGA3D_R16G16B16A16_SINT = 56,
SVGA3D_R32G32_TYPELESS = 57,
SVGA3D_R32G32_UINT = 58,
SVGA3D_R32G32_SINT = 59,
SVGA3D_R32G8X24_TYPELESS = 60,
SVGA3D_D32_FLOAT_S8X24_UINT = 61,
SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62,
SVGA3D_X32_TYPELESS_G8X24_UINT = 63,
SVGA3D_R10G10B10A2_TYPELESS = 64,
SVGA3D_R10G10B10A2_UINT = 65,
SVGA3D_R11G11B10_FLOAT = 66,
SVGA3D_R8G8B8A8_TYPELESS = 67,
SVGA3D_R8G8B8A8_UNORM = 68,
SVGA3D_R8G8B8A8_UNORM_SRGB = 69,
SVGA3D_R8G8B8A8_UINT = 70,
SVGA3D_R8G8B8A8_SINT = 71,
SVGA3D_R16G16_TYPELESS = 72,
SVGA3D_R16G16_UINT = 73,
SVGA3D_R16G16_SINT = 74,
SVGA3D_R32_TYPELESS = 75,
SVGA3D_D32_FLOAT = 76,
SVGA3D_R32_UINT = 77,
SVGA3D_R32_SINT = 78,
SVGA3D_R24G8_TYPELESS = 79,
SVGA3D_D24_UNORM_S8_UINT = 80,
SVGA3D_R24_UNORM_X8_TYPELESS = 81,
SVGA3D_X24_TYPELESS_G8_UINT = 82,
SVGA3D_R8G8_TYPELESS = 83,
SVGA3D_R8G8_UNORM = 84,
SVGA3D_R8G8_UINT = 85,
SVGA3D_R8G8_SINT = 86,
SVGA3D_R16_TYPELESS = 87,
SVGA3D_R16_UNORM = 88,
SVGA3D_R16_UINT = 89,
SVGA3D_R16_SNORM = 90,
SVGA3D_R16_SINT = 91,
SVGA3D_R8_TYPELESS = 92,
SVGA3D_R8_UNORM = 93,
SVGA3D_R8_UINT = 94,
SVGA3D_R8_SNORM = 95,
SVGA3D_R8_SINT = 96,
SVGA3D_P8 = 97,
SVGA3D_R9G9B9E5_SHAREDEXP = 98,
SVGA3D_R8G8_B8G8_UNORM = 99,
SVGA3D_G8R8_G8B8_UNORM = 100,
SVGA3D_BC1_TYPELESS = 101,
SVGA3D_BC1_UNORM_SRGB = 102,
SVGA3D_BC2_TYPELESS = 103,
SVGA3D_BC2_UNORM_SRGB = 104,
SVGA3D_BC3_TYPELESS = 105,
SVGA3D_BC3_UNORM_SRGB = 106,
SVGA3D_BC4_TYPELESS = 107,
SVGA3D_ATI1 = 108, /* DX9-specific BC4_UNORM */
SVGA3D_BC4_SNORM = 109,
SVGA3D_BC5_TYPELESS = 110,
SVGA3D_ATI2 = 111, /* DX9-specific BC5_UNORM */
SVGA3D_BC5_SNORM = 112,
SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113,
SVGA3D_B8G8R8A8_TYPELESS = 114,
SVGA3D_B8G8R8A8_UNORM_SRGB = 115,
SVGA3D_B8G8R8X8_TYPELESS = 116,
SVGA3D_B8G8R8X8_UNORM_SRGB = 117,
 
/* Advanced depth formats. */
SVGA3D_Z_DF16 = 118,
SVGA3D_Z_DF24 = 119,
SVGA3D_Z_D24S8_INT = 120,
 
/* Planar video formats. */
SVGA3D_YV12 = 121,
 
SVGA3D_R32G32B32A32_FLOAT = 122,
SVGA3D_R16G16B16A16_FLOAT = 123,
SVGA3D_R16G16B16A16_UNORM = 124,
SVGA3D_R32G32_FLOAT = 125,
SVGA3D_R10G10B10A2_UNORM = 126,
SVGA3D_R8G8B8A8_SNORM = 127,
SVGA3D_R16G16_FLOAT = 128,
SVGA3D_R16G16_UNORM = 129,
SVGA3D_R16G16_SNORM = 130,
SVGA3D_R32_FLOAT = 131,
SVGA3D_R8G8_SNORM = 132,
SVGA3D_R16_FLOAT = 133,
SVGA3D_D16_UNORM = 134,
SVGA3D_A8_UNORM = 135,
SVGA3D_BC1_UNORM = 136,
SVGA3D_BC2_UNORM = 137,
SVGA3D_BC3_UNORM = 138,
SVGA3D_B5G6R5_UNORM = 139,
SVGA3D_B5G5R5A1_UNORM = 140,
SVGA3D_B8G8R8A8_UNORM = 141,
SVGA3D_B8G8R8X8_UNORM = 142,
SVGA3D_BC4_UNORM = 143,
SVGA3D_BC5_UNORM = 144,
 
SVGA3D_FORMAT_MAX
} SVGA3dSurfaceFormat;
 
typedef enum SVGA3dSurfaceFlags {
SVGA3D_SURFACE_CUBEMAP = (1 << 0),
 
/*
* HINT flags are not enforced by the device but are useful for
* performance.
*/
SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
SVGA3D_SURFACE_DECODE_RENDERTARGET = (1 << 11),
 
/*
* Is this surface using a base-level pitch for its mob backing?
*
* This flag is not intended to be set by guest-drivers, but is instead
* set by the device when the surface is bound to a mob with a specified
* pitch.
*/
SVGA3D_SURFACE_MOB_PITCH = (1 << 12),
 
SVGA3D_SURFACE_INACTIVE = (1 << 13),
SVGA3D_SURFACE_HINT_RT_LOCKABLE = (1 << 14),
SVGA3D_SURFACE_VOLUME = (1 << 15),
 
/*
* Required to be set on a surface to bind it to a screen target.
*/
SVGA3D_SURFACE_SCREENTARGET = (1 << 16),
 
/*
* Align images in the guest-backing mob to 16-bytes.
*/
SVGA3D_SURFACE_ALIGN16 = (1 << 17),
 
SVGA3D_SURFACE_1D = (1 << 18),
SVGA3D_SURFACE_ARRAY = (1 << 19),
 
/*
* Bind flags.
* These are enforced for any surface defined with DefineGBSurface_v2.
*/
SVGA3D_SURFACE_BIND_VERTEX_BUFFER = (1 << 20),
SVGA3D_SURFACE_BIND_INDEX_BUFFER = (1 << 21),
SVGA3D_SURFACE_BIND_CONSTANT_BUFFER = (1 << 22),
SVGA3D_SURFACE_BIND_SHADER_RESOURCE = (1 << 23),
SVGA3D_SURFACE_BIND_RENDER_TARGET = (1 << 24),
SVGA3D_SURFACE_BIND_DEPTH_STENCIL = (1 << 25),
SVGA3D_SURFACE_BIND_STREAM_OUTPUT = (1 << 26),
 
/*
* A note on staging flags:
*
* The STAGING flags note that the surface will not be used directly by the
* drawing pipeline, i.e. that it will not be bound to any bind point.
* Staging surfaces may be used by copy operations to move data in and out
* of other surfaces.
*
* The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
* updates indirectly, i.e. the surface will not be updated directly, but
* will receive copies from staging surfaces.
*/
SVGA3D_SURFACE_STAGING_UPLOAD = (1 << 27),
SVGA3D_SURFACE_STAGING_DOWNLOAD = (1 << 28),
SVGA3D_SURFACE_HINT_INDIRECT_UPDATE = (1 << 29),
 
/*
* Setting this flag allows this surface to be used with the
* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command. It is only valid for
* buffer surfaces, and no bind flags are allowed to be set on surfaces
* with this flag.
*/
SVGA3D_SURFACE_TRANSFER_FROM_BUFFER = (1 << 30),
 
/*
* Marker for the last defined bit.
*/
SVGA3D_SURFACE_FLAG_MAX = (1 << 31),
} SVGA3dSurfaceFlags;
 
#define SVGA3D_SURFACE_HB_DISALLOWED_MASK \
( SVGA3D_SURFACE_MOB_PITCH | \
SVGA3D_SURFACE_SCREENTARGET | \
SVGA3D_SURFACE_ALIGN16 | \
SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
SVGA3D_SURFACE_STAGING_UPLOAD | \
SVGA3D_SURFACE_STAGING_DOWNLOAD | \
SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
)
 
#define SVGA3D_SURFACE_2D_DISALLOWED_MASK \
( SVGA3D_SURFACE_CUBEMAP | \
SVGA3D_SURFACE_MASKABLE_ANTIALIAS | \
SVGA3D_SURFACE_AUTOGENMIPMAPS | \
SVGA3D_SURFACE_DECODE_RENDERTARGET | \
SVGA3D_SURFACE_VOLUME | \
SVGA3D_SURFACE_1D | \
SVGA3D_SURFACE_ARRAY | \
SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
)
 
#define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \
( SVGA3D_SURFACE_CUBEMAP | \
SVGA3D_SURFACE_AUTOGENMIPMAPS | \
SVGA3D_SURFACE_DECODE_RENDERTARGET | \
SVGA3D_SURFACE_VOLUME | \
SVGA3D_SURFACE_1D | \
SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
SVGA3D_SURFACE_INACTIVE | \
SVGA3D_SURFACE_STAGING_UPLOAD | \
SVGA3D_SURFACE_STAGING_DOWNLOAD | \
SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
)
 
#define SVGA3D_SURFACE_DX_ONLY_MASK \
( SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
)
 
#define SVGA3D_SURFACE_STAGING_MASK \
( SVGA3D_SURFACE_STAGING_UPLOAD | \
SVGA3D_SURFACE_STAGING_DOWNLOAD \
)
 
#define SVGA3D_SURFACE_BIND_MASK \
( SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
SVGA3D_SURFACE_BIND_SHADER_RESOURCE | \
SVGA3D_SURFACE_BIND_RENDER_TARGET | \
SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
SVGA3D_SURFACE_BIND_STREAM_OUTPUT \
)
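 
/*
 * Illustrative sketch, not part of the original header (the
 * svga3d_example_* name is hypothetical): one plausible way to use the
 * masks above is to accept a surface as a screen target only when the
 * SCREENTARGET flag is set and none of the disallowed flags are present.
 */
static inline uint32
svga3d_example_flags_ok_for_screentarget(uint32 surface_flags)
{
	return (surface_flags & SVGA3D_SURFACE_SCREENTARGET) &&
	       !(surface_flags & SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK);
}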
 
typedef enum {
SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004,
SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008,
SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010,
SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040,
SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,
 
/*
* This format can be used as a render target if the current display mode
* is the same depth when the alpha channel is ignored. e.g. if the device
* can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
* format op list entry for A8R8G8B8 should have this cap.
*/
SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,
 
/*
* This format contains DirectDraw support (including Flip). This flag
* should not to be set on alpha formats.
*/
SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400,
 
/*
* The rasterizer supports some level of Direct3D in this format, which
* implies that the driver can create a Context in this mode (for some
* render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
* flag must also be set.
*/
SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800,
 
/*
* This is set for a private format when the driver has put the bpp in
* the structure.
*/
SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000,
 
/*
* Indicates that this format can be converted to any RGB format for which
* SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
*/
SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
 
/*
* Indicates that this format can be used to create offscreen plain surfaces.
*/
SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000,
 
/*
* Indicates that this format can be read as an SRGB texture (meaning that the
* sampler will linearize the looked-up data)
*/
SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
 
/*
* Indicates that this format can be used in the bumpmap instructions
*/
SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
 
/*
* Indicates that this format can be sampled by the displacement map sampler
*/
SVGA3DFORMAT_OP_DMAP = 0x00020000,
 
/*
* Indicates that this format cannot be used with texture filtering
*/
SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
 
/*
* Indicates that format conversions are supported to this RGB format if
* SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
*/
SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000,
 
/*
* Indicates that this format can be written as an SRGB target
* (meaning that the pixel pipe will DE-linearize data on output to this format)
*/
SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
 
/*
* Indicates that this format cannot be used with alpha blending
*/
SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
 
/*
* Indicates that the device can auto-generate sublevels for resources
* of this format
*/
SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
 
/*
* Indicates that this format can be used by vertex texture sampler
*/
SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
 
/*
* Indicates that this format supports neither texture coordinate
* wrap modes nor mipmapping.
*/
SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000
} SVGA3dFormatOp;
 
#define SVGA3D_FORMAT_POSITIVE \
(SVGA3DFORMAT_OP_TEXTURE | \
SVGA3DFORMAT_OP_VOLUMETEXTURE | \
SVGA3DFORMAT_OP_CUBETEXTURE | \
SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET | \
SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET | \
SVGA3DFORMAT_OP_ZSTENCIL | \
SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH | \
SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET | \
SVGA3DFORMAT_OP_DISPLAYMODE | \
SVGA3DFORMAT_OP_3DACCELERATION | \
SVGA3DFORMAT_OP_PIXELSIZE | \
SVGA3DFORMAT_OP_CONVERT_TO_ARGB | \
SVGA3DFORMAT_OP_OFFSCREENPLAIN | \
SVGA3DFORMAT_OP_SRGBREAD | \
SVGA3DFORMAT_OP_BUMPMAP | \
SVGA3DFORMAT_OP_DMAP | \
SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB | \
SVGA3DFORMAT_OP_SRGBWRITE | \
SVGA3DFORMAT_OP_AUTOGENMIPMAP | \
SVGA3DFORMAT_OP_VERTEXTEXTURE)
 
#define SVGA3D_FORMAT_NEGATIVE \
(SVGA3DFORMAT_OP_NOFILTER | \
SVGA3DFORMAT_OP_NOALPHABLEND | \
SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP)
 
/*
* This structure is a conversion of SVGA3DFORMAT_OP_*
* Entries must be located at the same position.
*/
typedef union {
uint32 value;
struct {
uint32 texture : 1;
uint32 volumeTexture : 1;
uint32 cubeTexture : 1;
uint32 offscreenRenderTarget : 1;
uint32 sameFormatRenderTarget : 1;
uint32 unknown1 : 1;
uint32 zStencil : 1;
uint32 zStencilArbitraryDepth : 1;
uint32 sameFormatUpToAlpha : 1;
uint32 unknown2 : 1;
uint32 displayMode : 1;
uint32 acceleration3d : 1;
uint32 pixelSize : 1;
uint32 convertToARGB : 1;
uint32 offscreenPlain : 1;
uint32 sRGBRead : 1;
uint32 bumpMap : 1;
uint32 dmap : 1;
uint32 noFilter : 1;
uint32 memberOfGroupARGB : 1;
uint32 sRGBWrite : 1;
uint32 noAlphaBlend : 1;
uint32 autoGenMipMap : 1;
uint32 vertexTexture : 1;
uint32 noTexCoordWrapNorMip : 1;
};
} SVGA3dSurfaceFormatCaps;
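 
/*
 * Illustrative sketch, not part of the original header (the
 * svga3d_example_* name is hypothetical): since the bitfield layout above
 * mirrors the SVGA3DFORMAT_OP_* bit positions, a raw format-op mask can be
 * inspected through the union.
 */
static inline uint32
svga3d_example_format_is_cube_texture(uint32 format_ops)
{
	SVGA3dSurfaceFormatCaps caps;

	caps.value = format_ops;
	return caps.cubeTexture; /* same bit as SVGA3DFORMAT_OP_CUBETEXTURE */
}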
 
/*
* SVGA_3D_CMD_SETRENDERSTATE Types. All value types
* must fit in a uint32.
*/
 
typedef enum {
SVGA3D_RS_INVALID = 0,
SVGA3D_RS_MIN = 1,
SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */
SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */
SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */
SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */
SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */
SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */
SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */
SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */
SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */
SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */
SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */
SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */
SVGA3D_RS_STENCILREF = 13, /* uint32 */
SVGA3D_RS_STENCILMASK = 14, /* uint32 */
SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */
SVGA3D_RS_FOGSTART = 16, /* float */
SVGA3D_RS_FOGEND = 17, /* float */
SVGA3D_RS_FOGDENSITY = 18, /* float */
SVGA3D_RS_POINTSIZE = 19, /* float */
SVGA3D_RS_POINTSIZEMIN = 20, /* float */
SVGA3D_RS_POINTSIZEMAX = 21, /* float */
SVGA3D_RS_POINTSCALE_A = 22, /* float */
SVGA3D_RS_POINTSCALE_B = 23, /* float */
SVGA3D_RS_POINTSCALE_C = 24, /* float */
SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */
SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */
SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */
SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */
SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */
SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */
SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */
SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */
SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */
SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */
SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */
SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */
SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */
SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */
SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */
SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */
SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */
SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */
SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */
SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */
SVGA3D_RS_ZBIAS = 45, /* float */
SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */
SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */
SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */
SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */
SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */
SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */
SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */
SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */
SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */
SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */
SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */
SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */
SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */
SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */
SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */
SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */
SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */
SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */
SVGA3D_RS_DEPTHBIAS = 64, /* float */
 
 
/*
* Output Gamma Level
*
* Output gamma affects the gamma curve of colors that are output from the
* rendering pipeline. A value of 1.0 specifies a linear color space. If the
* value is <= 0.0, gamma correction is ignored and linear color space is
* used.
*/
 
SVGA3D_RS_OUTPUTGAMMA = 65, /* float */
SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */
SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */
SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */
SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */
SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */
SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */
SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */
SVGA3D_RS_TWEENFACTOR = 88, /* float */
SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */
SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */
SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */
SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */
SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */
SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, /* SVGA3dTransparencyAntialiasType */
SVGA3D_RS_LINEWIDTH = 98, /* float */
SVGA3D_RS_MAX
} SVGA3dRenderStateName;
 
typedef enum {
SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0,
SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1,
SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2,
SVGA3D_TRANSPARENCYANTIALIAS_MAX
} SVGA3dTransparencyAntialiasType;
 
typedef enum {
SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
SVGA3D_VERTEXMATERIAL_MAX = 3,
} SVGA3dVertexMaterial;
 
typedef enum {
SVGA3D_FILLMODE_INVALID = 0,
SVGA3D_FILLMODE_MIN = 1,
SVGA3D_FILLMODE_POINT = 1,
SVGA3D_FILLMODE_LINE = 2,
SVGA3D_FILLMODE_FILL = 3,
SVGA3D_FILLMODE_MAX
} SVGA3dFillModeType;
 
 
typedef
#include "vmware_pack_begin.h"
union {
struct {
uint16 mode; /* SVGA3dFillModeType */
uint16 face; /* SVGA3dFace */
};
uint32 uintValue;
}
#include "vmware_pack_end.h"
SVGA3dFillMode;
 
typedef enum {
SVGA3D_SHADEMODE_INVALID = 0,
SVGA3D_SHADEMODE_FLAT = 1,
SVGA3D_SHADEMODE_SMOOTH = 2,
SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */
SVGA3D_SHADEMODE_MAX
} SVGA3dShadeMode;
 
typedef
#include "vmware_pack_begin.h"
union {
struct {
uint16 repeat;
uint16 pattern;
};
uint32 uintValue;
}
#include "vmware_pack_end.h"
SVGA3dLinePattern;
 
typedef enum {
SVGA3D_BLENDOP_INVALID = 0,
SVGA3D_BLENDOP_MIN = 1,
SVGA3D_BLENDOP_ZERO = 1,
SVGA3D_BLENDOP_ONE = 2,
SVGA3D_BLENDOP_SRCCOLOR = 3,
SVGA3D_BLENDOP_INVSRCCOLOR = 4,
SVGA3D_BLENDOP_SRCALPHA = 5,
SVGA3D_BLENDOP_INVSRCALPHA = 6,
SVGA3D_BLENDOP_DESTALPHA = 7,
SVGA3D_BLENDOP_INVDESTALPHA = 8,
SVGA3D_BLENDOP_DESTCOLOR = 9,
SVGA3D_BLENDOP_INVDESTCOLOR = 10,
SVGA3D_BLENDOP_SRCALPHASAT = 11,
SVGA3D_BLENDOP_BLENDFACTOR = 12,
SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
SVGA3D_BLENDOP_SRC1COLOR = 14,
SVGA3D_BLENDOP_INVSRC1COLOR = 15,
SVGA3D_BLENDOP_SRC1ALPHA = 16,
SVGA3D_BLENDOP_INVSRC1ALPHA = 17,
SVGA3D_BLENDOP_BLENDFACTORALPHA = 18,
SVGA3D_BLENDOP_INVBLENDFACTORALPHA = 19,
SVGA3D_BLENDOP_MAX
} SVGA3dBlendOp;
 
typedef enum {
SVGA3D_BLENDEQ_INVALID = 0,
SVGA3D_BLENDEQ_MIN = 1,
SVGA3D_BLENDEQ_ADD = 1,
SVGA3D_BLENDEQ_SUBTRACT = 2,
SVGA3D_BLENDEQ_REVSUBTRACT = 3,
SVGA3D_BLENDEQ_MINIMUM = 4,
SVGA3D_BLENDEQ_MAXIMUM = 5,
SVGA3D_BLENDEQ_MAX
} SVGA3dBlendEquation;
 
typedef enum {
SVGA3D_DX11_LOGICOP_MIN = 0,
SVGA3D_DX11_LOGICOP_CLEAR = 0,
SVGA3D_DX11_LOGICOP_SET = 1,
SVGA3D_DX11_LOGICOP_COPY = 2,
SVGA3D_DX11_LOGICOP_COPY_INVERTED = 3,
SVGA3D_DX11_LOGICOP_NOOP = 4,
SVGA3D_DX11_LOGICOP_INVERT = 5,
SVGA3D_DX11_LOGICOP_AND = 6,
SVGA3D_DX11_LOGICOP_NAND = 7,
SVGA3D_DX11_LOGICOP_OR = 8,
SVGA3D_DX11_LOGICOP_NOR = 9,
SVGA3D_DX11_LOGICOP_XOR = 10,
SVGA3D_DX11_LOGICOP_EQUIV = 11,
SVGA3D_DX11_LOGICOP_AND_REVERSE = 12,
SVGA3D_DX11_LOGICOP_AND_INVERTED = 13,
SVGA3D_DX11_LOGICOP_OR_REVERSE = 14,
SVGA3D_DX11_LOGICOP_OR_INVERTED = 15,
SVGA3D_DX11_LOGICOP_MAX
} SVGA3dDX11LogicOp;
 
typedef enum {
SVGA3D_FRONTWINDING_INVALID = 0,
SVGA3D_FRONTWINDING_CW = 1,
SVGA3D_FRONTWINDING_CCW = 2,
SVGA3D_FRONTWINDING_MAX
} SVGA3dFrontWinding;
 
typedef enum {
SVGA3D_FACE_INVALID = 0,
SVGA3D_FACE_NONE = 1,
SVGA3D_FACE_MIN = 1,
SVGA3D_FACE_FRONT = 2,
SVGA3D_FACE_BACK = 3,
SVGA3D_FACE_FRONT_BACK = 4,
SVGA3D_FACE_MAX
} SVGA3dFace;
 
/*
* The order and the values should not be changed
*/
 
typedef enum {
SVGA3D_CMP_INVALID = 0,
SVGA3D_CMP_NEVER = 1,
SVGA3D_CMP_LESS = 2,
SVGA3D_CMP_EQUAL = 3,
SVGA3D_CMP_LESSEQUAL = 4,
SVGA3D_CMP_GREATER = 5,
SVGA3D_CMP_NOTEQUAL = 6,
SVGA3D_CMP_GREATEREQUAL = 7,
SVGA3D_CMP_ALWAYS = 8,
SVGA3D_CMP_MAX
} SVGA3dCmpFunc;
 
/*
* SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
* the fog factor to be specified in the alpha component of the specular
* (a.k.a. secondary) vertex color.
*/
typedef enum {
SVGA3D_FOGFUNC_INVALID = 0,
SVGA3D_FOGFUNC_EXP = 1,
SVGA3D_FOGFUNC_EXP2 = 2,
SVGA3D_FOGFUNC_LINEAR = 3,
SVGA3D_FOGFUNC_PER_VERTEX = 4
} SVGA3dFogFunction;
 
/*
* SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
* or per-pixel basis.
*/
typedef enum {
SVGA3D_FOGTYPE_INVALID = 0,
SVGA3D_FOGTYPE_VERTEX = 1,
SVGA3D_FOGTYPE_PIXEL = 2,
SVGA3D_FOGTYPE_MAX = 3
} SVGA3dFogType;
 
/*
* SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
* computed using the eye Z value of each pixel (or vertex), whereas range-
* based fog is computed using the actual distance (range) to the eye.
*/
typedef enum {
SVGA3D_FOGBASE_INVALID = 0,
SVGA3D_FOGBASE_DEPTHBASED = 1,
SVGA3D_FOGBASE_RANGEBASED = 2,
SVGA3D_FOGBASE_MAX = 3
} SVGA3dFogBase;
 
typedef enum {
SVGA3D_STENCILOP_INVALID = 0,
SVGA3D_STENCILOP_MIN = 1,
SVGA3D_STENCILOP_KEEP = 1,
SVGA3D_STENCILOP_ZERO = 2,
SVGA3D_STENCILOP_REPLACE = 3,
SVGA3D_STENCILOP_INCRSAT = 4,
SVGA3D_STENCILOP_DECRSAT = 5,
SVGA3D_STENCILOP_INVERT = 6,
SVGA3D_STENCILOP_INCR = 7,
SVGA3D_STENCILOP_DECR = 8,
SVGA3D_STENCILOP_MAX
} SVGA3dStencilOp;
 
typedef enum {
SVGA3D_CLIPPLANE_0 = (1 << 0),
SVGA3D_CLIPPLANE_1 = (1 << 1),
SVGA3D_CLIPPLANE_2 = (1 << 2),
SVGA3D_CLIPPLANE_3 = (1 << 3),
SVGA3D_CLIPPLANE_4 = (1 << 4),
SVGA3D_CLIPPLANE_5 = (1 << 5),
} SVGA3dClipPlanes;
 
typedef enum {
SVGA3D_CLEAR_COLOR = 0x1,
SVGA3D_CLEAR_DEPTH = 0x2,
SVGA3D_CLEAR_STENCIL = 0x4,
 
/*
* Hint only, must be used together with SVGA3D_CLEAR_COLOR. If
* SVGA3D_CLEAR_DEPTH or SVGA3D_CLEAR_STENCIL bit is set, this
* bit will be ignored.
*/
SVGA3D_CLEAR_COLORFILL = 0x8
} SVGA3dClearFlag;
 
typedef enum {
SVGA3D_RT_DEPTH = 0,
SVGA3D_RT_MIN = 0,
SVGA3D_RT_STENCIL = 1,
SVGA3D_RT_COLOR0 = 2,
SVGA3D_RT_COLOR1 = 3,
SVGA3D_RT_COLOR2 = 4,
SVGA3D_RT_COLOR3 = 5,
SVGA3D_RT_COLOR4 = 6,
SVGA3D_RT_COLOR5 = 7,
SVGA3D_RT_COLOR6 = 8,
SVGA3D_RT_COLOR7 = 9,
SVGA3D_RT_MAX,
SVGA3D_RT_INVALID = ((uint32)-1),
} SVGA3dRenderTargetType;
 
#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
 
typedef
#include "vmware_pack_begin.h"
union {
struct {
uint32 red : 1;
uint32 green : 1;
uint32 blue : 1;
uint32 alpha : 1;
};
uint32 uintValue;
}
#include "vmware_pack_end.h"
SVGA3dColorMask;
 
typedef enum {
SVGA3D_VBLEND_DISABLE = 0,
SVGA3D_VBLEND_1WEIGHT = 1,
SVGA3D_VBLEND_2WEIGHT = 2,
SVGA3D_VBLEND_3WEIGHT = 3,
SVGA3D_VBLEND_MAX = 4,
} SVGA3dVertexBlendFlags;
 
typedef enum {
SVGA3D_WRAPCOORD_0 = 1 << 0,
SVGA3D_WRAPCOORD_1 = 1 << 1,
SVGA3D_WRAPCOORD_2 = 1 << 2,
SVGA3D_WRAPCOORD_3 = 1 << 3,
SVGA3D_WRAPCOORD_ALL = 0xF,
} SVGA3dWrapFlags;
 
/*
* SVGA_3D_CMD_TEXTURESTATE Types. All value types
* must fit in a uint32.
*/
 
typedef enum {
SVGA3D_TS_INVALID = 0,
SVGA3D_TS_MIN = 1,
SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */
SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */
SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */
SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */
SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */
SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */
SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */
SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */
SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */
SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */
SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */
SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */
SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */
SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */
SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */
SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */
SVGA3D_TS_BUMPENVMAT00 = 17, /* float */
SVGA3D_TS_BUMPENVMAT01 = 18, /* float */
SVGA3D_TS_BUMPENVMAT10 = 19, /* float */
SVGA3D_TS_BUMPENVMAT11 = 20, /* float */
SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */
SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */
SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */
SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */
 
 
/*
* Sampler Gamma Level
*
* Sampler gamma affects the color of samples taken from the sampler. A
* value of 1.0 will produce linear samples. If the value is <= 0.0 the
* gamma value is ignored and a linear space is used.
*/
 
SVGA3D_TS_GAMMA = 25, /* float */
SVGA3D_TS_BUMPENVLSCALE = 26, /* float */
SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */
SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */
SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */
SVGA3D_TS_PREGB_MAX = 30, /* Max value before GBObjects */
SVGA3D_TS_CONSTANT = 30, /* SVGA3dColor */
SVGA3D_TS_COLOR_KEY_ENABLE = 31, /* SVGA3dBool */
SVGA3D_TS_COLOR_KEY = 32, /* SVGA3dColor */
SVGA3D_TS_MAX
} SVGA3dTextureStateName;
 
typedef enum {
SVGA3D_TC_INVALID = 0,
SVGA3D_TC_DISABLE = 1,
SVGA3D_TC_SELECTARG1 = 2,
SVGA3D_TC_SELECTARG2 = 3,
SVGA3D_TC_MODULATE = 4,
SVGA3D_TC_ADD = 5,
SVGA3D_TC_ADDSIGNED = 6,
SVGA3D_TC_SUBTRACT = 7,
SVGA3D_TC_BLENDTEXTUREALPHA = 8,
SVGA3D_TC_BLENDDIFFUSEALPHA = 9,
SVGA3D_TC_BLENDCURRENTALPHA = 10,
SVGA3D_TC_BLENDFACTORALPHA = 11,
SVGA3D_TC_MODULATE2X = 12,
SVGA3D_TC_MODULATE4X = 13,
SVGA3D_TC_DSDT = 14,
SVGA3D_TC_DOTPRODUCT3 = 15,
SVGA3D_TC_BLENDTEXTUREALPHAPM = 16,
SVGA3D_TC_ADDSIGNED2X = 17,
SVGA3D_TC_ADDSMOOTH = 18,
SVGA3D_TC_PREMODULATE = 19,
SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20,
SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21,
SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
SVGA3D_TC_BUMPENVMAPLUMINANCE = 24,
SVGA3D_TC_MULTIPLYADD = 25,
SVGA3D_TC_LERP = 26,
SVGA3D_TC_MAX
} SVGA3dTextureCombiner;
 
#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
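 
/*
 * Illustrative sketch, not part of the original header (the
 * svga3d_example_* name is hypothetical): SVGA3D_TC_CAP_BIT maps a non-zero
 * combiner op to a single capability bit, while SVGA3D_TC_INVALID maps to 0.
 */
static inline uint32
svga3d_example_modulate_cap_bit(void)
{
	return SVGA3D_TC_CAP_BIT(SVGA3D_TC_MODULATE); /* == (1 << 3) */
}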
 
typedef enum {
SVGA3D_TEX_ADDRESS_INVALID = 0,
SVGA3D_TEX_ADDRESS_MIN = 1,
SVGA3D_TEX_ADDRESS_WRAP = 1,
SVGA3D_TEX_ADDRESS_MIRROR = 2,
SVGA3D_TEX_ADDRESS_CLAMP = 3,
SVGA3D_TEX_ADDRESS_BORDER = 4,
SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
SVGA3D_TEX_ADDRESS_EDGE = 6,
SVGA3D_TEX_ADDRESS_MAX
} SVGA3dTextureAddress;
 
/*
* SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
* disabled, and the rasterizer should use the magnification filter instead.
*/
typedef enum {
SVGA3D_TEX_FILTER_NONE = 0,
SVGA3D_TEX_FILTER_MIN = 0,
SVGA3D_TEX_FILTER_NEAREST = 1,
SVGA3D_TEX_FILTER_LINEAR = 2,
SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */
SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */
SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */
SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */
SVGA3D_TEX_FILTER_MAX
} SVGA3dTextureFilter;
 
typedef enum {
SVGA3D_TEX_TRANSFORM_OFF = 0,
SVGA3D_TEX_TRANSFORM_S = (1 << 0),
SVGA3D_TEX_TRANSFORM_T = (1 << 1),
SVGA3D_TEX_TRANSFORM_R = (1 << 2),
SVGA3D_TEX_TRANSFORM_Q = (1 << 3),
SVGA3D_TEX_PROJECTED = (1 << 15),
} SVGA3dTexTransformFlags;
 
typedef enum {
SVGA3D_TEXCOORD_GEN_OFF = 0,
SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1,
SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2,
SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
SVGA3D_TEXCOORD_GEN_SPHERE = 4,
SVGA3D_TEXCOORD_GEN_MAX
} SVGA3dTextureCoordGen;
 
/*
* Texture argument constants for texture combiner
*/
typedef enum {
SVGA3D_TA_INVALID = 0,
SVGA3D_TA_TFACTOR = 1,
SVGA3D_TA_PREVIOUS = 2,
SVGA3D_TA_DIFFUSE = 3,
SVGA3D_TA_TEXTURE = 4,
SVGA3D_TA_SPECULAR = 5,
SVGA3D_TA_CONSTANT = 6,
SVGA3D_TA_MAX
} SVGA3dTextureArgData;
 
#define SVGA3D_TM_MASK_LEN 4
 
/* Modifiers for texture argument constants defined above. */
typedef enum {
SVGA3D_TM_NONE = 0,
SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN),
SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
} SVGA3dTextureArgModifier;
 
/*
* Vertex declarations
*
* Notes:
*
* SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
* draw with any POSITIONT vertex arrays, the programmable vertex
* pipeline will be implicitly disabled. Drawing will take place as if
* no vertex shader was bound.
*/
 
typedef enum {
SVGA3D_DECLUSAGE_POSITION = 0,
SVGA3D_DECLUSAGE_BLENDWEIGHT,
SVGA3D_DECLUSAGE_BLENDINDICES,
SVGA3D_DECLUSAGE_NORMAL,
SVGA3D_DECLUSAGE_PSIZE,
SVGA3D_DECLUSAGE_TEXCOORD,
SVGA3D_DECLUSAGE_TANGENT,
SVGA3D_DECLUSAGE_BINORMAL,
SVGA3D_DECLUSAGE_TESSFACTOR,
SVGA3D_DECLUSAGE_POSITIONT,
SVGA3D_DECLUSAGE_COLOR,
SVGA3D_DECLUSAGE_FOG,
SVGA3D_DECLUSAGE_DEPTH,
SVGA3D_DECLUSAGE_SAMPLE,
SVGA3D_DECLUSAGE_MAX
} SVGA3dDeclUsage;
 
typedef enum {
SVGA3D_DECLMETHOD_DEFAULT = 0,
SVGA3D_DECLMETHOD_PARTIALU,
SVGA3D_DECLMETHOD_PARTIALV,
SVGA3D_DECLMETHOD_CROSSUV, /* Normal */
SVGA3D_DECLMETHOD_UV,
SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */
SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement */
/* map */
} SVGA3dDeclMethod;
 
typedef enum {
SVGA3D_DECLTYPE_FLOAT1 = 0,
SVGA3D_DECLTYPE_FLOAT2 = 1,
SVGA3D_DECLTYPE_FLOAT3 = 2,
SVGA3D_DECLTYPE_FLOAT4 = 3,
SVGA3D_DECLTYPE_D3DCOLOR = 4,
SVGA3D_DECLTYPE_UBYTE4 = 5,
SVGA3D_DECLTYPE_SHORT2 = 6,
SVGA3D_DECLTYPE_SHORT4 = 7,
SVGA3D_DECLTYPE_UBYTE4N = 8,
SVGA3D_DECLTYPE_SHORT2N = 9,
SVGA3D_DECLTYPE_SHORT4N = 10,
SVGA3D_DECLTYPE_USHORT2N = 11,
SVGA3D_DECLTYPE_USHORT4N = 12,
SVGA3D_DECLTYPE_UDEC3 = 13,
SVGA3D_DECLTYPE_DEC3N = 14,
SVGA3D_DECLTYPE_FLOAT16_2 = 15,
SVGA3D_DECLTYPE_FLOAT16_4 = 16,
SVGA3D_DECLTYPE_MAX,
} SVGA3dDeclType;
 
/*
* This structure is used for the divisor for geometry instancing;
* it's a direct translation of the Direct3D equivalent.
*/
typedef union {
struct {
/*
* For index data, this number represents the number of instances to draw.
* For instance data, this number represents the number of
* instances/vertex in this stream
*/
uint32 count : 30;
 
/*
* This is 1 if this is supposed to be the data that is repeated for
* every instance.
*/
uint32 indexedData : 1;
 
/*
* This is 1 if this is supposed to be the per-instance data.
*/
uint32 instanceData : 1;
};
 
uint32 value;
} SVGA3dVertexDivisor;
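 
/*
 * Illustrative sketch, not part of the original header (the
 * svga3d_example_* name is hypothetical): a divisor describing a vertex
 * stream that carries per-instance rather than per-vertex data.
 */
static inline SVGA3dVertexDivisor
svga3d_example_per_instance_divisor(void)
{
	SVGA3dVertexDivisor divisor;

	divisor.value = 0;
	divisor.count = 1;        /* see the count field comment above */
	divisor.instanceData = 1; /* this stream carries per-instance data */
	return divisor;
}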
 
typedef enum {
/*
* SVGA3D_PRIMITIVE_INVALID is a valid primitive type.
*
* List MIN second so debuggers will think INVALID is
* the correct name.
*/
SVGA3D_PRIMITIVE_INVALID = 0,
SVGA3D_PRIMITIVE_MIN = 0,
SVGA3D_PRIMITIVE_TRIANGLELIST = 1,
SVGA3D_PRIMITIVE_POINTLIST = 2,
SVGA3D_PRIMITIVE_LINELIST = 3,
SVGA3D_PRIMITIVE_LINESTRIP = 4,
SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
SVGA3D_PRIMITIVE_TRIANGLEFAN = 6,
SVGA3D_PRIMITIVE_LINELIST_ADJ = 7,
SVGA3D_PRIMITIVE_PREDX_MAX = 7,
SVGA3D_PRIMITIVE_LINESTRIP_ADJ = 8,
SVGA3D_PRIMITIVE_TRIANGLELIST_ADJ = 9,
SVGA3D_PRIMITIVE_TRIANGLESTRIP_ADJ = 10,
SVGA3D_PRIMITIVE_MAX
} SVGA3dPrimitiveType;
 
typedef enum {
SVGA3D_COORDINATE_INVALID = 0,
SVGA3D_COORDINATE_LEFTHANDED = 1,
SVGA3D_COORDINATE_RIGHTHANDED = 2,
SVGA3D_COORDINATE_MAX
} SVGA3dCoordinateType;
 
typedef enum {
SVGA3D_TRANSFORM_INVALID = 0,
SVGA3D_TRANSFORM_WORLD = 1,
SVGA3D_TRANSFORM_MIN = 1,
SVGA3D_TRANSFORM_VIEW = 2,
SVGA3D_TRANSFORM_PROJECTION = 3,
SVGA3D_TRANSFORM_TEXTURE0 = 4,
SVGA3D_TRANSFORM_TEXTURE1 = 5,
SVGA3D_TRANSFORM_TEXTURE2 = 6,
SVGA3D_TRANSFORM_TEXTURE3 = 7,
SVGA3D_TRANSFORM_TEXTURE4 = 8,
SVGA3D_TRANSFORM_TEXTURE5 = 9,
SVGA3D_TRANSFORM_TEXTURE6 = 10,
SVGA3D_TRANSFORM_TEXTURE7 = 11,
SVGA3D_TRANSFORM_WORLD1 = 12,
SVGA3D_TRANSFORM_WORLD2 = 13,
SVGA3D_TRANSFORM_WORLD3 = 14,
SVGA3D_TRANSFORM_MAX
} SVGA3dTransformType;
 
typedef enum {
SVGA3D_LIGHTTYPE_INVALID = 0,
SVGA3D_LIGHTTYPE_MIN = 1,
SVGA3D_LIGHTTYPE_POINT = 1,
SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */
SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */
SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
SVGA3D_LIGHTTYPE_MAX
} SVGA3dLightType;
 
typedef enum {
SVGA3D_CUBEFACE_POSX = 0,
SVGA3D_CUBEFACE_NEGX = 1,
SVGA3D_CUBEFACE_POSY = 2,
SVGA3D_CUBEFACE_NEGY = 3,
SVGA3D_CUBEFACE_POSZ = 4,
SVGA3D_CUBEFACE_NEGZ = 5,
} SVGA3dCubeFace;
 
typedef enum {
SVGA3D_SHADERTYPE_INVALID = 0,
SVGA3D_SHADERTYPE_MIN = 1,
SVGA3D_SHADERTYPE_VS = 1,
SVGA3D_SHADERTYPE_PS = 2,
SVGA3D_SHADERTYPE_PREDX_MAX = 3,
SVGA3D_SHADERTYPE_GS = 3,
SVGA3D_SHADERTYPE_DX10_MAX = 4,
SVGA3D_SHADERTYPE_HS = 4,
SVGA3D_SHADERTYPE_DS = 5,
SVGA3D_SHADERTYPE_CS = 6,
SVGA3D_SHADERTYPE_MAX = 7
} SVGA3dShaderType;
 
#define SVGA3D_NUM_SHADERTYPE_PREDX \
(SVGA3D_SHADERTYPE_PREDX_MAX - SVGA3D_SHADERTYPE_MIN)
 
#define SVGA3D_NUM_SHADERTYPE_DX10 \
(SVGA3D_SHADERTYPE_DX10_MAX - SVGA3D_SHADERTYPE_MIN)
 
#define SVGA3D_NUM_SHADERTYPE \
(SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
 
typedef enum {
SVGA3D_CONST_TYPE_MIN = 0,
SVGA3D_CONST_TYPE_FLOAT = 0,
SVGA3D_CONST_TYPE_INT = 1,
SVGA3D_CONST_TYPE_BOOL = 2,
SVGA3D_CONST_TYPE_MAX = 3,
} SVGA3dShaderConstType;
 
/*
* Register limits for shader consts.
*/
#define SVGA3D_CONSTREG_MAX 256
#define SVGA3D_CONSTINTREG_MAX 16
#define SVGA3D_CONSTBOOLREG_MAX 16
 
typedef enum {
SVGA3D_STRETCH_BLT_POINT = 0,
SVGA3D_STRETCH_BLT_LINEAR = 1,
SVGA3D_STRETCH_BLT_MAX
} SVGA3dStretchBltMode;
 
typedef enum {
SVGA3D_QUERYTYPE_INVALID = ((uint8)-1),
SVGA3D_QUERYTYPE_MIN = 0,
SVGA3D_QUERYTYPE_OCCLUSION = 0,
SVGA3D_QUERYTYPE_TIMESTAMP = 1,
SVGA3D_QUERYTYPE_TIMESTAMPDISJOINT = 2,
SVGA3D_QUERYTYPE_PIPELINESTATS = 3,
SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE = 4,
SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS = 5,
SVGA3D_QUERYTYPE_STREAMOVERFLOWPREDICATE = 6,
SVGA3D_QUERYTYPE_OCCLUSION64 = 7,
SVGA3D_QUERYTYPE_EVENT = 8,
SVGA3D_QUERYTYPE_DX10_MAX = 9,
SVGA3D_QUERYTYPE_SOSTATS_STREAM0 = 9,
SVGA3D_QUERYTYPE_SOSTATS_STREAM1 = 10,
SVGA3D_QUERYTYPE_SOSTATS_STREAM2 = 11,
SVGA3D_QUERYTYPE_SOSTATS_STREAM3 = 12,
SVGA3D_QUERYTYPE_SOP_STREAM0 = 13,
SVGA3D_QUERYTYPE_SOP_STREAM1 = 14,
SVGA3D_QUERYTYPE_SOP_STREAM2 = 15,
SVGA3D_QUERYTYPE_SOP_STREAM3 = 16,
SVGA3D_QUERYTYPE_MAX
} SVGA3dQueryType;
 
typedef uint8 SVGA3dQueryTypeUint8;
 
#define SVGA3D_NUM_QUERYTYPE (SVGA3D_QUERYTYPE_MAX - SVGA3D_QUERYTYPE_MIN)
 
/*
* This is the maximum number of queries per context that can be active
* simultaneously between a beginQuery and endQuery.
*/
#define SVGA3D_MAX_QUERY 64
 
/*
* Query result buffer formats
*/
typedef
#include "vmware_pack_begin.h"
struct {
uint32 samplesRendered;
}
#include "vmware_pack_end.h"
SVGADXOcclusionQueryResult;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 passed;
}
#include "vmware_pack_end.h"
SVGADXEventQueryResult;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint64 timestamp;
}
#include "vmware_pack_end.h"
SVGADXTimestampQueryResult;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint64 realFrequency;
uint32 disjoint;
}
#include "vmware_pack_end.h"
SVGADXTimestampDisjointQueryResult;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint64 inputAssemblyVertices;
uint64 inputAssemblyPrimitives;
uint64 vertexShaderInvocations;
uint64 geometryShaderInvocations;
uint64 geometryShaderPrimitives;
uint64 clipperInvocations;
uint64 clipperPrimitives;
uint64 pixelShaderInvocations;
uint64 hullShaderInvocations;
uint64 domainShaderInvocations;
uint64 computeShaderInvocations;
}
#include "vmware_pack_end.h"
SVGADXPipelineStatisticsQueryResult;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 anySamplesRendered;
}
#include "vmware_pack_end.h"
SVGADXOcclusionPredicateQueryResult;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint64 numPrimitivesWritten;
uint64 numPrimitivesRequired;
}
#include "vmware_pack_end.h"
SVGADXStreamOutStatisticsQueryResult;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 overflowed;
}
#include "vmware_pack_end.h"
SVGADXStreamOutPredicateQueryResult;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint64 samplesRendered;
}
#include "vmware_pack_end.h"
SVGADXOcclusion64QueryResult;
 
/*
* SVGADXQueryResultUnion is not intended for use in the protocol, but is
* very helpful when working with queries generically.
*/
typedef
#include "vmware_pack_begin.h"
union SVGADXQueryResultUnion {
SVGADXOcclusionQueryResult occ;
SVGADXEventQueryResult event;
SVGADXTimestampQueryResult ts;
SVGADXTimestampDisjointQueryResult tsDisjoint;
SVGADXPipelineStatisticsQueryResult pipelineStats;
SVGADXOcclusionPredicateQueryResult occPred;
SVGADXStreamOutStatisticsQueryResult soStats;
SVGADXStreamOutPredicateQueryResult soPred;
SVGADXOcclusion64QueryResult occ64;
}
#include "vmware_pack_end.h"
SVGADXQueryResultUnion;
 
 
typedef enum {
SVGA3D_QUERYSTATE_PENDING = 0, /* Query is not finished yet */
SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully */
SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully */
SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (guest only) */
} SVGA3dQueryState;
 
typedef enum {
SVGA3D_WRITE_HOST_VRAM = 1,
SVGA3D_READ_HOST_VRAM = 2,
} SVGA3dTransferType;
 
typedef enum {
SVGA3D_LOGICOP_INVALID = 0,
SVGA3D_LOGICOP_MIN = 1,
SVGA3D_LOGICOP_COPY = 1,
SVGA3D_LOGICOP_NOT = 2,
SVGA3D_LOGICOP_AND = 3,
SVGA3D_LOGICOP_OR = 4,
SVGA3D_LOGICOP_XOR = 5,
SVGA3D_LOGICOP_NXOR = 6,
SVGA3D_LOGICOP_ROP3MIN = 30, /* 7-29 are reserved for future logic ops. */
SVGA3D_LOGICOP_ROP3MAX = (SVGA3D_LOGICOP_ROP3MIN + 255),
SVGA3D_LOGICOP_MAX = (SVGA3D_LOGICOP_ROP3MAX + 1),
} SVGA3dLogicOp;
 
typedef
#include "vmware_pack_begin.h"
struct {
union {
struct {
uint16 function; /* SVGA3dFogFunction */
uint8 type; /* SVGA3dFogType */
uint8 base; /* SVGA3dFogBase */
};
uint32 uintValue;
};
}
#include "vmware_pack_end.h"
SVGA3dFogMode;
 
/*
* Uniquely identify one image (a 1D/2D/3D array) from a surface. This
* is a surface ID as well as face/mipmap indices.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGA3dSurfaceImageId {
uint32 sid;
uint32 face;
uint32 mipmap;
}
#include "vmware_pack_end.h"
SVGA3dSurfaceImageId;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 width;
uint32 height;
uint32 depth;
}
#include "vmware_pack_end.h"
SVGA3dSize;
 
/*
* Guest-backed objects definitions.
*/
typedef enum {
SVGA_OTABLE_MOB = 0,
SVGA_OTABLE_MIN = 0,
SVGA_OTABLE_SURFACE = 1,
SVGA_OTABLE_CONTEXT = 2,
SVGA_OTABLE_SHADER = 3,
SVGA_OTABLE_SCREENTARGET = 4,
 
SVGA_OTABLE_DX9_MAX = 5,
 
SVGA_OTABLE_DXCONTEXT = 5,
SVGA_OTABLE_MAX = 6
} SVGAOTableType;
 
/*
* Deprecated.
*/
#define SVGA_OTABLE_COUNT 4
 
typedef enum {
SVGA_COTABLE_MIN = 0,
SVGA_COTABLE_RTVIEW = 0,
SVGA_COTABLE_DSVIEW = 1,
SVGA_COTABLE_SRVIEW = 2,
SVGA_COTABLE_ELEMENTLAYOUT = 3,
SVGA_COTABLE_BLENDSTATE = 4,
SVGA_COTABLE_DEPTHSTENCIL = 5,
SVGA_COTABLE_RASTERIZERSTATE = 6,
SVGA_COTABLE_SAMPLER = 7,
SVGA_COTABLE_STREAMOUTPUT = 8,
SVGA_COTABLE_DXQUERY = 9,
SVGA_COTABLE_DXSHADER = 10,
SVGA_COTABLE_DX10_MAX = 11,
SVGA_COTABLE_UAVIEW = 11,
SVGA_COTABLE_MAX
} SVGACOTableType;
 
/*
* The largest size (number of entries) allowed in a COTable.
*/
#define SVGA_COTABLE_MAX_IDS (MAX_UINT16 - 2)
 
typedef enum SVGAMobFormat {
SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
SVGA3D_MOBFMT_PTDEPTH_0 = 0,
SVGA3D_MOBFMT_MIN = 0,
SVGA3D_MOBFMT_PTDEPTH_1 = 1,
SVGA3D_MOBFMT_PTDEPTH_2 = 2,
SVGA3D_MOBFMT_RANGE = 3,
SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
SVGA3D_MOBFMT_PREDX_MAX = 7,
SVGA3D_MOBFMT_EMPTY = 7,
SVGA3D_MOBFMT_MAX,
} SVGAMobFormat;
 
#define SVGA3D_MOB_EMPTY_BASE 1
 
#endif /* _SVGA3D_TYPES_H_ */
/drivers/video/drm/vmwgfx/device_include/svga_escape.h
0,0 → 1,89
/**********************************************************
* Copyright 2007-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
 
/*
* svga_escape.h --
*
* Definitions for our own (vendor-specific) SVGA Escape commands.
*/
 
#ifndef _SVGA_ESCAPE_H_
#define _SVGA_ESCAPE_H_
 
 
/*
* Namespace IDs for the escape command
*/
 
#define SVGA_ESCAPE_NSID_VMWARE 0x00000000
#define SVGA_ESCAPE_NSID_DEVEL 0xFFFFFFFF
 
 
/*
* Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to
* the first DWORD of escape data (after the nsID and size). As a
* guideline we're using the high word and low word as a major and
* minor command number, respectively.
*
* Major command number allocation:
*
* 0000: Reserved
* 0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h)
* 0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h)
* 0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h)
*/
 
#define SVGA_ESCAPE_VMWARE_MAJOR_MASK 0xFFFF0000
 
 
/*
* SVGA Hint commands.
*
* These escapes let the SVGA driver provide optional information to
* the host about the state of the guest or guest applications. The
* host can use these hints to make user interface or performance
* decisions.
*
* Notes:
*
* - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests
* that use the SVGA Screen Object extension. Instead of sending
* this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your
* Screen Object.
*/
 
#define SVGA_ESCAPE_VMWARE_HINT 0x00030000
#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 /* Deprecated */
 
typedef
struct {
uint32 command;
uint32 fullscreen;
struct {
int32 x, y;
} monitorPosition;
} SVGAEscapeHintFullscreen;
 
#endif /* _SVGA_ESCAPE_H_ */
/drivers/video/drm/vmwgfx/device_include/svga_overlay.h
0,0 → 1,199
/**********************************************************
* Copyright 2007-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
 
/*
* svga_overlay.h --
*
* Definitions for video-overlay support.
*/
 
#ifndef _SVGA_OVERLAY_H_
#define _SVGA_OVERLAY_H_
 
#include "svga_reg.h"
 
/*
* Video formats we support
*/
 
#define VMWARE_FOURCC_YV12 0x32315659 /* 'Y' 'V' '1' '2' */
#define VMWARE_FOURCC_YUY2 0x32595559 /* 'Y' 'U' 'Y' '2' */
#define VMWARE_FOURCC_UYVY 0x59565955 /* 'U' 'Y' 'V' 'Y' */
 
typedef enum {
SVGA_OVERLAY_FORMAT_INVALID = 0,
SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12,
SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2,
SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY,
} SVGAOverlayFormat;
 
#define SVGA_VIDEO_COLORKEY_MASK 0x00ffffff
 
#define SVGA_ESCAPE_VMWARE_VIDEO 0x00020000
 
#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS 0x00020001
/* FIFO escape layout:
* Type, Stream Id, (Register Id, Value) pairs */
 
#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH 0x00020002
/* FIFO escape layout:
* Type, Stream Id */
 
typedef
struct SVGAEscapeVideoSetRegs {
struct {
uint32 cmdType;
uint32 streamId;
} header;
 
/* May include zero or more items. */
struct {
uint32 registerId;
uint32 value;
} items[1];
} SVGAEscapeVideoSetRegs;
 
typedef
struct SVGAEscapeVideoFlush {
uint32 cmdType;
uint32 streamId;
} SVGAEscapeVideoFlush;
 
 
/*
* Struct definitions for the video overlay commands built on
* SVGAFifoCmdEscape.
*/
typedef
struct {
uint32 command;
uint32 overlay;
} SVGAFifoEscapeCmdVideoBase;
 
typedef
struct {
SVGAFifoEscapeCmdVideoBase videoCmd;
} SVGAFifoEscapeCmdVideoFlush;
 
typedef
struct {
SVGAFifoEscapeCmdVideoBase videoCmd;
struct {
uint32 regId;
uint32 value;
} items[1];
} SVGAFifoEscapeCmdVideoSetRegs;
 
typedef
struct {
SVGAFifoEscapeCmdVideoBase videoCmd;
struct {
uint32 regId;
uint32 value;
} items[SVGA_VIDEO_NUM_REGS];
} SVGAFifoEscapeCmdVideoSetAllRegs;
 
 
/*
*----------------------------------------------------------------------
*
* VMwareVideoGetAttributes --
*
* Computes the size, pitches and offsets for YUV frames.
*
* Results:
* TRUE on success; otherwise FALSE on failure.
*
* Side effects:
* Pitches and offsets for the given YUV frame are put in 'pitches'
* and 'offsets' respectively. They are both optional though.
*
*----------------------------------------------------------------------
*/
 
static inline bool
VMwareVideoGetAttributes(const SVGAOverlayFormat format, /* IN */
uint32 *width, /* IN / OUT */
uint32 *height, /* IN / OUT */
uint32 *size, /* OUT */
uint32 *pitches, /* OUT (optional) */
uint32 *offsets) /* OUT (optional) */
{
int tmp;
 
*width = (*width + 1) & ~1;
 
if (offsets) {
offsets[0] = 0;
}
 
switch (format) {
case VMWARE_FOURCC_YV12:
*height = (*height + 1) & ~1;
*size = (*width) * (*height);
 
if (pitches) {
pitches[0] = *width;
}
 
if (offsets) {
offsets[1] = *size;
}
 
tmp = *width >> 1;
 
if (pitches) {
pitches[1] = pitches[2] = tmp;
}
 
tmp *= (*height >> 1);
*size += tmp;
 
if (offsets) {
offsets[2] = *size;
}
 
*size += tmp;
break;
 
case VMWARE_FOURCC_YUY2:
case VMWARE_FOURCC_UYVY:
*size = *width * 2;
 
if (pitches) {
pitches[0] = *size;
}
 
*size *= *height;
break;
 
default:
return false;
}
 
return true;
}
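
/*
 * Usage sketch (illustrative only): computing the layout of a
 * 640x480 YV12 frame with the helper above. The resulting size,
 * pitches and offsets are what a driver would program into the
 * SVGA_VIDEO_* overlay registers; the wrapper below and its fixed
 * dimensions are assumptions of this sketch.
 */
static inline bool
VMwareVideoExampleYV12Layout(uint32 *size,     /* OUT */
                             uint32 *pitches,  /* OUT: 3 entries */
                             uint32 *offsets)  /* OUT: 3 entries */
{
   uint32 width = 640;
   uint32 height = 480;

   return VMwareVideoGetAttributes(SVGA_OVERLAY_FORMAT_YV12,
                                   &width, &height,
                                   size, pitches, offsets);
}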
 
#endif /* _SVGA_OVERLAY_H_ */
/drivers/video/drm/vmwgfx/device_include/svga_reg.h
0,0 → 1,1936
/**********************************************************
* Copyright 1998-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
 
/*
* svga_reg.h --
*
* Virtual hardware definitions for the VMware SVGA II device.
*/
 
#ifndef _SVGA_REG_H_
#define _SVGA_REG_H_
#include <linux/pci_ids.h>
 
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_USERLEVEL
 
#define INCLUDE_ALLOW_VMCORE
#include "includeCheck.h"
 
#include "svga_types.h"
 
/*
* SVGA_REG_ENABLE bit definitions.
*/
typedef enum {
SVGA_REG_ENABLE_DISABLE = 0,
SVGA_REG_ENABLE_ENABLE = (1 << 0),
SVGA_REG_ENABLE_HIDE = (1 << 1),
} SvgaRegEnable;
 
typedef uint32 SVGAMobId;
 
/*
* Arbitrary and meaningless limits. Please ignore these when writing
* new drivers.
*/
#define SVGA_MAX_WIDTH 2560
#define SVGA_MAX_HEIGHT 1600
 
 
#define SVGA_MAX_BITS_PER_PIXEL 32
#define SVGA_MAX_DEPTH 24
#define SVGA_MAX_DISPLAYS 10
 
/*
* Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
* cursor bypass mode. This is still supported, but no new guest
* drivers should use it.
*/
#define SVGA_CURSOR_ON_HIDE 0x0 /* Must be 0 to maintain backward compatibility */
#define SVGA_CURSOR_ON_SHOW 0x1 /* Must be 1 to maintain backward compatibility */
#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2 /* Remove the cursor from the framebuffer because we need to see what's under it */
#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */
 
/*
* The maximum framebuffer size that can be traced for guests unless
* SVGA_CAP_GBOBJECTS is set in SVGA_REG_CAPABILITIES. In that case
* the full framebuffer can be traced independent of this limit.
*/
#define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000
 
#define SVGA_MAX_PSEUDOCOLOR_DEPTH 8
#define SVGA_MAX_PSEUDOCOLORS (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH)
#define SVGA_NUM_PALETTE_REGS (3 * SVGA_MAX_PSEUDOCOLORS)
 
#define SVGA_MAGIC 0x900000UL
#define SVGA_MAKE_ID(ver) (SVGA_MAGIC << 8 | (ver))
 
/* Version 2 let the address of the frame buffer be unsigned on Win32 */
#define SVGA_VERSION_2 2
#define SVGA_ID_2 SVGA_MAKE_ID(SVGA_VERSION_2)
 
/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so
PALETTE_BASE has moved */
#define SVGA_VERSION_1 1
#define SVGA_ID_1 SVGA_MAKE_ID(SVGA_VERSION_1)
 
/* Version 0 is the initial version */
#define SVGA_VERSION_0 0
#define SVGA_ID_0 SVGA_MAKE_ID(SVGA_VERSION_0)
 
/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
#define SVGA_ID_INVALID 0xFFFFFFFF
 
/* Port offsets, relative to BAR0 */
#define SVGA_INDEX_PORT 0x0
#define SVGA_VALUE_PORT 0x1
#define SVGA_BIOS_PORT 0x2
#define SVGA_IRQSTATUS_PORT 0x8
 
/*
* Interrupt source flags for IRQSTATUS_PORT and IRQMASK.
*
* Interrupts are only supported when the
* SVGA_CAP_IRQMASK capability is present.
*/
#define SVGA_IRQFLAG_ANY_FENCE 0x1 /* Any fence was passed */
#define SVGA_IRQFLAG_FIFO_PROGRESS 0x2 /* Made forward progress in the FIFO */
#define SVGA_IRQFLAG_FENCE_GOAL 0x4 /* SVGA_FIFO_FENCE_GOAL reached */
#define SVGA_IRQFLAG_COMMAND_BUFFER 0x8 /* Command buffer completed */
#define SVGA_IRQFLAG_ERROR 0x10 /* Error while processing commands */
 
/*
* Registers
*/
 
enum {
SVGA_REG_ID = 0,
SVGA_REG_ENABLE = 1,
SVGA_REG_WIDTH = 2,
SVGA_REG_HEIGHT = 3,
SVGA_REG_MAX_WIDTH = 4,
SVGA_REG_MAX_HEIGHT = 5,
SVGA_REG_DEPTH = 6,
SVGA_REG_BITS_PER_PIXEL = 7, /* Current bpp in the guest */
SVGA_REG_PSEUDOCOLOR = 8,
SVGA_REG_RED_MASK = 9,
SVGA_REG_GREEN_MASK = 10,
SVGA_REG_BLUE_MASK = 11,
SVGA_REG_BYTES_PER_LINE = 12,
SVGA_REG_FB_START = 13, /* (Deprecated) */
SVGA_REG_FB_OFFSET = 14,
SVGA_REG_VRAM_SIZE = 15,
SVGA_REG_FB_SIZE = 16,
 
/* ID 0 implementation only had the above registers, then the palette */
SVGA_REG_ID_0_TOP = 17,
 
SVGA_REG_CAPABILITIES = 17,
SVGA_REG_MEM_START = 18, /* (Deprecated) */
SVGA_REG_MEM_SIZE = 19,
SVGA_REG_CONFIG_DONE = 20, /* Set when memory area configured */
SVGA_REG_SYNC = 21, /* See "FIFO Synchronization Registers" */
SVGA_REG_BUSY = 22, /* See "FIFO Synchronization Registers" */
SVGA_REG_GUEST_ID = 23, /* Set guest OS identifier */
SVGA_REG_CURSOR_ID = 24, /* (Deprecated) */
SVGA_REG_CURSOR_X = 25, /* (Deprecated) */
SVGA_REG_CURSOR_Y = 26, /* (Deprecated) */
SVGA_REG_CURSOR_ON = 27, /* (Deprecated) */
SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */
SVGA_REG_SCRATCH_SIZE = 29, /* Number of scratch registers */
SVGA_REG_MEM_REGS = 30, /* Number of FIFO registers */
SVGA_REG_NUM_DISPLAYS = 31, /* (Deprecated) */
SVGA_REG_PITCHLOCK = 32, /* Fixed pitch for all modes */
SVGA_REG_IRQMASK = 33, /* Interrupt mask */
 
/* Legacy multi-monitor support */
SVGA_REG_NUM_GUEST_DISPLAYS = 34,/* Number of guest displays in X/Y direction */
SVGA_REG_DISPLAY_ID = 35, /* Display ID for the following display attributes */
SVGA_REG_DISPLAY_IS_PRIMARY = 36,/* Whether this is a primary display */
SVGA_REG_DISPLAY_POSITION_X = 37,/* The display position x */
SVGA_REG_DISPLAY_POSITION_Y = 38,/* The display position y */
SVGA_REG_DISPLAY_WIDTH = 39, /* The display's width */
SVGA_REG_DISPLAY_HEIGHT = 40, /* The display's height */
 
/* See "Guest memory regions" below. */
SVGA_REG_GMR_ID = 41,
SVGA_REG_GMR_DESCRIPTOR = 42,
SVGA_REG_GMR_MAX_IDS = 43,
SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
 
SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */
SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits of command buffer PA; writing submits commands */
SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
SVGA_REG_CMD_PREPEND_LOW = 53,
SVGA_REG_CMD_PREPEND_HIGH = 54,
SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
SVGA_REG_MOB_MAX_SIZE = 57,
SVGA_REG_TOP = 58, /* Must be 1 more than the last register */
 
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
/* Base of scratch registers */
/* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
First 4 are reserved for VESA BIOS Extension; any remaining are for
the use of the current SVGA driver. */
};
 
/*
* Guest memory regions (GMRs):
*
* This is a new memory mapping feature available in SVGA devices
* which have the SVGA_CAP_GMR bit set. Previously, there were two
* fixed memory regions available with which to share data between the
* device and the driver: the FIFO ('MEM') and the framebuffer. GMRs
* are our name for an extensible way of providing arbitrary DMA
* buffers for use between the driver and the SVGA device. They are a
* new alternative to framebuffer memory, usable for both 2D and 3D
* graphics operations.
*
* Since GMR mapping must be done synchronously with guest CPU
* execution, we use a new pair of SVGA registers:
*
* SVGA_REG_GMR_ID --
*
* Read/write.
* This register holds the 32-bit ID (a small positive integer)
* of a GMR to create, delete, or redefine. Writing this register
* has no side-effects.
*
* SVGA_REG_GMR_DESCRIPTOR --
*
* Write-only.
* Writing this register will create, delete, or redefine the GMR
* specified by the above ID register. If this register is zero,
* the GMR is deleted. Any pointers into this GMR (including those
* currently being processed by FIFO commands) will be
* synchronously invalidated.
*
* If this register is nonzero, it must be the physical page
* number (PPN) of a data structure which describes the physical
* layout of the memory region this GMR should describe. The
* descriptor structure will be read synchronously by the SVGA
* device when this register is written. The descriptor need not
* remain allocated for the lifetime of the GMR.
*
* The guest driver should write SVGA_REG_GMR_ID first, then
* SVGA_REG_GMR_DESCRIPTOR.
*
* SVGA_REG_GMR_MAX_IDS --
*
* Read-only.
* The SVGA device may choose to support a maximum number of
* user-defined GMR IDs. This register holds the number of supported
* IDs. (The maximum supported ID plus 1)
*
* SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH --
*
* Read-only.
* The SVGA device may choose to put a limit on the total number
* of SVGAGuestMemDescriptor structures it will read when defining
* a single GMR.
*
* The descriptor structure is an array of SVGAGuestMemDescriptor
* structures. Each structure may do one of three things:
*
* - Terminate the GMR descriptor list.
* (ppn==0, numPages==0)
*
* - Add a PPN or range of PPNs to the GMR's virtual address space.
* (ppn != 0, numPages != 0)
*
* - Provide the PPN of the next SVGAGuestMemDescriptor, in order to
* support multi-page GMR descriptor tables without forcing the
* driver to allocate physically contiguous memory.
* (ppn != 0, numPages == 0)
*
* Note that each physical page of SVGAGuestMemDescriptor structures
* can describe at least 2MB of guest memory. If the driver needs to
* use more than one page of descriptor structures, it must use one of
* its SVGAGuestMemDescriptors to point to an additional page. The
* device will never automatically cross a page boundary.
*
* Once the driver has described a GMR, it is immediately available
* for use via any FIFO command that uses an SVGAGuestPtr structure.
* These pointers include a GMR identifier plus an offset into that
* GMR.
*
* The driver must check the SVGA_CAP_GMR bit before using the GMR
* registers.
*/
 
/*
* Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer
* memory as well. In the future, these IDs could even be used to
* allow legacy memory regions to be redefined by the guest as GMRs.
*
* Using the guest framebuffer (GFB) at BAR1 for general purpose DMA
* is being phased out. Please try to use user-defined GMRs whenever
* possible.
*/
#define SVGA_GMR_NULL ((uint32) -1)
#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) /* Guest Framebuffer (GFB) */
 
typedef
#include "vmware_pack_begin.h"
struct SVGAGuestMemDescriptor {
uint32 ppn;
uint32 numPages;
}
#include "vmware_pack_end.h"
SVGAGuestMemDescriptor;
 
typedef
#include "vmware_pack_begin.h"
struct SVGAGuestPtr {
uint32 gmrId;
uint32 offset;
}
#include "vmware_pack_end.h"
SVGAGuestPtr;
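
/*
 * Example (illustrative sketch): defining a GMR that maps one run of
 * physically contiguous guest pages. The descriptor page holds a
 * single mapping entry followed by the (ppn == 0, numPages == 0)
 * terminator, and is handed to the device by writing SVGA_REG_GMR_ID
 * first and SVGA_REG_GMR_DESCRIPTOR second, as described above. The
 * SVGA_WriteReg() register-write helper is an assumption of this
 * sketch, not part of the device interface.
 */
static inline void
SVGADefineContiguousGMR(uint32 gmrId,
                        uint32 descriptorPPN,          /* PPN of 'desc' */
                        SVGAGuestMemDescriptor *desc,  /* One guest page */
                        uint32 firstPPN,
                        uint32 numPages)
{
   desc[0].ppn = firstPPN;
   desc[0].numPages = numPages;
   desc[1].ppn = 0;             /* Terminator entry */
   desc[1].numPages = 0;

   SVGA_WriteReg(SVGA_REG_GMR_ID, gmrId);
   SVGA_WriteReg(SVGA_REG_GMR_DESCRIPTOR, descriptorPPN);
}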
 
/*
* Register based command buffers --
*
* Provide an SVGA device interface that allows the guest to submit
* command buffers to the SVGA device through an SVGA device register.
* The metadata for each command buffer is contained in the
* SVGACBHeader structure along with the return status codes.
*
* The SVGA device supports command buffers if
* SVGA_CAP_COMMAND_BUFFERS is set in the device caps register. The
* fifo must be enabled for command buffers to be submitted.
*
* Command buffers are submitted when the guest writes the 64 byte
* aligned physical address into SVGA_REG_COMMAND_LOW and
* SVGA_REG_COMMAND_HIGH. SVGA_REG_COMMAND_HIGH contains the upper 32
* bits of the physical address. SVGA_REG_COMMAND_LOW contains the
* lower 32 bits of the physical address; since the command buffer
* headers are required to be 64 byte aligned, the lower 6 bits are
* used for the SVGACBContext value. Writing to SVGA_REG_COMMAND_LOW
* submits the command buffer to the device and queues it for
* execution. The SVGA device supports at least
* SVGA_CB_MAX_QUEUED_PER_CONTEXT command buffers that can be queued
* per context and if that limit is reached the device will write the
* status SVGA_CB_STATUS_QUEUE_FULL to the status value of the command
* buffer header synchronously and not raise any IRQs.
*
* It is invalid to submit a command buffer without a valid physical
* address and results are undefined.
*
* The device guarantees that command buffers of size SVGA_CB_MAX_SIZE
* will be supported. If a larger command buffer is submitted results
* are unspecified and the device will either complete the command
* buffer or return an error.
*
* The device guarantees that any individual command in a command
* buffer can be up to SVGA_CB_MAX_COMMAND_SIZE in size which is
* enough to fit a 64x64 color-cursor definition. If the command is
* too large the device is allowed to process the command or return an
* error.
*
* The device context is a special SVGACBContext that allows for
* synchronous register-like accesses with the flexibility of
* commands. There is a different command set defined by
* SVGADeviceContextCmdId. The commands in each command buffer are not
* allowed to straddle physical pages.
*
* The offset field, which is available starting with the
* SVGA_CAP_CMD_BUFFERS_2 cap bit, can be set by the guest to bias the
* start of command processing into the buffer. If an error is
* encountered, the errorOffset will still be relative to the specific
* PA, not biased by the offset. When the command buffer is finished
* the guest should not read the offset field, as there is no guarantee
* what it will be set to.
*/
 
#define SVGA_CB_MAX_SIZE (512 * 1024) /* 512 KB */
#define SVGA_CB_MAX_QUEUED_PER_CONTEXT 32
#define SVGA_CB_MAX_COMMAND_SIZE (32 * 1024) /* 32 KB */
 
#define SVGA_CB_CONTEXT_MASK 0x3f
typedef enum {
SVGA_CB_CONTEXT_DEVICE = 0x3f,
SVGA_CB_CONTEXT_0 = 0x0,
SVGA_CB_CONTEXT_MAX = 0x1,
} SVGACBContext;
 
 
typedef enum {
/*
* The guest is supposed to write SVGA_CB_STATUS_NONE to the status
* field before submitting the command buffer header; the host will
* change the value when it is done with the command buffer.
*/
SVGA_CB_STATUS_NONE = 0,
 
/*
* Written by the host when a command buffer completes successfully.
* The device raises an IRQ with SVGA_IRQFLAG_COMMAND_BUFFER unless
* the SVGA_CB_FLAG_NO_IRQ flag is set.
*/
SVGA_CB_STATUS_COMPLETED = 1,
 
/*
* Written by the host synchronously with the command buffer
* submission to indicate the command buffer was not submitted. No
* IRQ is raised.
*/
SVGA_CB_STATUS_QUEUE_FULL = 2,
 
/*
* Written by the host when an error was detected parsing a command
* in the command buffer, errorOffset is written to contain the
* offset to the first byte of the failing command. The device
* raises the IRQ with both SVGA_IRQFLAG_ERROR and
* SVGA_IRQFLAG_COMMAND_BUFFER. Some of the commands may have been
* processed.
*/
SVGA_CB_STATUS_COMMAND_ERROR = 3,
 
/*
* Written by the host if there is an error parsing the command
* buffer header. The device raises the IRQ with both
* SVGA_IRQFLAG_ERROR and SVGA_IRQFLAG_COMMAND_BUFFER. The device
* did not process any of the command buffer.
*/
SVGA_CB_STATUS_CB_HEADER_ERROR = 4,
 
/*
* Written by the host if the guest requested the host to preempt
* the command buffer. The device will not raise any IRQs and the
* command buffer was not processed.
*/
SVGA_CB_STATUS_PREEMPTED = 5,
 
/*
* Written by the host synchronously with the command buffer
* submission to indicate that the command buffer was not submitted
* due to an error. No IRQ is raised.
*/
SVGA_CB_STATUS_SUBMISSION_ERROR = 6,
} SVGACBStatus;
 
typedef enum {
SVGA_CB_FLAG_NONE = 0,
SVGA_CB_FLAG_NO_IRQ = 1 << 0,
SVGA_CB_FLAG_DX_CONTEXT = 1 << 1,
SVGA_CB_FLAG_MOB = 1 << 2,
} SVGACBFlags;
 
typedef
#include "vmware_pack_begin.h"
struct {
volatile SVGACBStatus status;
volatile uint32 errorOffset;
uint64 id;
SVGACBFlags flags;
uint32 length;
union {
PA pa;
struct {
SVGAMobId mobid;
uint32 mobOffset;
} mob;
} ptr;
uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise */
uint32 dxContext; /* Valid if DX_CONTEXT flag set, must be zero otherwise */
uint32 mustBeZero[6];
}
#include "vmware_pack_end.h"
SVGACBHeader;
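
/*
 * Example (illustrative sketch): submitting one command buffer on
 * SVGA_CB_CONTEXT_0. The header must live in guest physical memory at
 * a 64 byte aligned address; the low 6 bits of SVGA_REG_COMMAND_LOW
 * carry the SVGACBContext as described above. SVGA_WriteReg() and the
 * caller-provided physical addresses are assumptions of this sketch.
 */
static inline void
SVGASubmitCommandBuffer(SVGACBHeader *header,  /* 64 byte aligned */
                        PA headerPA,           /* PA of 'header' */
                        PA commandsPA,         /* PA of command data */
                        uint32 commandsLength) /* Bytes of command data */
{
   uint32 i;

   header->status = SVGA_CB_STATUS_NONE;
   header->errorOffset = 0;
   header->id = 0;
   header->flags = SVGA_CB_FLAG_NONE;
   header->length = commandsLength;
   header->ptr.pa = commandsPA;
   header->offset = 0;
   header->dxContext = 0;
   for (i = 0; i < 6; i++) {
      header->mustBeZero[i] = 0;
   }

   SVGA_WriteReg(SVGA_REG_COMMAND_HIGH, (uint32)(headerPA >> 32));
   SVGA_WriteReg(SVGA_REG_COMMAND_LOW,
                 ((uint32)headerPA & ~(uint32)SVGA_CB_CONTEXT_MASK) |
                 SVGA_CB_CONTEXT_0);
}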
 
typedef enum {
SVGA_DC_CMD_NOP = 0,
SVGA_DC_CMD_START_STOP_CONTEXT = 1,
SVGA_DC_CMD_PREEMPT = 2,
SVGA_DC_CMD_MAX = 3,
SVGA_DC_CMD_FORCE_UINT = MAX_UINT32,
} SVGADeviceContextCmdId;
 
typedef struct {
uint32 enable;
SVGACBContext context;
} SVGADCCmdStartStop;
 
/*
* SVGADCCmdPreempt --
*
* This command allows the guest to request that all command buffers
* on the specified context that can be preempted are preempted. After
* execution of this command all command buffers that were preempted
* will already have SVGA_CB_STATUS_PREEMPTED written into the status
* field. The device might still be processing a command buffer,
* assuming its execution started before the preemption request was
* received. Setting the ignoreIDZero flag to TRUE will cause the
* device to not preempt command buffers with the id field in the
* command buffer header set to zero.
*/
 
typedef struct {
SVGACBContext context;
uint32 ignoreIDZero;
} SVGADCCmdPreempt;
 
/*
* SVGAGMRImageFormat --
*
* This is a packed representation of the source 2D image format
* for a GMR-to-screen blit. Currently it is defined as an encoding
* of the screen's color depth and bits-per-pixel; however, 16 bits
* are reserved for future use to identify other encodings (such as
* RGBA or higher-precision images).
*
* Currently supported formats:
*
* bpp depth Format Name
* --- ----- -----------
* 32 24 32-bit BGRX
* 24 24 24-bit BGR
* 16 16 RGB 5-6-5
* 16 15 RGB 5-5-5
*
*/
 
typedef struct SVGAGMRImageFormat {
union {
struct {
uint32 bitsPerPixel : 8;
uint32 colorDepth : 8;
uint32 reserved : 16; /* Must be zero */
};
 
uint32 value;
};
} SVGAGMRImageFormat;
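
/*
 * Example (illustrative): encoding the 32 bpp / 24 bit depth BGRX
 * entry from the table above as a single register value.
 */
static inline uint32
SVGAMake32BitGMRImageFormat(void)
{
   SVGAGMRImageFormat format;

   format.value = 0;          /* Clears the reserved bits */
   format.bitsPerPixel = 32;
   format.colorDepth = 24;
   return format.value;
}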
 
typedef
#include "vmware_pack_begin.h"
struct SVGAGuestImage {
SVGAGuestPtr ptr;
 
/*
* A note on interpretation of pitch: This value of pitch is the
* number of bytes between vertically adjacent image
* blocks. Normally this is the number of bytes between the first
* pixel of two adjacent scanlines. With compressed textures,
* however, this may represent the number of bytes between
* compression blocks rather than between rows of pixels.
*
* XXX: Compressed textures currently must be tightly packed in guest memory.
*
* If the image is 1-dimensional, pitch is ignored.
*
* If 'pitch' is zero, the SVGA3D device calculates a pitch value
* assuming each row of blocks is tightly packed.
*/
uint32 pitch;
}
#include "vmware_pack_end.h"
SVGAGuestImage;
 
/*
* SVGAColorBGRX --
*
* A 24-bit color format (BGRX), which does not depend on the
* format of the legacy guest framebuffer (GFB) or the current
* GMRFB state.
*/
 
typedef struct SVGAColorBGRX {
union {
struct {
uint32 b : 8;
uint32 g : 8;
uint32 r : 8;
uint32 x : 8; /* Unused */
};
 
uint32 value;
};
} SVGAColorBGRX;
 
 
/*
* SVGASignedRect --
* SVGASignedPoint --
*
* Signed rectangle and point primitives. These are used by the new
* 2D primitives for drawing to Screen Objects, which can occupy a
* signed virtual coordinate space.
*
* SVGASignedRect specifies a half-open interval: the (left, top)
* pixel is part of the rectangle, but the (right, bottom) pixel is
* not.
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
int32 left;
int32 top;
int32 right;
int32 bottom;
}
#include "vmware_pack_end.h"
SVGASignedRect;
 
typedef
#include "vmware_pack_begin.h"
struct {
int32 x;
int32 y;
}
#include "vmware_pack_end.h"
SVGASignedPoint;
 
 
/*
* SVGA Device Capabilities
*
* Note the holes in the bitfield. Missing bits have been deprecated,
* and must not be reused. Those capabilities will never be reported
* by new versions of the SVGA device.
*
* XXX: Add longer descriptions for each capability, including a list
* of the new features that each capability provides.
*
* SVGA_CAP_IRQMASK --
* Provides device interrupts. Adds device register SVGA_REG_IRQMASK
* to set interrupt mask and direct I/O port SVGA_IRQSTATUS_PORT to
* set/clear pending interrupts.
*
* SVGA_CAP_GMR --
* Provides synchronous mapping of guest memory regions (GMR).
* Adds device registers SVGA_REG_GMR_ID, SVGA_REG_GMR_DESCRIPTOR,
* SVGA_REG_GMR_MAX_IDS, and SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH.
*
* SVGA_CAP_TRACES --
* Allows framebuffer trace-based updates even when FIFO is enabled.
* Adds device register SVGA_REG_TRACES.
*
* SVGA_CAP_GMR2 --
* Provides asynchronous commands to define and remap guest memory
* regions. Adds device registers SVGA_REG_GMRS_MAX_PAGES and
* SVGA_REG_MEMORY_SIZE.
*
* SVGA_CAP_SCREEN_OBJECT_2 --
* Allow screen object support, and require backing stores from the
* guest for each screen object.
*
* SVGA_CAP_COMMAND_BUFFERS --
* Enable register based command buffer submission.
*
* SVGA_CAP_DEAD1 --
* This cap was incorrectly used by old drivers and should not be
* reused.
*
* SVGA_CAP_CMD_BUFFERS_2 --
* Enable support for the prepend command buffer submission
* registers. SVGA_REG_CMD_PREPEND_LOW and
* SVGA_REG_CMD_PREPEND_HIGH.
*
* SVGA_CAP_GBOBJECTS --
* Enable guest-backed objects and surfaces.
*
* SVGA_CAP_CMD_BUFFERS_3 --
* Enable support for command buffers in a mob.
*/
 
#define SVGA_CAP_NONE 0x00000000
#define SVGA_CAP_RECT_COPY 0x00000002
#define SVGA_CAP_CURSOR 0x00000020
#define SVGA_CAP_CURSOR_BYPASS 0x00000040
#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080
#define SVGA_CAP_8BIT_EMULATION 0x00000100
#define SVGA_CAP_ALPHA_CURSOR 0x00000200
#define SVGA_CAP_3D 0x00004000
#define SVGA_CAP_EXTENDED_FIFO 0x00008000
#define SVGA_CAP_MULTIMON 0x00010000
#define SVGA_CAP_PITCHLOCK 0x00020000
#define SVGA_CAP_IRQMASK 0x00040000
#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000
#define SVGA_CAP_GMR 0x00100000
#define SVGA_CAP_TRACES 0x00200000
#define SVGA_CAP_GMR2 0x00400000
#define SVGA_CAP_SCREEN_OBJECT_2 0x00800000
#define SVGA_CAP_COMMAND_BUFFERS 0x01000000
#define SVGA_CAP_DEAD1 0x02000000
#define SVGA_CAP_CMD_BUFFERS_2 0x04000000
#define SVGA_CAP_GBOBJECTS 0x08000000
#define SVGA_CAP_DX 0x10000000
 
#define SVGA_CAP_CMD_RESERVED 0x80000000
 
 
/*
* The Guest can optionally read some SVGA device capabilities through
* the backdoor with command BDOOR_CMD_GET_SVGA_CAPABILITIES before
* the SVGA device is initialized. The type of capability the guest
* is requesting from the SVGABackdoorCapType enum should be placed in
* the upper 16 bits of the backdoor command id (ECX). On success the
* the value of EBX will be set to BDOOR_MAGIC and EAX will be set to
* the requested capability. If the command is not supported then EBX
* will be left unchanged and EAX will be set to -1. Because it is
* possible that -1 is the value of the requested cap the correct way
* to check if the command was successful is to check if EBX was changed
* to BDOOR_MAGIC making sure to initialize the register to something
* else first.
*/
 
typedef enum {
SVGABackdoorCapDeviceCaps = 0,
SVGABackdoorCapFifoCaps = 1,
SVGABackdoorCap3dHWVersion = 2,
SVGABackdoorCapMax = 3,
} SVGABackdoorCapType;
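
/*
 * Example (illustrative sketch): querying the device caps through the
 * backdoor, using the EBX == BDOOR_MAGIC success check described
 * above. Backdoor_Call(), BDOOR_CMD_GET_SVGA_CAPABILITIES and
 * BDOOR_MAGIC come from the backdoor headers; their exact names and
 * signatures are assumptions of this sketch.
 */
static inline bool
SVGAGetBackdoorCap(SVGABackdoorCapType type,  /* IN */
                   uint32 *value)             /* OUT */
{
   uint32 eax = (uint32)-1;
   uint32 ebx = ~BDOOR_MAGIC;  /* Anything other than BDOOR_MAGIC */

   Backdoor_Call(BDOOR_CMD_GET_SVGA_CAPABILITIES | ((uint32)type << 16),
                 &eax, &ebx);

   if (ebx != BDOOR_MAGIC) {
      return false;            /* Command not supported */
   }
   *value = eax;
   return true;
}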
 
 
/*
* FIFO register indices.
*
* The FIFO is a chunk of device memory mapped into guest physmem. It
* is always treated as 32-bit words.
*
* The guest driver gets to decide how to partition it between
* - FIFO registers (there are always at least 4, specifying where the
* following data area is and how much data it contains; there may be
* more registers following these, depending on the FIFO protocol
* version in use)
* - FIFO data, written by the guest and slurped out by the VMX.
* These indices are 32-bit word offsets into the FIFO.
*/
 
enum {
/*
* Block 1 (basic registers): The originally defined FIFO registers.
* These exist and are valid for all versions of the FIFO protocol.
*/
 
SVGA_FIFO_MIN = 0,
SVGA_FIFO_MAX, /* The distance from MIN to MAX must be at least 10K */
SVGA_FIFO_NEXT_CMD,
SVGA_FIFO_STOP,
 
/*
* Block 2 (extended registers): Mandatory registers for the extended
* FIFO. These exist if the SVGA caps register includes
* SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their
* associated capability bit is enabled.
*
* Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied
* support only for (FIFO registers) CAPABILITIES, FLAGS, and FENCE.
* This means that the guest has to test individually (in most cases
* using FIFO caps) for the presence of registers after this; the VMX
* can define "extended FIFO" to mean whatever it wants, and currently
* won't enable it unless there's room for that set and much more.
*/
 
SVGA_FIFO_CAPABILITIES = 4,
SVGA_FIFO_FLAGS,
/* Valid with SVGA_FIFO_CAP_FENCE: */
SVGA_FIFO_FENCE,
 
/*
* Block 3a (optional extended registers): Additional registers for the
* extended FIFO, whose presence isn't actually implied by
* SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to
* leave room for them.
*
* The VMX currently considers the registers in block 3a mandatory for
* the extended FIFO.
*/
 
/* Valid if exists (i.e. if extended FIFO enabled): */
SVGA_FIFO_3D_HWVERSION, /* See SVGA3dHardwareVersion in svga3d_reg.h */
/* Valid with SVGA_FIFO_CAP_PITCHLOCK: */
SVGA_FIFO_PITCHLOCK,
 
/* Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3: */
SVGA_FIFO_CURSOR_ON, /* Cursor bypass 3 show/hide register */
SVGA_FIFO_CURSOR_X, /* Cursor bypass 3 x register */
SVGA_FIFO_CURSOR_Y, /* Cursor bypass 3 y register */
SVGA_FIFO_CURSOR_COUNT, /* Incremented when any of the other 3 change */
SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
 
/* Valid with SVGA_FIFO_CAP_RESERVE: */
SVGA_FIFO_RESERVED, /* Bytes past NEXT_CMD with real contents */
 
/*
* Valid with SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2:
*
* By default this is SVGA_ID_INVALID, to indicate that the cursor
* coordinates are specified relative to the virtual root. If this
* is set to a specific screen ID, cursor position is reinterpreted
* as a signed offset relative to that screen's origin.
*/
SVGA_FIFO_CURSOR_SCREEN_ID,
 
/*
* Valid with SVGA_FIFO_CAP_DEAD
*
* An arbitrary value written by the host, drivers should not use it.
*/
SVGA_FIFO_DEAD,
 
/*
* Valid with SVGA_FIFO_CAP_3D_HWVERSION_REVISED:
*
* Contains 3D HWVERSION (see SVGA3dHardwareVersion in svga3d_reg.h)
* on platforms that can enforce graphics resource limits.
*/
SVGA_FIFO_3D_HWVERSION_REVISED,
 
/*
* XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
* registers, but this must be done carefully and with judicious use of
* capability bits, since comparisons based on SVGA_FIFO_MIN aren't
* enough to tell you whether the register exists: we've shipped drivers
* and products that used SVGA_FIFO_3D_CAPS but didn't know about some of
* the earlier ones. The actual order of introduction was:
* - PITCHLOCK
* - 3D_CAPS
* - CURSOR_* (cursor bypass 3)
* - RESERVED
* So, code that wants to know whether it can use any of the
* aforementioned registers, or anything else added after PITCHLOCK and
* before 3D_CAPS, needs to reason about something other than
* SVGA_FIFO_MIN.
*/
 
/*
* 3D caps block space; valid with 3D hardware version >=
* SVGA3D_HWVERSION_WS6_B1.
*/
SVGA_FIFO_3D_CAPS = 32,
SVGA_FIFO_3D_CAPS_LAST = 32 + 255,
 
/*
* End of VMX's current definition of "extended-FIFO registers".
* Registers before here are always enabled/disabled as a block; either
* the extended FIFO is enabled and includes all preceding registers, or
* it's disabled entirely.
*
* Block 3b (truly optional extended registers): Additional registers for
* the extended FIFO, which the VMX already knows how to enable and
* disable with correct granularity.
*
* Registers after here exist if and only if the guest SVGA driver
* sets SVGA_FIFO_MIN high enough to leave room for them.
*/
 
/* Valid if register exists: */
SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
SVGA_FIFO_FENCE_GOAL, /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
SVGA_FIFO_BUSY, /* See "FIFO Synchronization Registers" */
 
/*
* Always keep this last. This defines the maximum number of
* registers we know about. At power-on, this value is placed in
* the SVGA_REG_MEM_REGS register, and we expect the guest driver
* to allocate this much space in FIFO memory for registers.
*/
SVGA_FIFO_NUM_REGS
};
 
 
/*
* Definition of registers included in extended FIFO support.
*
* The guest SVGA driver gets to allocate the FIFO between registers
* and data. It must always allocate at least 4 registers, but old
* drivers stopped there.
*
* The VMX will enable extended FIFO support if and only if the guest
* left enough room for all registers defined as part of the mandatory
* set for the extended FIFO.
*
* Note that the guest drivers typically allocate the FIFO only at
* initialization time, not at mode switches, so it's likely that the
* number of FIFO registers won't change without a reboot.
*
* All registers less than this value are guaranteed to be present if
* svgaUser->fifo.extended is set. Any later registers must be tested
* individually for compatibility at each use (in the VMX).
*
* This value is used only by the VMX, so it can change without
* affecting driver compatibility; keep it that way?
*/
#define SVGA_FIFO_EXTENDED_MANDATORY_REGS (SVGA_FIFO_3D_CAPS_LAST + 1)
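
/*
 * Example (illustrative sketch): partitioning FIFO memory between
 * registers and command data at initialization time, leaving room for
 * every register this header knows about. 'fifo' points at the mapped
 * FIFO memory and 'fifoSizeBytes' is its total size; both are
 * assumptions of this sketch.
 */
static inline void
SVGAFifoInitRegisters(volatile uint32 *fifo, uint32 fifoSizeBytes)
{
   fifo[SVGA_FIFO_MIN] = SVGA_FIFO_NUM_REGS * sizeof(uint32);
   fifo[SVGA_FIFO_MAX] = fifoSizeBytes;
   fifo[SVGA_FIFO_NEXT_CMD] = fifo[SVGA_FIFO_MIN];
   fifo[SVGA_FIFO_STOP] = fifo[SVGA_FIFO_MIN];
}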
 
 
/*
* FIFO Synchronization Registers
*
* This explains the relationship between the various FIFO
* sync-related registers in IOSpace and in FIFO space.
*
* SVGA_REG_SYNC --
*
* The SYNC register can be used in two different ways by the guest:
*
* 1. If the guest wishes to fully sync (drain) the FIFO,
* it will write once to SYNC then poll on the BUSY
* register. The FIFO is sync'ed once BUSY is zero.
*
* 2. If the guest wants to asynchronously wake up the host,
* it will write once to SYNC without polling on BUSY.
* Ideally it will do this after some new commands have
* been placed in the FIFO, and after reading a zero
* from SVGA_FIFO_BUSY.
*
* (1) is the original behaviour that SYNC was designed to
* support. Originally, a write to SYNC would implicitly
* trigger a read from BUSY. This causes us to synchronously
* process the FIFO.
*
* This behaviour has since been changed so that writing SYNC
* will *not* implicitly cause a read from BUSY. Instead, it
* makes a channel call which asynchronously wakes up the MKS
* thread.
*
* New guests can use this new behaviour to implement (2)
* efficiently. This lets guests get the host's attention
* without waiting for the MKS to poll, which gives us much
* better CPU utilization on SMP hosts and on UP hosts while
* we're blocked on the host GPU.
*
* Old guests shouldn't notice the behaviour change. SYNC was
* never guaranteed to process the entire FIFO, since it was
* bounded to a particular number of CPU cycles. Old guests will
* still loop on the BUSY register until the FIFO is empty.
*
* Writing to SYNC currently has the following side-effects:
*
* - Sets SVGA_REG_BUSY to TRUE (in the monitor)
* - Asynchronously wakes up the MKS thread for FIFO processing
* - The value written to SYNC is recorded as a "reason", for
* stats purposes.
*
* If SVGA_FIFO_BUSY is available, drivers are advised to only
* write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
* SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
* eventually set SVGA_FIFO_BUSY on its own, but this approach
* lets the driver avoid sending multiple asynchronous wakeup
* messages to the MKS thread.
*
* SVGA_REG_BUSY --
*
* This register is set to TRUE when SVGA_REG_SYNC is written,
* and it reads as FALSE when the FIFO has been completely
* drained.
*
* Every read from this register causes us to synchronously
* process FIFO commands. There is no guarantee as to how many
* commands each read will process.
*
* CPU time spent processing FIFO commands will be billed to
* the guest.
*
* New drivers should avoid using this register unless they
* need to guarantee that the FIFO is completely drained. It
* is overkill for performing a sync-to-fence. Older drivers
* will use this register for any type of synchronization.
*
* SVGA_FIFO_BUSY --
*
* This register is a fast way for the guest driver to check
* whether the FIFO is already being processed. It reads and
* writes at normal RAM speeds, with no monitor intervention.
*
* If this register reads as TRUE, the host is guaranteeing that
* any new commands written into the FIFO will be noticed before
* the MKS goes back to sleep.
*
* If this register reads as FALSE, no such guarantee can be
* made.
*
* The guest should use this register to quickly determine
* whether or not it needs to wake up the host. If the guest
* just wrote a command or group of commands that it would like
* the host to begin processing, it should:
*
* 1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
* action is necessary.
*
* 2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
* code that we've already sent a SYNC to the host and we
* don't need to send a duplicate.
*
* 3. Write a reason to SVGA_REG_SYNC. This will send an
* asynchronous wakeup to the MKS thread.
*/
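
/*
 * Example (illustrative sketch): the asynchronous wakeup sequence in
 * steps 1-3 above, assuming SVGA_FIFO_BUSY is present. 'fifo' is the
 * mapped FIFO memory and SVGA_WriteReg() is a register-write helper;
 * both are assumptions of this sketch. The value written to SYNC is
 * only recorded as a "reason" for stats purposes.
 */
static inline void
SVGARingDoorbell(volatile uint32 *fifo, uint32 reason)
{
   if (fifo[SVGA_FIFO_BUSY]) {
      return;                   /* Host is already awake */
   }
   fifo[SVGA_FIFO_BUSY] = 1;    /* Avoid duplicate wakeups */
   SVGA_WriteReg(SVGA_REG_SYNC, reason);
}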
 
 
/*
* FIFO Capabilities
*
* Fence -- Fence register and command are supported
* Accel Front -- Front buffer only commands are supported
* Pitch Lock -- Pitch lock register is supported
* Video -- SVGA Video overlay units are supported
* Escape -- Escape command is supported
*
* XXX: Add longer descriptions for each capability, including a list
* of the new features that each capability provides.
*
* SVGA_FIFO_CAP_SCREEN_OBJECT --
*
* Provides dynamic multi-screen rendering, for improved Unity and
* multi-monitor modes. With Screen Object, the guest can
* dynamically create and destroy 'screens', which can represent
* Unity windows or virtual monitors. Screen Object also provides
* strong guarantees that DMA operations happen only when
* guest-initiated. Screen Object deprecates the BAR1 guest
* framebuffer (GFB) and all commands that work only with the GFB.
*
* New registers:
* FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID
*
* New 2D commands:
* DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN,
* BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY
*
* New 3D commands:
* BLIT_SURFACE_TO_SCREEN
*
* New guarantees:
*
* - The host will not read or write guest memory, including the GFB,
* except when explicitly initiated by a DMA command.
*
* - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK,
* is guaranteed to complete before any subsequent FENCEs.
*
* - All legacy commands which affect a Screen (UPDATE, PRESENT,
* PRESENT_READBACK) as well as new Screen blit commands will
* all behave consistently as blits, and memory will be read
* or written in FIFO order.
*
* For example, if you PRESENT from one SVGA3D surface to multiple
* places on the screen, the data copied will always be from the
* SVGA3D surface at the time the PRESENT was issued in the FIFO.
* This was not necessarily true on devices without Screen Object.
*
* This means that on devices that support Screen Object, the
* PRESENT_READBACK command should not be necessary unless you
* actually want to read back the results of 3D rendering into
* system memory. (And for that, the BLIT_SCREEN_TO_GMRFB
* command provides a strict superset of functionality.)
*
* - When a screen is resized, either using Screen Object commands or
* legacy multimon registers, its contents are preserved.
*
* SVGA_FIFO_CAP_GMR2 --
*
* Provides new commands to define and remap guest memory regions (GMR).
*
* New 2D commands:
* DEFINE_GMR2, REMAP_GMR2.
*
* SVGA_FIFO_CAP_3D_HWVERSION_REVISED --
*
* Indicates new register SVGA_FIFO_3D_HWVERSION_REVISED exists.
* This register may replace SVGA_FIFO_3D_HWVERSION on platforms
* that enforce graphics resource limits. This allows the platform
* to clear SVGA_FIFO_3D_HWVERSION and disable 3D in legacy guest
* drivers that do not limit their resources.
*
* Note this is an alias to SVGA_FIFO_CAP_GMR2 because these indicators
* are codependent (and thus we use a single capability bit).
*
* SVGA_FIFO_CAP_SCREEN_OBJECT_2 --
*
* Modifies the DEFINE_SCREEN command to include a guest provided
* backing store in GMR memory and the bytesPerLine for the backing
* store. This capability requires the use of a backing store when
* creating screen objects. However if SVGA_FIFO_CAP_SCREEN_OBJECT
* is present then backing stores are optional.
*
* SVGA_FIFO_CAP_DEAD --
*
* Drivers should not use this cap bit. This cap bit can not be
* reused since some hosts already expose it.
*/
 
#define SVGA_FIFO_CAP_NONE 0
#define SVGA_FIFO_CAP_FENCE (1<<0)
#define SVGA_FIFO_CAP_ACCELFRONT (1<<1)
#define SVGA_FIFO_CAP_PITCHLOCK (1<<2)
#define SVGA_FIFO_CAP_VIDEO (1<<3)
#define SVGA_FIFO_CAP_CURSOR_BYPASS_3 (1<<4)
#define SVGA_FIFO_CAP_ESCAPE (1<<5)
#define SVGA_FIFO_CAP_RESERVE (1<<6)
#define SVGA_FIFO_CAP_SCREEN_OBJECT (1<<7)
#define SVGA_FIFO_CAP_GMR2 (1<<8)
#define SVGA_FIFO_CAP_3D_HWVERSION_REVISED SVGA_FIFO_CAP_GMR2
#define SVGA_FIFO_CAP_SCREEN_OBJECT_2 (1<<9)
#define SVGA_FIFO_CAP_DEAD (1<<10)
 
 
/*
* FIFO Flags
*
* Accel Front -- Driver should use front buffer only commands
*/
 
#define SVGA_FIFO_FLAG_NONE 0
#define SVGA_FIFO_FLAG_ACCELFRONT (1<<0)
#define SVGA_FIFO_FLAG_RESERVED (1<<31) /* Internal use only */
 
/*
* FIFO reservation sentinel value
*/
 
#define SVGA_FIFO_RESERVED_UNKNOWN 0xffffffff
 
 
/*
* Video overlay support
*/
 
#define SVGA_NUM_OVERLAY_UNITS 32
 
 
/*
* Video capabilities that the guest is currently using
*/
 
#define SVGA_VIDEO_FLAG_COLORKEY 0x0001
 
 
/*
* Offsets for the video overlay registers
*/
 
enum {
SVGA_VIDEO_ENABLED = 0,
SVGA_VIDEO_FLAGS,
SVGA_VIDEO_DATA_OFFSET,
SVGA_VIDEO_FORMAT,
SVGA_VIDEO_COLORKEY,
SVGA_VIDEO_SIZE, /* Deprecated */
SVGA_VIDEO_WIDTH,
SVGA_VIDEO_HEIGHT,
SVGA_VIDEO_SRC_X,
SVGA_VIDEO_SRC_Y,
SVGA_VIDEO_SRC_WIDTH,
SVGA_VIDEO_SRC_HEIGHT,
SVGA_VIDEO_DST_X, /* Signed int32 */
SVGA_VIDEO_DST_Y, /* Signed int32 */
SVGA_VIDEO_DST_WIDTH,
SVGA_VIDEO_DST_HEIGHT,
SVGA_VIDEO_PITCH_1,
SVGA_VIDEO_PITCH_2,
SVGA_VIDEO_PITCH_3,
SVGA_VIDEO_DATA_GMRID, /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords */
/* (SVGA_ID_INVALID) */
SVGA_VIDEO_NUM_REGS
};
 
 
/*
* SVGA Overlay Units
*
* width and height relate to the entire source video frame.
* srcX, srcY, srcWidth and srcHeight represent the subset of the source
* video frame to be displayed.
*/
 
typedef
#include "vmware_pack_begin.h"
struct SVGAOverlayUnit {
uint32 enabled;
uint32 flags;
uint32 dataOffset;
uint32 format;
uint32 colorKey;
uint32 size;
uint32 width;
uint32 height;
uint32 srcX;
uint32 srcY;
uint32 srcWidth;
uint32 srcHeight;
int32 dstX;
int32 dstY;
uint32 dstWidth;
uint32 dstHeight;
uint32 pitches[3];
uint32 dataGMRId;
uint32 dstScreenId;
}
#include "vmware_pack_end.h"
SVGAOverlayUnit;
 
 
/*
* Guest display topology
*
* XXX: This structure is not part of the SVGA device's interface, and
* doesn't really belong here.
*/
#define SVGA_INVALID_DISPLAY_ID ((uint32)-1)
 
typedef struct SVGADisplayTopology {
uint16 displayId;
uint16 isPrimary;
uint32 width;
uint32 height;
uint32 positionX;
uint32 positionY;
} SVGADisplayTopology;
 
 
/*
* SVGAScreenObject --
*
* This is a new way to represent a guest's multi-monitor screen or
* Unity window. Screen objects are only supported if the
* SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set.
*
* If Screen Objects are supported, they can be used to fully
* replace the functionality provided by the framebuffer registers
* (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY.
*
* The screen object is a struct with guaranteed binary
* compatibility. New flags can be added, and the struct may grow,
* but existing fields must retain their meaning.
*
* Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2 are required fields of
* a SVGAGuestPtr that is used to back the screen contents. This
* memory must come from the GFB. The guest is not allowed to
* access the memory and doing so will have undefined results. The
* backing store is required to be page aligned and the size is
* padded to the next page boundary. The number of pages is:
* (bytesPerLine * size.width * 4 + PAGE_SIZE - 1) / PAGE_SIZE
*
* The pitch in the backingStore is required to be at least large
* enough to hold a 32bpp scanline. It is recommended that the
* driver pad bytesPerLine for a potential performance win.
*
* The cloneCount field is treated as a hint from the guest that
* the user wants this display to be cloned, cloneCount times. A
* value of zero means no cloning should happen.
*/
 
#define SVGA_SCREEN_MUST_BE_SET (1 << 0)
#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
#define SVGA_SCREEN_IS_PRIMARY (1 << 1)
#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2)
 
/*
* Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When the screen is
* deactivated the base layer is defined to lose all contents and
* become black. When a screen is deactivated the backing store is
* optional; when this flag is set, backingPtr and bytesPerLine will
* be ignored.
*/
#define SVGA_SCREEN_DEACTIVATE (1 << 3)
 
/*
* Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When this flag is set,
* the screen contents will be output as all black to the user,
* though the base layer contents are preserved. The screen base layer
* can still be read and written to as normal, though no visible
* effect will be seen by the user. When the flag is changed, the
* screen will be blanked or redrawn to the current contents as needed
* without any extra commands from the driver. This flag only has an
* effect when the screen is not deactivated.
*/
#define SVGA_SCREEN_BLANKING (1 << 4)
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 structSize; /* sizeof(SVGAScreenObject) */
uint32 id;
uint32 flags;
struct {
uint32 width;
uint32 height;
} size;
struct {
int32 x;
int32 y;
} root;
 
/*
* Added and required by SVGA_FIFO_CAP_SCREEN_OBJECT_2, optional
* with SVGA_FIFO_CAP_SCREEN_OBJECT.
*/
SVGAGuestImage backingStore;
 
/*
* The cloneCount field is treated as a hint from the guest that
* the user wants this display to be cloned, cloneCount times.
*
* A value of zero means no cloning should happen.
*/
uint32 cloneCount;
}
#include "vmware_pack_end.h"
SVGAScreenObject;
 
 
/*
* Commands in the command FIFO:
*
* Command IDs defined below are used for the traditional 2D FIFO
* communication (not all commands are available for all versions of the
* SVGA FIFO protocol).
*
* Note the holes in the command ID numbers: These commands have been
* deprecated, and the old IDs must not be reused.
*
* Command IDs from 1000 to 2999 are reserved for use by the SVGA3D
* protocol.
*
* Each command's parameters are described by the comments and
* structs below.
*/
 
typedef enum {
SVGA_CMD_INVALID_CMD = 0,
SVGA_CMD_UPDATE = 1,
SVGA_CMD_RECT_COPY = 3,
SVGA_CMD_RECT_ROP_COPY = 14,
SVGA_CMD_DEFINE_CURSOR = 19,
SVGA_CMD_DEFINE_ALPHA_CURSOR = 22,
SVGA_CMD_UPDATE_VERBOSE = 25,
SVGA_CMD_FRONT_ROP_FILL = 29,
SVGA_CMD_FENCE = 30,
SVGA_CMD_ESCAPE = 33,
SVGA_CMD_DEFINE_SCREEN = 34,
SVGA_CMD_DESTROY_SCREEN = 35,
SVGA_CMD_DEFINE_GMRFB = 36,
SVGA_CMD_BLIT_GMRFB_TO_SCREEN = 37,
SVGA_CMD_BLIT_SCREEN_TO_GMRFB = 38,
SVGA_CMD_ANNOTATION_FILL = 39,
SVGA_CMD_ANNOTATION_COPY = 40,
SVGA_CMD_DEFINE_GMR2 = 41,
SVGA_CMD_REMAP_GMR2 = 42,
SVGA_CMD_DEAD = 43,
SVGA_CMD_DEAD_2 = 44,
SVGA_CMD_NOP = 45,
SVGA_CMD_NOP_ERROR = 46,
SVGA_CMD_MAX
} SVGAFifoCmdId;
 
#define SVGA_CMD_MAX_DATASIZE (256 * 1024)
#define SVGA_CMD_MAX_ARGS 64
 
 
/*
* SVGA_CMD_UPDATE --
*
* This is a DMA transfer which copies from the Guest Framebuffer
* (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which
* intersect with the provided virtual rectangle.
*
* This command does not support using arbitrary guest memory as a
* data source; it only works with the pre-defined GFB memory.
* This command also does not support signed virtual coordinates.
* If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with
* negative root x/y coordinates, the negative portion of those
* screens will not be reachable by this command.
*
* This command is not necessary when using framebuffer
* traces. Traces are automatically enabled if the SVGA FIFO is
* disabled, and you may explicitly enable/disable traces using
* SVGA_REG_TRACES. With traces enabled, any write to the GFB will
* automatically act as if a subsequent SVGA_CMD_UPDATE was issued.
*
* Traces and SVGA_CMD_UPDATE are the only supported ways to render
* pseudocolor screen updates. The newer Screen Object commands
* only support true color formats.
*
* Availability:
* Always available.
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 x;
uint32 y;
uint32 width;
uint32 height;
}
#include "vmware_pack_end.h"
SVGAFifoCmdUpdate;
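/*
 * Illustrative sketch (not part of the device header): emitting an
 * SVGA_CMD_UPDATE for a dirty rectangle. The vmw_fifo_reserve()/
 * vmw_fifo_commit() helpers are assumed to behave as in the rest of
 * this driver; error handling is minimal.
 */
#if 0
static void example_update_rect(struct vmw_private *dev_priv,
                                uint32 x, uint32 y, uint32 w, uint32 h)
{
    struct {
        uint32 header;
        SVGAFifoCmdUpdate body;
    } *cmd;

    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL))
        return;                        /* FIFO reservation failed */

    cmd->header      = SVGA_CMD_UPDATE;
    cmd->body.x      = x;
    cmd->body.y      = y;
    cmd->body.width  = w;
    cmd->body.height = h;

    vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
#endif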
 
 
/*
* SVGA_CMD_RECT_COPY --
*
* Perform a rectangular DMA transfer from one area of the GFB to
* another, and copy the result to any screens which intersect it.
*
* Availability:
* SVGA_CAP_RECT_COPY
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 srcX;
uint32 srcY;
uint32 destX;
uint32 destY;
uint32 width;
uint32 height;
}
#include "vmware_pack_end.h"
SVGAFifoCmdRectCopy;
 
 
/*
* SVGA_CMD_RECT_ROP_COPY --
*
* Perform a rectangular DMA transfer from one area of the GFB to
* another, and copy the result to any screens which intersect it.
* The value of ROP may only be SVGA_ROP_COPY, and this command is
* only supported for backwards compatibility reasons.
*
* Availability:
* SVGA_CAP_RECT_COPY
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 srcX;
uint32 srcY;
uint32 destX;
uint32 destY;
uint32 width;
uint32 height;
uint32 rop;
}
#include "vmware_pack_end.h"
SVGAFifoCmdRectRopCopy;
 
 
/*
* SVGA_CMD_DEFINE_CURSOR --
*
* Provide a new cursor image, as an AND/XOR mask.
*
* The recommended way to position the cursor overlay is by using
* the SVGA_FIFO_CURSOR_* registers, supported by the
* SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
*
* Availability:
* SVGA_CAP_CURSOR
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 id; /* Reserved, must be zero. */
uint32 hotspotX;
uint32 hotspotY;
uint32 width;
uint32 height;
uint32 andMaskDepth; /* Value must be 1 or equal to BITS_PER_PIXEL */
uint32 xorMaskDepth; /* Value must be 1 or equal to BITS_PER_PIXEL */
/*
* Followed by scanline data for AND mask, then XOR mask.
* Each scanline is padded to a 32-bit boundary.
*/
}
#include "vmware_pack_end.h"
SVGAFifoCmdDefineCursor;
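/*
 * Illustrative sketch (not part of the device header): size of the AND
 * and XOR mask data that follows SVGAFifoCmdDefineCursor. Each scanline
 * is padded to a 32-bit boundary, as noted in the comment above.
 */
#if 0
static inline uint32 example_cursor_mask_bytes(uint32 width, uint32 height,
                                               uint32 maskDepth)
{
    uint32 pitch = ((width * maskDepth + 31) / 32) * 4;  /* bytes per padded scanline */
    return pitch * height;
}
#endif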
 
 
/*
* SVGA_CMD_DEFINE_ALPHA_CURSOR --
*
* Provide a new cursor image, in 32-bit BGRA format.
*
* The recommended way to position the cursor overlay is by using
* the SVGA_FIFO_CURSOR_* registers, supported by the
* SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
*
* Availability:
* SVGA_CAP_ALPHA_CURSOR
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 id; /* Reserved, must be zero. */
uint32 hotspotX;
uint32 hotspotY;
uint32 width;
uint32 height;
/* Followed by scanline data */
}
#include "vmware_pack_end.h"
SVGAFifoCmdDefineAlphaCursor;
 
 
/*
* SVGA_CMD_UPDATE_VERBOSE --
*
* Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
* 'reason' value, an opaque cookie which is used by internal
* debugging tools. Third party drivers should not use this
* command.
*
* Availability:
* SVGA_CAP_EXTENDED_FIFO
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 x;
uint32 y;
uint32 width;
uint32 height;
uint32 reason;
}
#include "vmware_pack_end.h"
SVGAFifoCmdUpdateVerbose;
 
 
/*
* SVGA_CMD_FRONT_ROP_FILL --
*
* This is a hint which tells the SVGA device that the driver has
* just filled a rectangular region of the GFB with a solid
* color. Instead of reading these pixels from the GFB, the device
* can assume that they all equal 'color'. This is primarily used
* for remote desktop protocols.
*
* Availability:
* SVGA_FIFO_CAP_ACCELFRONT
*/
 
#define SVGA_ROP_COPY 0x03
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 color; /* In the same format as the GFB */
uint32 x;
uint32 y;
uint32 width;
uint32 height;
uint32 rop; /* Must be SVGA_ROP_COPY */
}
#include "vmware_pack_end.h"
SVGAFifoCmdFrontRopFill;
 
 
/*
* SVGA_CMD_FENCE --
*
* Insert a synchronization fence. When the SVGA device reaches
* this command, it will copy the 'fence' value into the
* SVGA_FIFO_FENCE register. It will also compare the fence against
* SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the
* SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will
* raise this interrupt.
*
* Availability:
* SVGA_FIFO_FENCE for this command,
* SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL.
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 fence;
}
#include "vmware_pack_end.h"
SVGAFifoCmdFence;
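/*
 * Illustrative sketch (not part of the device header): queueing a fence so
 * that completion of earlier commands can be detected by comparing the
 * sequence number against the SVGA_FIFO_FENCE register. FIFO helpers as
 * used elsewhere in this driver are assumed.
 */
#if 0
static uint32 example_insert_fence(struct vmw_private *dev_priv, uint32 seqno)
{
    struct {
        uint32 header;
        SVGAFifoCmdFence body;
    } *cmd;

    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL))
        return 0;

    cmd->header     = SVGA_CMD_FENCE;
    cmd->body.fence = seqno;
    vmw_fifo_commit(dev_priv, sizeof(*cmd));

    /* The device copies 'seqno' into SVGA_FIFO_FENCE when it reaches this
     * command; the driver may poll that register or enable the
     * SVGA_IRQFLAG_FENCE_GOAL interrupt to wait for it. */
    return seqno;
}
#endif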
 
 
/*
* SVGA_CMD_ESCAPE --
*
* Send an extended or vendor-specific variable length command.
* This is used for video overlay, third party plugins, and
* internal debugging tools. See svga_escape.h
*
* Availability:
* SVGA_FIFO_CAP_ESCAPE
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 nsid;
uint32 size;
/* followed by 'size' bytes of data */
}
#include "vmware_pack_end.h"
SVGAFifoCmdEscape;
 
 
/*
* SVGA_CMD_DEFINE_SCREEN --
*
* Define or redefine an SVGAScreenObject. See the description of
* SVGAScreenObject above. The video driver is responsible for
* generating new screen IDs. They should be small positive
* integers. The virtual device will have an implementation
* specific upper limit on the number of screen IDs
* supported. Drivers are responsible for recycling IDs. The first
* valid ID is zero.
*
* - Interaction with other registers:
*
* For backwards compatibility, when the GFB mode registers (WIDTH,
* HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device
* deletes all screens other than screen #0, and redefines screen
* #0 according to the specified mode. Drivers that use
* SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0.
*
* If you use screen objects, do not use the legacy multi-mon
* registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGAScreenObject screen; /* Variable-length according to version */
}
#include "vmware_pack_end.h"
SVGAFifoCmdDefineScreen;
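/*
 * Illustrative sketch (not part of the device header): defining screen #0
 * as a primary screen rooted at the origin. structSize carries the size of
 * the SVGAScreenObject actually written, which is how old and new layouts
 * stay binary compatible. FIFO helpers as used elsewhere in this driver
 * are assumed.
 */
#if 0
static void example_define_screen0(struct vmw_private *dev_priv,
                                   uint32 width, uint32 height)
{
    struct {
        uint32 header;
        SVGAFifoCmdDefineScreen body;
    } *cmd;

    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL))
        return;

    memset(cmd, 0, sizeof(*cmd));
    cmd->header                  = SVGA_CMD_DEFINE_SCREEN;
    cmd->body.screen.structSize  = sizeof(SVGAScreenObject);
    cmd->body.screen.id          = 0;
    cmd->body.screen.flags       = SVGA_SCREEN_MUST_BE_SET | SVGA_SCREEN_IS_PRIMARY;
    cmd->body.screen.size.width  = width;
    cmd->body.screen.size.height = height;
    cmd->body.screen.root.x      = 0;
    cmd->body.screen.root.y      = 0;

    vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
#endif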
 
 
/*
* SVGA_CMD_DESTROY_SCREEN --
*
* Destroy an SVGAScreenObject. Its ID is immediately available for
* re-use.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 screenId;
}
#include "vmware_pack_end.h"
SVGAFifoCmdDestroyScreen;
 
 
/*
* SVGA_CMD_DEFINE_GMRFB --
*
* This command sets a piece of SVGA device state called the
* Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a
* piece of light-weight state which identifies the location and
* format of an image in guest memory or in BAR1. The GMRFB has
* an arbitrary size, and it doesn't need to match the geometry
* of the GFB or any screen object.
*
* The GMRFB can be redefined as often as you like. You could
* always use the same GMRFB, you could redefine it before
* rendering from a different guest screen, or you could even
* redefine it before every blit.
*
* There are multiple ways to use this command. The simplest way is
* to use it to move the framebuffer either to elsewhere in the GFB
* (BAR1) memory region, or to a user-defined GMR. This lets a
* driver use a framebuffer allocated entirely out of normal system
* memory, which we encourage.
*
* Another way to use this command is to set up a ring buffer of
* updates in GFB memory. If a driver wants to ensure that no
* frames are skipped by the SVGA device, it is important that the
* driver not modify the source data for a blit until the device is
* done processing the command. One efficient way to accomplish
* this is to use a ring of small DMA buffers. Each buffer is used
* for one blit, then we move on to the next buffer in the
* ring. The FENCE mechanism is used to protect each buffer from
* re-use until the device is finished with that buffer's
* corresponding blit.
*
* This command does not affect the meaning of SVGA_CMD_UPDATE.
* UPDATEs always occur from the legacy GFB memory area. This
* command has no support for pseudocolor GMRFBs. Currently only
* true-color 15, 16, and 24-bit depths are supported. Future
* devices may expose capabilities for additional framebuffer
* formats.
*
* The default GMRFB value is undefined. Drivers must always send
* this command at least once before performing any blit from the
* GMRFB.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGAGuestPtr ptr;
uint32 bytesPerLine;
SVGAGMRImageFormat format;
}
#include "vmware_pack_end.h"
SVGAFifoCmdDefineGMRFB;
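/*
 * Illustrative sketch (not part of the device header): pointing the GMRFB
 * at a user-defined GMR holding a 32bpp framebuffer. 'gmrId' and 'pitch'
 * are hypothetical inputs; the SVGAGMRImageFormat field names follow the
 * definition earlier in this header, and the FIFO helpers are assumed to
 * behave as in the rest of this driver.
 */
#if 0
static void example_define_gmrfb(struct vmw_private *dev_priv,
                                 uint32 gmrId, uint32 pitch)
{
    struct {
        uint32 header;
        SVGAFifoCmdDefineGMRFB body;
    } *cmd;

    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL))
        return;

    cmd->header                   = SVGA_CMD_DEFINE_GMRFB;
    cmd->body.ptr.gmrId           = gmrId;
    cmd->body.ptr.offset          = 0;
    cmd->body.bytesPerLine        = pitch;
    cmd->body.format.bitsPerPixel = 32;
    cmd->body.format.colorDepth   = 24;
    cmd->body.format.reserved     = 0;

    vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
#endif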
 
 
/*
* SVGA_CMD_BLIT_GMRFB_TO_SCREEN --
*
* This is a guest-to-host blit. It performs a DMA operation to
* copy a rectangular region of pixels from the current GMRFB to
* a ScreenObject.
*
* The destination coordinate may be specified relative to a
* screen's origin. The provided screen ID must be valid.
*
* The SVGA device is guaranteed to finish reading from the GMRFB
* by the time any subsequent FENCE commands are reached.
*
* This command consumes an annotation. See the
* SVGA_CMD_ANNOTATION_* commands for details.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGASignedPoint srcOrigin;
SVGASignedRect destRect;
uint32 destScreenId;
}
#include "vmware_pack_end.h"
SVGAFifoCmdBlitGMRFBToScreen;
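/*
 * Illustrative sketch (not part of the device header): blitting a
 * rectangle from the current GMRFB to screen 0, with the destination
 * given relative to that screen's origin. SVGASignedPoint/SVGASignedRect
 * field names follow their definitions earlier in this header; FIFO
 * helpers are assumed as elsewhere in this driver.
 */
#if 0
static void example_blit_to_screen(struct vmw_private *dev_priv,
                                   int32 x, int32 y, int32 w, int32 h)
{
    struct {
        uint32 header;
        SVGAFifoCmdBlitGMRFBToScreen body;
    } *cmd;

    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL))
        return;

    cmd->header               = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
    cmd->body.srcOrigin.x     = x;
    cmd->body.srcOrigin.y     = y;
    cmd->body.destRect.left   = x;
    cmd->body.destRect.top    = y;
    cmd->body.destRect.right  = x + w;
    cmd->body.destRect.bottom = y + h;
    cmd->body.destScreenId    = 0;

    vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
#endif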
 
 
/*
* SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
*
* This is a host-to-guest blit. It performs a DMA operation to
* copy a rectangular region of pixels from a single ScreenObject
* back to the current GMRFB.
*
* The source coordinate is specified relative to a screen's
* origin. The provided screen ID must be valid. If any parameters
* are invalid, the resulting pixel values are undefined.
*
* The SVGA device is guaranteed to finish writing to the GMRFB by
* the time any subsequent FENCE commands are reached.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGASignedPoint destOrigin;
SVGASignedRect srcRect;
uint32 srcScreenId;
}
#include "vmware_pack_end.h"
SVGAFifoCmdBlitScreenToGMRFB;
 
 
/*
* SVGA_CMD_ANNOTATION_FILL --
*
* The annotation commands have been deprecated and should not be used
* by new drivers. They used to provide performance hints to the SVGA
* device about the content of screen updates, but newer SVGA devices
* ignore these.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGAColorBGRX color;
}
#include "vmware_pack_end.h"
SVGAFifoCmdAnnotationFill;
 
 
/*
* SVGA_CMD_ANNOTATION_COPY --
*
* The annotation commands have been deprecated and should not be used
* by new drivers. They used to provide performance hints to the SVGA
* device about the content of screen updates, but newer SVGA devices
* ignore these.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
SVGASignedPoint srcOrigin;
uint32 srcScreenId;
}
#include "vmware_pack_end.h"
SVGAFifoCmdAnnotationCopy;
 
 
/*
* SVGA_CMD_DEFINE_GMR2 --
*
* Define guest memory region v2. See the description of GMRs above.
*
* Availability:
* SVGA_CAP_GMR2
*/
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 gmrId;
uint32 numPages;
}
#include "vmware_pack_end.h"
SVGAFifoCmdDefineGMR2;
 
 
/*
* SVGA_CMD_REMAP_GMR2 --
*
* Remap guest memory region v2. See the description of GMRs above.
*
* This command allows the guest to modify a portion of an existing GMR by
* invalidating it or reassigning it to different guest physical pages.
* The pages are identified by physical page number (PPN). The pages
* are assumed to be pinned and valid for DMA operations.
*
* Description of command flags:
*
* SVGA_REMAP_GMR2_VIA_GMR: If enabled, references a PPN list in a GMR.
* The PPN list must not overlap with the remap region (this can be
* handled trivially by referencing a separate GMR). If flag is
* disabled, PPN list is appended to SVGARemapGMR command.
*
* SVGA_REMAP_GMR2_PPN64: If set, PPN list is in PPN64 format, otherwise
* it is in PPN32 format.
*
* SVGA_REMAP_GMR2_SINGLE_PPN: If set, PPN list contains a single entry.
* A single PPN can be used to invalidate a portion of a GMR or
* map it to a single guest scratch page.
*
* Availability:
* SVGA_CAP_GMR2
*/
 
typedef enum {
SVGA_REMAP_GMR2_PPN32 = 0,
SVGA_REMAP_GMR2_VIA_GMR = (1 << 0),
SVGA_REMAP_GMR2_PPN64 = (1 << 1),
SVGA_REMAP_GMR2_SINGLE_PPN = (1 << 2),
} SVGARemapGMR2Flags;
 
typedef
#include "vmware_pack_begin.h"
struct {
uint32 gmrId;
SVGARemapGMR2Flags flags;
uint32 offsetPages; /* offset in pages to begin remap */
uint32 numPages; /* number of pages to remap */
/*
* Followed by additional data depending on SVGARemapGMR2Flags.
*
* If flag SVGA_REMAP_GMR2_VIA_GMR is set, single SVGAGuestPtr follows.
* Otherwise an array of page descriptors in PPN32 or PPN64 format
* (according to flag SVGA_REMAP_GMR2_PPN64) follows. If flag
* SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
*/
}
#include "vmware_pack_end.h"
SVGAFifoCmdRemapGMR2;
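/*
 * Illustrative sketch (not part of the device header): number of bytes of
 * page data that follow an SVGAFifoCmdRemapGMR2, derived from the flag
 * rules described above.
 */
#if 0
static inline uint32 example_remap_gmr2_data_size(SVGARemapGMR2Flags flags,
                                                  uint32 numPages)
{
    if (flags & SVGA_REMAP_GMR2_VIA_GMR)
        return sizeof(SVGAGuestPtr);   /* PPN list lives in another GMR */

    if (flags & SVGA_REMAP_GMR2_SINGLE_PPN)
        numPages = 1;                  /* one entry covers the whole range */

    return numPages * ((flags & SVGA_REMAP_GMR2_PPN64) ?
                       sizeof(PPN64) : sizeof(PPN));
}
#endif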
 
 
/*
* Size of SVGA device memory such as frame buffer and FIFO.
*/
#define SVGA_VRAM_MIN_SIZE (4 * 640 * 480) /* bytes */
#define SVGA_VRAM_MIN_SIZE_3D (16 * 1024 * 1024)
#define SVGA_VRAM_MAX_SIZE (128 * 1024 * 1024)
#define SVGA_MEMORY_SIZE_MAX (1024 * 1024 * 1024)
#define SVGA_FIFO_SIZE_MAX (2 * 1024 * 1024)
#define SVGA_GRAPHICS_MEMORY_KB_MIN (32 * 1024)
#define SVGA_GRAPHICS_MEMORY_KB_MAX (2 * 1024 * 1024)
#define SVGA_GRAPHICS_MEMORY_KB_DEFAULT (256 * 1024)
 
#define SVGA_VRAM_SIZE_W2K (64 * 1024 * 1024) /* 64 MB */
 
/*
* To simplify autoDetect display configuration, support a minimum of
* two 1920x1200 monitors, 32bpp, side-by-side, optionally rotated:
* numDisplays = 2
* maxWidth = numDisplay * 1920 = 3840
* maxHeight = rotated width of single monitor = 1920
* vramSize = maxWidth * maxHeight * 4 = 29491200
*/
#define SVGA_VRAM_SIZE_AUTODETECT (32 * 1024 * 1024)
 
#if defined(VMX86_SERVER)
#define SVGA_VRAM_SIZE (4 * 1024 * 1024)
#define SVGA_VRAM_SIZE_3D (64 * 1024 * 1024)
#define SVGA_FIFO_SIZE (256 * 1024)
#define SVGA_FIFO_SIZE_3D (516 * 1024)
#define SVGA_MEMORY_SIZE_DEFAULT (160 * 1024 * 1024)
#define SVGA_AUTODETECT_DEFAULT FALSE
#else
#define SVGA_VRAM_SIZE (16 * 1024 * 1024)
#define SVGA_VRAM_SIZE_3D SVGA_VRAM_MAX_SIZE
#define SVGA_FIFO_SIZE (2 * 1024 * 1024)
#define SVGA_FIFO_SIZE_3D SVGA_FIFO_SIZE
#define SVGA_MEMORY_SIZE_DEFAULT (768 * 1024 * 1024)
#define SVGA_AUTODETECT_DEFAULT TRUE
#endif
 
#define SVGA_FIFO_SIZE_GBOBJECTS (256 * 1024)
#define SVGA_VRAM_SIZE_GBOBJECTS (4 * 1024 * 1024)
 
#endif
/drivers/video/drm/vmwgfx/device_include/svga_types.h
0,0 → 1,46
/**********************************************************
* Copyright 2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
#ifndef _VM_BASIC_TYPES_H_
#define _VM_BASIC_TYPES_H_
#include <linux/kernel.h>
 
typedef u32 uint32;
typedef s32 int32;
typedef u64 uint64;
typedef u16 uint16;
typedef s16 int16;
typedef u8 uint8;
typedef s8 int8;
 
typedef uint64 PA;
typedef uint32 PPN;
typedef uint64 PPN64;
 
typedef bool Bool;
 
#define MAX_UINT32 U32_MAX
#define MAX_UINT16 U16_MAX
 
#endif
/drivers/video/drm/vmwgfx/device_include/vm_basic_types.h
0,0 → 1,21
#ifndef _VM_BASIC_TYPES_H_
#define _VM_BASIC_TYPES_H_
#include <linux/kernel.h>
 
typedef u32 uint32;
typedef s32 int32;
typedef u64 uint64;
typedef u16 uint16;
typedef s16 int16;
typedef u8 uint8;
typedef s8 int8;
 
typedef uint64 PA;
typedef uint32 PPN;
typedef uint64 PPN64;
 
typedef bool Bool;
 
#define MAX_UINT32 U32_MAX
 
#endif
/drivers/video/drm/vmwgfx/device_include/vmware_pack_begin.h
0,0 → 1,25
/**********************************************************
* Copyright 2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
#include <linux/compiler.h>
/drivers/video/drm/vmwgfx/device_include/vmware_pack_end.h
0,0 → 1,25
/**********************************************************
* Copyright 2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
__packed
/drivers/video/drm/vmwgfx/main.c
1,69 → 1,103
#include <syscall.h>
 
#include <drm/drmP.h>
#include <drm.h>
 
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
 
#include "vmwgfx_drv.h"
 
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <linux/pci.h>
#include <syscall.h>
#include <display.h>
 
#include "bitmap.h"
#define VMW_DEV_CLOSE 0
#define VMW_DEV_INIT 1
#define VMW_DEV_READY 2
void cpu_detect1();
int kmap_init();
 
struct pci_device {
uint16_t domain;
uint8_t bus;
uint8_t dev;
uint8_t func;
uint16_t vendor_id;
uint16_t device_id;
uint16_t subvendor_id;
uint16_t subdevice_id;
uint32_t device_class;
uint8_t revision;
};
 
unsigned long volatile jiffies;
int oops_in_progress;
int x86_clflush_size;
unsigned int tsc_khz;
struct workqueue_struct *system_wq;
int driver_wq_state;
struct drm_device *main_device;
struct drm_file *drm_file_handlers[256];
int kms_modeset = 1;
static char log[256];
 
int vmw_init(void);
int kms_init(struct drm_device *dev);
void vmw_driver_thread();
void kms_update();
void cpu_detect();
 
void parse_cmdline(char *cmdline, char *log);
int _stdcall display_handler(ioctl_t *io);
void kms_update();
void vmw_fb_update(struct vmw_private *vmw_priv);
 
 
void get_pci_info(struct pci_device *dev);
int gem_getparam(struct drm_device *dev, void *data);
 
int i915_mask_update(struct drm_device *dev, void *data,
struct drm_file *file);
void vmw_driver_thread()
{
struct vmw_private *dev_priv = NULL;
struct workqueue_struct *cwq = NULL;
unsigned long irqflags;
 
printf("%s\n",__FUNCTION__);
 
static char log[256];
while(driver_wq_state == VMW_DEV_INIT)
{
jiffies = GetClockNs() / 10000000;
delay(1);
};
 
struct workqueue_struct *system_wq;
int driver_wq_state;
if( driver_wq_state == VMW_DEV_CLOSE)
{
asm volatile ("int $0x40"::"a"(-1));
};
 
int x86_clflush_size;
unsigned int tsc_khz;
dev_priv = main_device->dev_private;
cwq = system_wq;
 
int kms_modeset = 1;
while(driver_wq_state != VMW_DEV_CLOSE )
{
jiffies = GetClockNs() / 10000000;
 
u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
// kms_update();
 
spin_lock_irqsave(&cwq->lock, irqflags);
while (!list_empty(&cwq->worklist))
{
struct work_struct *work = list_entry(cwq->worklist.next,
struct work_struct, entry);
work_func_t f = work->func;
list_del_init(cwq->worklist.next);
 
spin_unlock_irqrestore(&cwq->lock, irqflags);
f(work);
spin_lock_irqsave(&cwq->lock, irqflags);
}
spin_unlock_irqrestore(&cwq->lock, irqflags);
 
vmw_fb_update(dev_priv);
delay(2);
};
 
asm volatile ("int $0x40"::"a"(-1));
}
 
u32 __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
{
static pci_dev_t device;
const struct pci_device_id *ent;
char *safecmdline;
int err = 0;
 
if(action != 1)
{
driver_wq_state = 0;
driver_wq_state = VMW_DEV_CLOSE;
return 0;
};
 
79,34 → 113,48
return 0;
}
 
dbgprintf(" vmw v3.14-rc1\n cmdline: %s\n", cmdline);
cpu_detect1();
 
cpu_detect();
dbgprintf("\ncache line size %d\n", x86_clflush_size);
err = enum_pci_devices();
if( unlikely(err != 0) )
{
dbgprintf("Device enumeration failed\n");
return 0;
}
 
enum_pci_devices();
err = kmap_init();
if( unlikely(err != 0) )
{
dbgprintf("kmap initialization failed\n");
return 0;
}
 
driver_wq_state = VMW_DEV_INIT;
CreateKernelThread(vmw_driver_thread);
err = vmw_init();
if(err)
if(unlikely(err != 0))
{
driver_wq_state = VMW_DEV_CLOSE;
dbgprintf("Epic Fail :(\n");
delay(100);
return 0;
};
kms_init(main_device);
LINE();
 
driver_wq_state = VMW_DEV_READY;
 
// kms_init(main_device);
 
err = RegService("DISPLAY", display_handler);
 
if( err != 0)
dbgprintf("Set DISPLAY handler\n");
 
driver_wq_state = 1;
 
CreateKernelThread(vmw_driver_thread);
 
return err;
};
 
 
#define CURRENT_API 0x0200 /* 2.00 */
#define COMPATIBLE_API 0x0100 /* 1.00 */
 
118,36 → 166,10
#define SRV_ENUM_MODES 1
#define SRV_SET_MODE 2
#define SRV_GET_CAPS 3
#define SRV_CMDLINE 4
 
#define SRV_CREATE_SURFACE 10
#define SRV_DESTROY_SURFACE 11
#define SRV_LOCK_SURFACE 12
#define SRV_UNLOCK_SURFACE 13
#define SRV_RESIZE_SURFACE 14
#define SRV_BLIT_BITMAP 15
#define SRV_BLIT_TEXTURE 16
#define SRV_BLIT_VIDEO 17
 
 
#define SRV_GET_PCI_INFO 20
#define SRV_GET_PARAM 21
#define SRV_I915_GEM_CREATE 22
#define SRV_DRM_GEM_CLOSE 23
#define SRV_I915_GEM_PIN 24
#define SRV_I915_GEM_SET_CACHEING 25
#define SRV_I915_GEM_GET_APERTURE 26
#define SRV_I915_GEM_PWRITE 27
#define SRV_I915_GEM_BUSY 28
#define SRV_I915_GEM_SET_DOMAIN 29
#define SRV_I915_GEM_MMAP 30
#define SRV_I915_GEM_MMAP_GTT 31
#define SRV_I915_GEM_THROTTLE 32
#define SRV_FBINFO 33
#define SRV_I915_GEM_EXECBUFFER2 34
#define SRV_MASK_UPDATE 35
 
 
 
#define check_input(size) \
if( unlikely((inp==NULL)||(io->inp_size != (size))) ) \
break;
161,8 → 183,8
struct drm_file *file;
 
int retval = -1;
u32_t *inp;
u32_t *outp;
u32 *inp;
u32 *outp;
 
inp = io->input;
outp = io->output;
199,28 → 221,7
retval = get_driver_caps((hwcaps_t*)inp);
break;
 
case SRV_CREATE_SURFACE:
// check_input(8);
// retval = create_surface(main_device, (struct io_call_10*)inp);
break;
 
case SRV_LOCK_SURFACE:
// retval = lock_surface((struct io_call_12*)inp);
break;
 
case SRV_RESIZE_SURFACE:
// retval = resize_surface((struct io_call_14*)inp);
break;
 
case SRV_BLIT_BITMAP:
// srv_blit_bitmap( inp[0], inp[1], inp[2],
// inp[3], inp[4], inp[5], inp[6]);
 
// blit_tex( inp[0], inp[1], inp[2],
// inp[3], inp[4], inp[5], inp[6]);
 
break;
 
case SRV_GET_PCI_INFO:
get_pci_info((struct pci_device *)inp);
retval = 0;
299,10 → 300,10
#define PCI_CLASS_BRIDGE_HOST 0x0600
#define PCI_CLASS_BRIDGE_ISA 0x0601
 
int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn)
int pci_scan_filter(u32 id, u32 busnr, u32 devfn)
{
u16_t vendor, device;
u32_t class;
u16 vendor, device;
u32 class;
int ret = 0;
 
vendor = id & 0xffff;
353,34 → 354,31
};
};
 
struct mtrr
{
u64 base;
u64 mask;
};
 
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
struct cpuinfo
{
/* ecx is often an input as well as an output. */
asm volatile("cpuid"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
"=d" (*edx)
: "0" (*eax), "2" (*ecx)
: "memory");
}
u64 caps;
u64 def_mtrr;
u64 mtrr_cap;
int var_mtrr_count;
int fix_mtrr_count;
struct mtrr var_mtrr[9];
char model_name[64];
};
 
static u32 deftype_lo, deftype_hi;
 
 
static inline void cpuid(unsigned int op,
unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
void cpu_detect1()
{
*eax = op;
*ecx = 0;
__cpuid(eax, ebx, ecx, edx);
}
struct cpuinfo cpuinfo;
 
void cpu_detect()
{
u32 junk, tfms, cap0, misc;
int i;
 
cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 
439,356 → 437,8
#include <linux/ctype.h>
 
 
static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
{
while (bytes) {
if (*start != value)
return (void *)start;
start++;
bytes--;
}
return NULL;
}
 
/**
* memchr_inv - Find an unmatching character in an area of memory.
* @start: The memory area
* @c: Find a character other than c
* @bytes: The size of the area.
*
* returns the address of the first character other than @c, or %NULL
* if the whole buffer contains just @c.
*/
void *memchr_inv(const void *start, int c, size_t bytes)
{
u8 value = c;
u64 value64;
unsigned int words, prefix;
 
if (bytes <= 16)
return check_bytes8(start, value, bytes);
 
value64 = value;
#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
value64 *= 0x0101010101010101;
#elif defined(ARCH_HAS_FAST_MULTIPLIER)
value64 *= 0x01010101;
value64 |= value64 << 32;
#else
value64 |= value64 << 8;
value64 |= value64 << 16;
value64 |= value64 << 32;
#endif
 
prefix = (unsigned long)start % 8;
if (prefix) {
u8 *r;
 
prefix = 8 - prefix;
r = check_bytes8(start, value, prefix);
if (r)
return r;
start += prefix;
bytes -= prefix;
}
 
words = bytes / 8;
 
while (words) {
if (*(u64 *)start != value64)
return check_bytes8(start, value, 8);
start += 8;
words--;
}
 
return check_bytes8(start, value, bytes % 8);
}
 
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
int i;
 
i = vsnprintf(buf, size, fmt, args);
 
if (likely(i < size))
return i;
if (size != 0)
return size - 1;
return 0;
}
 
 
int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
 
va_start(args, fmt);
i = vscnprintf(buf, size, fmt, args);
va_end(args);
 
return i;
}
 
 
 
#define _U 0x01 /* upper */
#define _L 0x02 /* lower */
#define _D 0x04 /* digit */
#define _C 0x08 /* cntrl */
#define _P 0x10 /* punct */
#define _S 0x20 /* white space (space/lf/tab) */
#define _X 0x40 /* hex digit */
#define _SP 0x80 /* hard space (0x20) */
 
extern const unsigned char _ctype[];
 
#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
 
#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
#define isdigit(c) ((__ismask(c)&(_D)) != 0)
#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c) ((__ismask(c)&(_L)) != 0)
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c) ((__ismask(c)&(_P)) != 0)
/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c) ((__ismask(c)&(_S)) != 0)
#define isupper(c) ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)
 
#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)
 
 
 
//const char hex_asc[] = "0123456789abcdef";
 
/**
* hex_to_bin - convert a hex digit to its real value
* @ch: ascii character represents hex digit
*
* hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
* input.
*/
int hex_to_bin(char ch)
{
if ((ch >= '0') && (ch <= '9'))
return ch - '0';
ch = tolower(ch);
if ((ch >= 'a') && (ch <= 'f'))
return ch - 'a' + 10;
return -1;
}
EXPORT_SYMBOL(hex_to_bin);
 
/**
* hex2bin - convert an ascii hexadecimal string to its binary representation
* @dst: binary result
* @src: ascii hexadecimal string
* @count: result length
*
* Return 0 on success, -1 in case of bad input.
*/
int hex2bin(u8 *dst, const char *src, size_t count)
{
while (count--) {
int hi = hex_to_bin(*src++);
int lo = hex_to_bin(*src++);
 
if ((hi < 0) || (lo < 0))
return -1;
 
*dst++ = (hi << 4) | lo;
}
return 0;
}
EXPORT_SYMBOL(hex2bin);
 
/**
* hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
* @buf: data blob to dump
* @len: number of bytes in the @buf
* @rowsize: number of bytes to print per line; must be 16 or 32
* @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
* @linebuf: where to put the converted data
* @linebuflen: total size of @linebuf, including space for terminating NUL
* @ascii: include ASCII after the hex output
*
* hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
* 16 or 32 bytes of input data converted to hex + ASCII output.
*
* Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
* to a hex + ASCII dump at the supplied memory location.
* The converted output is always NUL-terminated.
*
* E.g.:
* hex_dump_to_buffer(frame->data, frame->len, 16, 1,
* linebuf, sizeof(linebuf), true);
*
* example output buffer:
* 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
*/
void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
int groupsize, char *linebuf, size_t linebuflen,
bool ascii)
{
const u8 *ptr = buf;
u8 ch;
int j, lx = 0;
int ascii_column;
 
if (rowsize != 16 && rowsize != 32)
rowsize = 16;
 
if (!len)
goto nil;
if (len > rowsize) /* limit to one line at a time */
len = rowsize;
if ((len % groupsize) != 0) /* no mixed size output */
groupsize = 1;
 
switch (groupsize) {
case 8: {
const u64 *ptr8 = buf;
int ngroups = len / groupsize;
 
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s%16.16llx", j ? " " : "",
(unsigned long long)*(ptr8 + j));
ascii_column = 17 * ngroups + 2;
break;
}
 
case 4: {
const u32 *ptr4 = buf;
int ngroups = len / groupsize;
 
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s%8.8x", j ? " " : "", *(ptr4 + j));
ascii_column = 9 * ngroups + 2;
break;
}
 
case 2: {
const u16 *ptr2 = buf;
int ngroups = len / groupsize;
 
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s%4.4x", j ? " " : "", *(ptr2 + j));
ascii_column = 5 * ngroups + 2;
break;
}
 
default:
for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
ch = ptr[j];
linebuf[lx++] = hex_asc_hi(ch);
linebuf[lx++] = hex_asc_lo(ch);
linebuf[lx++] = ' ';
}
if (j)
lx--;
 
ascii_column = 3 * rowsize + 2;
break;
}
if (!ascii)
goto nil;
 
while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
linebuf[lx++] = ' ';
for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) {
ch = ptr[j];
linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
}
nil:
linebuf[lx++] = '\0';
}
 
/**
* print_hex_dump - print a text hex dump to syslog for a binary blob of data
* @level: kernel log level (e.g. KERN_DEBUG)
* @prefix_str: string to prefix each line with;
* caller supplies trailing spaces for alignment if desired
* @prefix_type: controls whether prefix of an offset, address, or none
* is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
* @rowsize: number of bytes to print per line; must be 16 or 32
* @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
* @buf: data blob to dump
* @len: number of bytes in the @buf
* @ascii: include ASCII after the hex output
*
* Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump
* to the kernel log at the specified kernel log level, with an optional
* leading prefix.
*
* print_hex_dump() works on one "line" of output at a time, i.e.,
* 16 or 32 bytes of input data converted to hex + ASCII output.
* print_hex_dump() iterates over the entire input @buf, breaking it into
* "line size" chunks to format and print.
*
* E.g.:
* print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
* 16, 1, frame->data, frame->len, true);
*
* Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
* 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
* Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode:
* ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~.
*/
void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
int rowsize, int groupsize,
const void *buf, size_t len, bool ascii)
{
const u8 *ptr = buf;
int i, linelen, remaining = len;
unsigned char linebuf[32 * 3 + 2 + 32 + 1];
 
if (rowsize != 16 && rowsize != 32)
rowsize = 16;
 
for (i = 0; i < len; i += rowsize) {
linelen = min(remaining, rowsize);
remaining -= rowsize;
 
hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
linebuf, sizeof(linebuf), ascii);
 
switch (prefix_type) {
case DUMP_PREFIX_ADDRESS:
printk("%s%s%p: %s\n",
level, prefix_str, ptr + i, linebuf);
break;
case DUMP_PREFIX_OFFSET:
printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
break;
default:
printk("%s%s%s\n", level, prefix_str, linebuf);
break;
}
}
}
 
void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
const void *buf, size_t len)
{
print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
buf, len, true);
}
 
 
 
 
 
 
 
 
 
#include "vmwgfx_kms.h"
 
void kms_update();
796,51 → 446,10
 
extern struct drm_device *main_device;
 
typedef struct
{
kobj_t header;
 
uint32_t *data;
uint32_t hot_x;
uint32_t hot_y;
 
struct list_head list;
void *priv;
}cursor_t;
 
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
 
struct tag_display
{
int x;
int y;
int width;
int height;
int bpp;
int vrefresh;
int pitch;
int lfb;
 
int supported_modes;
struct drm_device *ddev;
struct drm_connector *connector;
struct drm_crtc *crtc;
 
struct list_head cursors;
 
cursor_t *cursor;
int (*init_cursor)(cursor_t*);
cursor_t* (__stdcall *select_cursor)(cursor_t*);
void (*show_cursor)(int show);
void (__stdcall *move_cursor)(cursor_t *cursor, int x, int y);
void (__stdcall *restore_cursor)(int x, int y);
void (*disable_mouse)(void);
u32 mask_seqno;
u32 check_mouse;
u32 check_m_pixel;
};
 
display_t *os_display;
 
static int count_connector_modes(struct drm_connector* connector)
885,22 → 494,6
return old;
};
 
void vmw_driver_thread()
{
DRM_DEBUG_KMS("%s\n",__FUNCTION__);
 
select_cursor_kms(os_display->cursor);
 
while(driver_wq_state)
{
kms_update();
delay(2);
};
__asm__ __volatile__ (
"int $0x40"
::"a"(-1));
}
 
int kms_init(struct drm_device *dev)
{
struct drm_connector *connector;
909,7 → 502,7
struct vmw_display_unit *du;
cursor_t *cursor;
int mode_count;
u32_t ifl;
u32 ifl;
int err;
 
crtc = list_entry(dev->mode_config.crtc_list.next, typeof(*crtc), head);
966,7 → 559,7
{
struct vmw_private *dev_priv = vmw_priv(main_device);
size_t fifo_size;
u32_t ifl;
u32 ifl;
int i;
 
struct {
981,7 → 574,7
DRM_ERROR("Fifo reserve failed.\n");
return;
}
 
os_display = GetDisplay();
cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
cmd->body.x = 0;
cmd->body.y = 0;
1012,8 → 605,8
{
if( i < *count)
{
mode->width = drm_mode_width(drmmode);
mode->height = drm_mode_height(drmmode);
// mode->width = drm_mode_width(drmmode);
// mode->height = drm_mode_height(drmmode);
mode->bpp = 32;
mode->freq = drmmode->vrefresh;
i++;
1048,8 → 641,8
(mode->height != os_display->height) ||
(mode->freq != os_display->vrefresh) ) )
{
if( set_mode(os_display->ddev, os_display->connector, mode, true) )
err = 0;
// if( set_mode(os_display->ddev, os_display->connector, mode, true) )
// err = 0;
};
 
return err;
1060,7 → 653,7
struct file *filep;
int count;
 
filep = malloc(sizeof(*filep));
filep = __builtin_malloc(sizeof(*filep));
 
if(unlikely(filep == NULL))
return ERR_PTR(-ENOMEM);
1109,3 → 702,417
return page;
};
 
ktime_t ktime_get(void)
{
ktime_t t;
 
t.tv64 = GetClockNs();
 
return t;
}
 
bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
bool test_all)
{
return true;
}
 
int reservation_object_reserve_shared(struct reservation_object *obj)
{
return 0;
}
 
void reservation_object_add_shared_fence(struct reservation_object *obj,
struct fence *fence)
{};
 
void reservation_object_add_excl_fence(struct reservation_object *obj,
struct fence *fence)
{};
 
#define KMAP_MAX 256
 
static struct mutex kmap_mutex;
static struct page* kmap_table[KMAP_MAX];
static int kmap_av;
static int kmap_first;
static void* kmap_base;
 
 
int kmap_init()
{
kmap_base = AllocKernelSpace(KMAP_MAX*4096);
if(kmap_base == NULL)
return -1;
 
kmap_av = KMAP_MAX;
MutexInit(&kmap_mutex);
return 0;
};
 
void *kmap(struct page *page)
{
void *vaddr = NULL;
int i;
 
do
{
MutexLock(&kmap_mutex);
if(kmap_av != 0)
{
for(i = kmap_first; i < KMAP_MAX; i++)
{
if(kmap_table[i] == NULL)
{
kmap_av--;
kmap_first = i;
kmap_table[i] = page;
vaddr = kmap_base + (i<<12);
MapPage(vaddr,(addr_t)page,3);
break;
};
};
};
MutexUnlock(&kmap_mutex);
}while(vaddr == NULL);
 
return vaddr;
};
 
void *kmap_atomic(struct page *page) __attribute__ ((alias ("kmap")));
 
void kunmap(struct page *page)
{
void *vaddr;
int i;
 
MutexLock(&kmap_mutex);
 
for(i = 0; i < KMAP_MAX; i++)
{
if(kmap_table[i] == page)
{
kmap_av++;
if(i < kmap_first)
kmap_first = i;
kmap_table[i] = NULL;
vaddr = kmap_base + (i<<12);
MapPage(vaddr,0,0);
break;
};
};
 
MutexUnlock(&kmap_mutex);
};
 
void kunmap_atomic(void *vaddr)
{
int i;
 
MapPage(vaddr,0,0);
 
i = (vaddr - kmap_base) >> 12;
 
MutexLock(&kmap_mutex);
 
kmap_av++;
if(i < kmap_first)
kmap_first = i;
kmap_table[i] = NULL;
 
MutexUnlock(&kmap_mutex);
}
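/*
 * Usage sketch (illustrative only): temporarily map a page, fill it, and
 * release the mapping. kmap() above blocks until one of the KMAP_MAX
 * window slots becomes free, so mappings should be short-lived and always
 * paired with kunmap().
 */
#if 0
static void example_fill_page(struct page *page, u8 value)
{
    void *vaddr = kmap(page);
    memset(vaddr, value, 4096);
    kunmap(page);
}
#endif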
 
 
#include <linux/rcupdate.h>
 
struct rcu_ctrlblk {
struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */
struct rcu_head **donetail; /* ->next pointer of last "done" CB. */
struct rcu_head **curtail; /* ->next pointer of last CB. */
// RCU_TRACE(long qlen); /* Number of pending CBs. */
// RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
// RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
// RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
// RCU_TRACE(const char *name); /* Name of RCU type. */
};
 
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
.donetail = &rcu_sched_ctrlblk.rcucblist,
.curtail = &rcu_sched_ctrlblk.rcucblist,
// RCU_TRACE(.name = "rcu_sched")
};
 
static void __call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *rcu),
struct rcu_ctrlblk *rcp)
{
unsigned long flags;
 
// debug_rcu_head_queue(head);
head->func = func;
head->next = NULL;
 
local_irq_save(flags);
*rcp->curtail = head;
rcp->curtail = &head->next;
// RCU_TRACE(rcp->qlen++);
local_irq_restore(flags);
}
 
/*
* Post an RCU callback to be invoked after the end of an RCU-sched grace
* period. But since we have but one CPU, that would be after any
* quiescent state.
*/
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
__call_rcu(head, func, &rcu_sched_ctrlblk);
}
 
 
int fb_get_options(const char *name, char **option)
{
return 1;
}
 
static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
{
while (bytes) {
if (*start != value)
return (void *)start;
start++;
bytes--;
}
return NULL;
}
 
/**
* memchr_inv - Find an unmatching character in an area of memory.
* @start: The memory area
* @c: Find a character other than c
* @bytes: The size of the area.
*
* returns the address of the first character other than @c, or %NULL
* if the whole buffer contains just @c.
*/
void *memchr_inv(const void *start, int c, size_t bytes)
{
u8 value = c;
u64 value64;
unsigned int words, prefix;
 
if (bytes <= 16)
return check_bytes8(start, value, bytes);
 
value64 = value;
#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
value64 *= 0x0101010101010101;
#elif defined(ARCH_HAS_FAST_MULTIPLIER)
value64 *= 0x01010101;
value64 |= value64 << 32;
#else
value64 |= value64 << 8;
value64 |= value64 << 16;
value64 |= value64 << 32;
#endif
 
prefix = (unsigned long)start % 8;
if (prefix) {
u8 *r;
 
prefix = 8 - prefix;
r = check_bytes8(start, value, prefix);
if (r)
return r;
start += prefix;
bytes -= prefix;
}
 
words = bytes / 8;
 
while (words) {
if (*(u64 *)start != value64)
return check_bytes8(start, value, 8);
start += 8;
words--;
}
 
return check_bytes8(start, value, bytes % 8);
}
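/*
 * Usage sketch (illustrative only): memchr_inv() returns NULL when every
 * byte matches, so checking a buffer for being fully zeroed is a single
 * call.
 */
#if 0
static bool example_buffer_is_zeroed(const void *buf, size_t len)
{
    return memchr_inv(buf, 0, len) == NULL;
}
#endif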
 
 
void drm_master_put(struct drm_master **master)
{};
 
 
bool ttm_ref_object_exists(struct ttm_object_file *tfile,
struct ttm_base_object *base)
{
return true;
};
 
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
list_del_init(&wait->task_list);
return 1;
}
 
 
struct file *fd_array[32];
 
struct file *fget(unsigned int fd)
{
struct file *file;
 
file = fd_array[fd];
get_file_rcu(file);
return file;
}
 
void fput(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count))
{
 
}
}
 
struct dma_buf *dma_buf_get(int fd)
{
struct file *file;
 
file = fget(fd);
 
if (!file)
return ERR_PTR(-EBADF);
 
// if (!is_dma_buf_file(file)) {
// fput(file);
// return ERR_PTR(-EINVAL);
// }
 
return file->private_data;
}
 
int get_unused_fd_flags(unsigned flags)
{
return 1;
}
 
void fd_install(unsigned int fd, struct file *file)
{
fd_array[fd] = file;
}
 
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
int fd;
 
if (!dmabuf || !dmabuf->file)
return -EINVAL;
 
fd = get_unused_fd_flags(flags);
if (fd < 0)
return fd;
 
fd_install(fd, dmabuf->file);
 
return fd;
}
 
void dma_buf_put(struct dma_buf *dmabuf)
{
if (WARN_ON(!dmabuf || !dmabuf->file))
return;
 
fput(dmabuf->file);
}
 
 
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
struct dma_buf *dmabuf;
struct reservation_object *resv = exp_info->resv;
struct file *file;
size_t alloc_size = sizeof(struct dma_buf);
 
if (!exp_info->resv)
alloc_size += sizeof(struct reservation_object);
else
/* prevent &dma_buf[1] == dma_buf->resv */
alloc_size += 1;
 
if (WARN_ON(!exp_info->priv
|| !exp_info->ops
|| !exp_info->ops->map_dma_buf
|| !exp_info->ops->unmap_dma_buf
|| !exp_info->ops->release
|| !exp_info->ops->kmap_atomic
|| !exp_info->ops->kmap
|| !exp_info->ops->mmap)) {
return ERR_PTR(-EINVAL);
}
 
dmabuf = kzalloc(alloc_size, GFP_KERNEL);
if (!dmabuf) {
return ERR_PTR(-ENOMEM);
}
 
dmabuf->priv = exp_info->priv;
dmabuf->ops = exp_info->ops;
dmabuf->size = exp_info->size;
dmabuf->exp_name = exp_info->exp_name;
 
if (!resv) {
resv = (struct reservation_object *)&dmabuf[1];
reservation_object_init(resv);
}
// dmabuf->resv = resv;
 
// file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
// exp_info->flags);
// if (IS_ERR(file)) {
// kfree(dmabuf);
// return ERR_CAST(file);
// }
 
// file->f_mode |= FMODE_LSEEK;
// dmabuf->file = file;
 
mutex_init(&dmabuf->lock);
INIT_LIST_HEAD(&dmabuf->attachments);
 
// mutex_lock(&db_list.lock);
// list_add(&dmabuf->list_node, &db_list.head);
// mutex_unlock(&db_list.lock);
 
return dmabuf;
}
 
int dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, int dir)
{
struct scatterlist *s;
int i;
 
for_each_sg(sglist, s, nelems, i) {
s->dma_address = (dma_addr_t)sg_phys(s);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
s->dma_length = s->length;
#endif
}
 
return nelems;
}
 
void *vmalloc(unsigned long size)
{
return KernelAlloc(size);
}
 
void vfree(const void *addr)
{
KernelFree(addr);
}
/drivers/video/drm/vmwgfx/pci.c
1,13 → 1,15
#include <syscall.h>
 
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <pci.h>
#include <syscall.h>
#include <linux/slab.h>
#include <linux/pm.h>
 
extern int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn);
#include <linux/pci.h>
 
extern int pci_scan_filter(u32 id, u32 busnr, u32 devfn);
 
static LIST_HEAD(devices);
 
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
31,9 → 33,9
}
 
 
static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask)
static u32 pci_size(u32 base, u32 maxbase, u32 mask)
{
u32_t size = mask & maxbase; /* Find the significant bits */
u32 size = mask & maxbase; /* Find the significant bits */
 
if (!size)
return 0;
50,9 → 52,9
return size;
}
 
static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask)
static u64 pci_size64(u64 base, u64 maxbase, u64 mask)
{
u64_t size = mask & maxbase; /* Find the significant bits */
u64 size = mask & maxbase; /* Find the significant bits */
 
if (!size)
return 0;
69,7 → 71,7
return size;
}
 
static inline int is_64bit_memory(u32_t mask)
static inline int is_64bit_memory(u32 mask)
{
if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
(PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64))
79,15 → 81,15
 
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
u32_t pos, reg, next;
u32_t l, sz;
u32 pos, reg, next;
u32 l, sz;
struct resource *res;
 
for(pos=0; pos < howmany; pos = next)
{
u64_t l64;
u64_t sz64;
u32_t raw_sz;
u64 l64;
u64 sz64;
u32 raw_sz;
 
next = pos + 1;
 
109,7 → 111,7
if ((l & PCI_BASE_ADDRESS_SPACE) ==
PCI_BASE_ADDRESS_SPACE_MEMORY)
{
sz = pci_size(l, sz, (u32_t)PCI_BASE_ADDRESS_MEM_MASK);
sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
/*
* For 64bit prefetchable memory sz could be 0, if the
* real size is bigger than 4G, so we need to check
131,14 → 133,14
res->flags |= pci_calc_resource_flags(l);
if (is_64bit_memory(l))
{
u32_t szhi, lhi;
u32 szhi, lhi;
 
lhi = PciRead32(dev->busnr, dev->devfn, reg+4);
PciWrite32(dev->busnr, dev->devfn, reg+4, ~0);
szhi = PciRead32(dev->busnr, dev->devfn, reg+4);
PciWrite32(dev->busnr, dev->devfn, reg+4, lhi);
sz64 = ((u64_t)szhi << 32) | raw_sz;
l64 = ((u64_t)lhi << 32) | l;
sz64 = ((u64)szhi << 32) | raw_sz;
l64 = ((u64)lhi << 32) | l;
sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
next++;
 
162,7 → 164,7
{
/* 64-bit wide address, treat as disabled */
PciWrite32(dev->busnr, dev->devfn, reg,
l & ~(u32_t)PCI_BASE_ADDRESS_MEM_MASK);
l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK);
PciWrite32(dev->busnr, dev->devfn, reg+4, 0);
res->start = 0;
res->end = sz;
186,7 → 188,7
 
if (sz && sz != 0xffffffff)
{
sz = pci_size(l, sz, (u32_t)PCI_ROM_ADDRESS_MASK);
sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK);
 
if (sz)
{
202,7 → 204,7
 
static void pci_read_irq(struct pci_dev *dev)
{
u8_t irq;
u8 irq;
 
irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN);
dev->pin = irq;
214,7 → 216,7
 
int pci_setup_device(struct pci_dev *dev)
{
u32_t class;
u32 class;
 
class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION);
dev->revision = class & 0xff;
246,7 → 248,7
*/
if (class == PCI_CLASS_STORAGE_IDE)
{
u8_t progif;
u8 progif;
 
progif = PciRead8(dev->busnr, dev->devfn,PCI_CLASS_PROG);
if ((progif & 1) == 0)
311,12 → 313,12
return 0;
};
 
static pci_dev_t* pci_scan_device(u32_t busnr, int devfn)
static pci_dev_t* pci_scan_device(u32 busnr, int devfn)
{
pci_dev_t *dev;
 
u32_t id;
u8_t hdr;
u32 id;
u8 hdr;
 
int timeout = 10;
 
372,7 → 374,7
 
 
 
int pci_scan_slot(u32_t bus, int devfn)
int _pci_scan_slot(u32 bus, int devfn)
{
int func, nr = 0;
 
480,8 → 482,8
int enum_pci_devices()
{
pci_dev_t *dev;
u32_t last_bus;
u32_t bus = 0 , devfn = 0;
u32 last_bus;
u32 bus = 0 , devfn = 0;
 
 
last_bus = PciApi(1);
493,7 → 495,7
for(;bus <= last_bus; bus++)
{
for (devfn = 0; devfn < 0x100; devfn += 8)
pci_scan_slot(bus, devfn);
_pci_scan_slot(bus, devfn);
 
 
}
571,7 → 573,7
};
 
 
struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
struct pci_dev * _pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
{
pci_dev_t *dev;
 
664,20 → 666,8
}
 
 
struct pci_bus_region {
resource_size_t start;
resource_size_t end;
};
 
static inline void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
struct resource *res)
{
region->start = res->start;
region->end = res->end;
}
 
 
int pci_enable_rom(struct pci_dev *pdev)
{
struct resource *res = pdev->resource + PCI_ROM_RESOURCE;
687,7 → 677,7
if (!res->flags)
return -1;
 
pcibios_resource_to_bus(pdev, &region, res);
_pcibios_resource_to_bus(pdev, &region, res);
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_MASK;
rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE;
783,18 → 773,9
return (void __iomem *)(unsigned long)
pci_resource_start(pdev, PCI_ROM_RESOURCE);
} else {
/* assign the ROM an address if it doesn't have one */
// if (res->parent == NULL &&
// pci_assign_resource(pdev,PCI_ROM_RESOURCE))
return NULL;
// start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
// *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
// if (*size == 0)
// return NULL;
start = (loff_t)0xC0000;
*size = 0x20000; /* cover C000:0 through E000:0 */
 
/* Enable ROM space decodes */
// if (pci_enable_rom(pdev))
// return NULL;
}
}
 
831,28 → 812,6
pci_disable_rom(pdev);
}
 
#if 0
void pcibios_set_master(struct pci_dev *dev)
{
u8 lat;
 
/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
if (pci_is_pcie(dev))
return;
 
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
if (lat < 16)
lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
else if (lat > pcibios_max_latency)
lat = pcibios_max_latency;
else
return;
dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
#endif
 
 
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
u16 old_cmd, cmd;
870,6 → 829,13
dev->is_busmaster = enable;
}
 
 
/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device. The pcibios_set_master() arch
 * hook is not called in this port.
 */
void pci_set_master(struct pci_dev *dev)
{
__pci_set_master(dev, true);
876,5 → 842,230
// pcibios_set_master(dev);
}
 
/**
* pci_clear_master - disables bus-mastering for device dev
* @dev: the PCI device to disable
*/
void pci_clear_master(struct pci_dev *dev)
{
__pci_set_master(dev, false);
}
 
 
static inline int pcie_cap_version(const struct pci_dev *dev)
{
return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS;
}
 
static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
{
return true;
}
 
static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
 
return pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
type == PCI_EXP_TYPE_ENDPOINT ||
type == PCI_EXP_TYPE_LEG_END;
}
 
static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
 
return pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
(type == PCI_EXP_TYPE_DOWNSTREAM &&
dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
}
 
static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
 
return pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
type == PCI_EXP_TYPE_RC_EC;
}
 
static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
if (!pci_is_pcie(dev))
return false;
 
switch (pos) {
case PCI_EXP_FLAGS_TYPE:
return true;
case PCI_EXP_DEVCAP:
case PCI_EXP_DEVCTL:
case PCI_EXP_DEVSTA:
return pcie_cap_has_devctl(dev);
case PCI_EXP_LNKCAP:
case PCI_EXP_LNKCTL:
case PCI_EXP_LNKSTA:
return pcie_cap_has_lnkctl(dev);
case PCI_EXP_SLTCAP:
case PCI_EXP_SLTCTL:
case PCI_EXP_SLTSTA:
return pcie_cap_has_sltctl(dev);
case PCI_EXP_RTCTL:
case PCI_EXP_RTCAP:
case PCI_EXP_RTSTA:
return pcie_cap_has_rtctl(dev);
case PCI_EXP_DEVCAP2:
case PCI_EXP_DEVCTL2:
case PCI_EXP_LNKCAP2:
case PCI_EXP_LNKCTL2:
case PCI_EXP_LNKSTA2:
return pcie_cap_version(dev) > 1;
default:
return false;
}
}
 
/*
* Note that these accessor functions are only for the "PCI Express
* Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
* other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
*/
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
int ret;
 
*val = 0;
if (pos & 1)
return -EINVAL;
 
if (pcie_capability_reg_implemented(dev, pos)) {
ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
/*
* Reset *val to 0 if pci_read_config_word() fails, it may
* have been written as 0xFFFF if hardware error happens
* during pci_read_config_word().
*/
if (ret)
*val = 0;
return ret;
}
 
/*
* For Functions that do not implement the Slot Capabilities,
* Slot Status, and Slot Control registers, these spaces must
* be hardwired to 0b, with the exception of the Presence Detect
* State bit in the Slot Status register of Downstream Ports,
* which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
*/
if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
*val = PCI_EXP_SLTSTA_PDS;
}
 
return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
 
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
int ret;
 
*val = 0;
if (pos & 3)
return -EINVAL;
 
if (pcie_capability_reg_implemented(dev, pos)) {
ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
/*
* Reset *val to 0 if pci_read_config_dword() fails, it may
* have been written as 0xFFFFFFFF if hardware error happens
* during pci_read_config_dword().
*/
if (ret)
*val = 0;
return ret;
}
 
if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL &&
pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
*val = PCI_EXP_SLTSTA_PDS;
}
 
return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);
 
int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
if (pos & 1)
return -EINVAL;
 
if (!pcie_capability_reg_implemented(dev, pos))
return 0;
 
return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);
 
int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
if (pos & 3)
return -EINVAL;
 
if (!pcie_capability_reg_implemented(dev, pos))
return 0;
 
return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);
 
int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
u16 clear, u16 set)
{
int ret;
u16 val;
 
ret = pcie_capability_read_word(dev, pos, &val);
if (!ret) {
val &= ~clear;
val |= set;
ret = pcie_capability_write_word(dev, pos, val);
}
 
return ret;
}
 
 
 
int pcie_get_readrq(struct pci_dev *dev)
{
u16 ctl;
 
pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
 
return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}
EXPORT_SYMBOL(pcie_get_readrq);
 
/**
* pcie_set_readrq - set PCI Express maximum memory read request
* @dev: PCI device to query
* @rq: maximum memory read request size in bytes;
* valid values are 128, 256, 512, 1024, 2048, 4096
*
* If possible, sets the maximum memory read request size in bytes.
*/
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
u16 v;
 
if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
return -EINVAL;
 
v = (ffs(rq) - 8) << 12;
 
return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_READRQ, v);
}
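 
/*
* Editorial example (a sketch, not part of this port): the maximum read
* request size is stored as a 3-bit exponent in DEVCTL bits 14:12, so 128
* bytes encodes as 0, 256 as 1, ..., 4096 as 5. Round-tripping with the
* helpers above ("pdev" is an assumed, valid PCIe device):
*/
#if 0
static void example_readrq(struct pci_dev *pdev)
{
int old = pcie_get_readrq(pdev); /* e.g. 512 */

if (pcie_set_readrq(pdev, 4096) == 0)
printk(KERN_INFO "MRRS changed from %d to %d bytes\n", old, pcie_get_readrq(pdev));
}
#endif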
 
/drivers/video/drm/vmwgfx/vmwgfx_binding.c
0,0 → 1,1294
/**************************************************************************
*
* Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* This file implements the vmwgfx context binding manager.
* The sole reason for having to use this code is that VMware guest-backed
* contexts can be swapped out to their backing mobs by the device at any
* time, and swapped back in at any time. At swapin time, the device
* validates the context bindings to make sure they point to valid resources.
* It's this outside-of-drawcall validation (which can happen at any time)
* that makes this code necessary.
*
* We therefore need to kill any context bindings pointing to a resource
* when the resource is swapped out. Furthermore, if the vmwgfx driver has
* swapped out the context, we can't swap it in again to kill bindings because
* of backing mob reservation lockdep violations. So, as part of context
* swapout, we also kill all bindings of the context, so that they are
* already killed if a resource to which a binding points
* needs to be swapped out.
*
* Note that a resource can be pointed to by bindings from multiple contexts.
* Therefore we can't easily protect this data with a per-context mutex
* (unless we use deadlock-safe WW mutexes). Instead we use a global
* binding_mutex to protect all binding manager data.
*
* Finally, any association between a context and a global resource
* (surface, shader or even DX query) is conceptually a context binding that
* needs to be tracked by this code.
*/
 
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include "device_include/svga3d_reg.h"
 
#define VMW_BINDING_RT_BIT 0
#define VMW_BINDING_PS_BIT 1
#define VMW_BINDING_SO_BIT 2
#define VMW_BINDING_VB_BIT 3
#define VMW_BINDING_NUM_BITS 4
 
#define VMW_BINDING_PS_SR_BIT 0
 
/**
* struct vmw_ctx_binding_state - per context binding state
*
* @dev_priv: Pointer to device private structure.
* @list: linked list of individual active bindings.
* @render_targets: Render target bindings.
* @texture_units: Texture units bindings.
* @ds_view: Depth-stencil view binding.
* @so_targets: StreamOutput target bindings.
* @vertex_buffers: Vertex buffer bindings.
* @index_buffer: Index buffer binding.
* @per_shader: Per shader-type bindings.
* @dirty: Bitmap tracking per binding-type changes that have not yet
* been emitted to the device.
* @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
* have not yet been emitted to the device.
* @bind_cmd_buffer: Scratch space used to construct binding commands.
* @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer
* @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
* device binding slot of the first command data entry in @bind_cmd_buffer.
*
* Note that this structure also provides storage space for the individual
* struct vmw_ctx_binding objects, so that no dynamic allocation is needed
* for individual bindings.
*
*/
struct vmw_ctx_binding_state {
struct vmw_private *dev_priv;
struct list_head list;
struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
struct vmw_ctx_bindinfo_view ds_view;
struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS];
struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
struct vmw_ctx_bindinfo_ib index_buffer;
struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10];
 
unsigned long dirty;
DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
 
u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
u32 bind_cmd_count;
u32 bind_first_slot;
};
 
static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
bool rebind);
static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
bool rebind);
static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_binding_build_asserts(void) __attribute__ ((unused));
 
typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
 
/**
* struct vmw_binding_info - Per binding type information for the binding
* manager
*
* @size: The size of the struct binding derived from a struct vmw_ctx_bindinfo.
* @offsets: array[shader_slot] of offsets to the array[slot]
* of struct bindings for the binding type.
* @scrub_func: Pointer to the scrub function for this binding type.
*
* Holds static information to help optimize the binding manager and avoid
* an excessive amount of switch statements.
*/
struct vmw_binding_info {
size_t size;
const size_t *offsets;
vmw_scrub_func scrub_func;
};
 
/*
* A number of static variables that help determine the scrub func and the
* location of the struct vmw_ctx_bindinfo slots for each binding type.
*/
static const size_t vmw_binding_shader_offsets[] = {
offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
};
static const size_t vmw_binding_rt_offsets[] = {
offsetof(struct vmw_ctx_binding_state, render_targets),
};
static const size_t vmw_binding_tex_offsets[] = {
offsetof(struct vmw_ctx_binding_state, texture_units),
};
static const size_t vmw_binding_cb_offsets[] = {
offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
};
static const size_t vmw_binding_dx_ds_offsets[] = {
offsetof(struct vmw_ctx_binding_state, ds_view),
};
static const size_t vmw_binding_sr_offsets[] = {
offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
};
static const size_t vmw_binding_so_offsets[] = {
offsetof(struct vmw_ctx_binding_state, so_targets),
};
static const size_t vmw_binding_vb_offsets[] = {
offsetof(struct vmw_ctx_binding_state, vertex_buffers),
};
static const size_t vmw_binding_ib_offsets[] = {
offsetof(struct vmw_ctx_binding_state, index_buffer),
};
 
static const struct vmw_binding_info vmw_binding_infos[] = {
[vmw_ctx_binding_shader] = {
.size = sizeof(struct vmw_ctx_bindinfo_shader),
.offsets = vmw_binding_shader_offsets,
.scrub_func = vmw_binding_scrub_shader},
[vmw_ctx_binding_rt] = {
.size = sizeof(struct vmw_ctx_bindinfo_view),
.offsets = vmw_binding_rt_offsets,
.scrub_func = vmw_binding_scrub_render_target},
[vmw_ctx_binding_tex] = {
.size = sizeof(struct vmw_ctx_bindinfo_tex),
.offsets = vmw_binding_tex_offsets,
.scrub_func = vmw_binding_scrub_texture},
[vmw_ctx_binding_cb] = {
.size = sizeof(struct vmw_ctx_bindinfo_cb),
.offsets = vmw_binding_cb_offsets,
.scrub_func = vmw_binding_scrub_cb},
[vmw_ctx_binding_dx_shader] = {
.size = sizeof(struct vmw_ctx_bindinfo_shader),
.offsets = vmw_binding_shader_offsets,
.scrub_func = vmw_binding_scrub_dx_shader},
[vmw_ctx_binding_dx_rt] = {
.size = sizeof(struct vmw_ctx_bindinfo_view),
.offsets = vmw_binding_rt_offsets,
.scrub_func = vmw_binding_scrub_dx_rt},
[vmw_ctx_binding_sr] = {
.size = sizeof(struct vmw_ctx_bindinfo_view),
.offsets = vmw_binding_sr_offsets,
.scrub_func = vmw_binding_scrub_sr},
[vmw_ctx_binding_ds] = {
.size = sizeof(struct vmw_ctx_bindinfo_view),
.offsets = vmw_binding_dx_ds_offsets,
.scrub_func = vmw_binding_scrub_dx_rt},
[vmw_ctx_binding_so] = {
.size = sizeof(struct vmw_ctx_bindinfo_so),
.offsets = vmw_binding_so_offsets,
.scrub_func = vmw_binding_scrub_so},
[vmw_ctx_binding_vb] = {
.size = sizeof(struct vmw_ctx_bindinfo_vb),
.offsets = vmw_binding_vb_offsets,
.scrub_func = vmw_binding_scrub_vb},
[vmw_ctx_binding_ib] = {
.size = sizeof(struct vmw_ctx_bindinfo_ib),
.offsets = vmw_binding_ib_offsets,
.scrub_func = vmw_binding_scrub_ib},
};
 
/**
* vmw_cbs_context - Return a pointer to the context resource of a
* context binding state tracker.
*
* @cbs: The context binding state tracker.
*
* Provided there are any active bindings, this function will return an
* unreferenced pointer to the context resource that owns the context
* binding state tracker. If there are no active bindings, this function
* will return NULL. Note that the caller must somehow ensure that a reference
* is held on the context resource prior to calling this function.
*/
static const struct vmw_resource *
vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
{
if (list_empty(&cbs->list))
return NULL;
 
return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
ctx_list)->ctx;
}
 
/**
* vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
*
* @cbs: Pointer to a struct vmw_ctx_binding state which holds the slot.
* @bt: The binding type.
* @shader_slot: The shader slot of the binding. If none, then set to 0.
* @slot: The slot of the binding.
*/
static struct vmw_ctx_bindinfo *
vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
{
const struct vmw_binding_info *b = &vmw_binding_infos[bt];
size_t offset = b->offsets[shader_slot] + b->size*slot;
 
return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
}
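 
/*
* Worked example (editorial note): with the tables above, a DX constant-
* buffer binding (bt == vmw_ctx_binding_cb) for shader_slot 1 and slot 3
* resolves to
* offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers) +
* 3 * sizeof(struct vmw_ctx_bindinfo_cb),
* i.e. the address of cbs->per_shader[1].const_buffers[3].bi.
*/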
 
/**
* vmw_binding_drop: Stop tracking a context binding
*
* @bi: Pointer to binding tracker storage.
*
* Stops tracking a context binding, and re-initializes its storage.
* Typically used when the context binding is replaced with a binding to
* another (or the same, for that matter) resource.
*/
static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
{
list_del(&bi->ctx_list);
if (!list_empty(&bi->res_list))
list_del(&bi->res_list);
bi->ctx = NULL;
}
 
/**
* vmw_binding_add: Start tracking a context binding
*
* @cbs: Pointer to the context binding state tracker.
* @bi: Information about the binding to track.
* @shader_slot: The shader slot of the binding, if applicable; otherwise 0.
* @slot: The slot of the binding.
*
* Starts tracking the binding in the context binding
* state structure @cbs.
*/
void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *bi,
u32 shader_slot, u32 slot)
{
struct vmw_ctx_bindinfo *loc =
vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];
 
if (loc->ctx != NULL)
vmw_binding_drop(loc);
 
memcpy(loc, bi, b->size);
loc->scrubbed = false;
list_add(&loc->ctx_list, &cbs->list);
INIT_LIST_HEAD(&loc->res_list);
}
 
/**
* vmw_binding_transfer: Transfer a context binding tracking entry.
*
* @cbs: Pointer to the persistent context binding state tracker.
* @from: Staged binding state tracker the entry is transferred from.
* @bi: Information about the binding to transfer; points into @from.
*/
static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_binding_state *from,
const struct vmw_ctx_bindinfo *bi)
{
size_t offset = (unsigned long)bi - (unsigned long)from;
struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
((unsigned long) cbs + offset);
 
if (loc->ctx != NULL) {
WARN_ON(bi->scrubbed);
 
vmw_binding_drop(loc);
}
 
if (bi->res != NULL) {
memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
list_add_tail(&loc->ctx_list, &cbs->list);
list_add_tail(&loc->res_list, &loc->res->binding_head);
}
}
 
/**
* vmw_binding_state_kill - Kill all bindings associated with a
* struct vmw_ctx_binding state structure, and re-initialize the structure.
*
* @cbs: Pointer to the context binding state tracker.
*
* Emits commands to scrub all bindings associated with the
* context binding state tracker. Then re-initializes the whole structure.
*/
void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
struct vmw_ctx_bindinfo *entry, *next;
 
vmw_binding_state_scrub(cbs);
list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
vmw_binding_drop(entry);
}
 
/**
* vmw_binding_state_scrub - Scrub all bindings associated with a
* struct vmw_ctx_binding state structure.
*
* @cbs: Pointer to the context binding state tracker.
*
* Emits commands to scrub all bindings associated with the
* context binding state tracker.
*/
void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
struct vmw_ctx_bindinfo *entry;
 
list_for_each_entry(entry, &cbs->list, ctx_list) {
if (!entry->scrubbed) {
(void) vmw_binding_infos[entry->bt].scrub_func
(entry, false);
entry->scrubbed = true;
}
}
 
(void) vmw_binding_emit_dirty(cbs);
}
 
/**
* vmw_binding_res_list_kill - Kill all bindings on a
* resource binding list
*
* @head: list head of resource binding list
*
* Kills all bindings associated with a specific resource. Typically
* called before the resource is destroyed.
*/
void vmw_binding_res_list_kill(struct list_head *head)
{
struct vmw_ctx_bindinfo *entry, *next;
 
vmw_binding_res_list_scrub(head);
list_for_each_entry_safe(entry, next, head, res_list)
vmw_binding_drop(entry);
}
 
/**
* vmw_binding_res_list_scrub - Scrub all bindings on a
* resource binding list
*
* @head: list head of resource binding list
*
* Scrub all bindings associated with a specific resource. Typically
* called before the resource is evicted.
*/
void vmw_binding_res_list_scrub(struct list_head *head)
{
struct vmw_ctx_bindinfo *entry;
 
list_for_each_entry(entry, head, res_list) {
if (!entry->scrubbed) {
(void) vmw_binding_infos[entry->bt].scrub_func
(entry, false);
entry->scrubbed = true;
}
}
 
list_for_each_entry(entry, head, res_list) {
struct vmw_ctx_binding_state *cbs =
vmw_context_binding_state(entry->ctx);
 
(void) vmw_binding_emit_dirty(cbs);
}
}
 
 
/**
* vmw_binding_state_commit - Commit staged binding info
*
* @to: Pointer to the persistent binding state tracker of the context to
* commit the staged binding info to.
* @from: Staged binding info built during execbuf.
*
* Transfers binding info from a temporary structure
* (typically used by execbuf) to the persistent
* structure in the context. This can be done once commands have been
* submitted to hardware.
*/
void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
struct vmw_ctx_binding_state *from)
{
struct vmw_ctx_bindinfo *entry, *next;
 
list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
vmw_binding_transfer(to, from, entry);
vmw_binding_drop(entry);
}
}
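 
/*
* Editorial usage sketch (not part of the driver): intended commit flow
* after command submission. "ctx" is assumed to be a validated context
* resource and "staged" a temporary tracker filled in during execbuf.
*/
#if 0
static void example_commit_staged(struct vmw_resource *ctx,
struct vmw_ctx_binding_state *staged)
{
/* Once the command stream has been handed to the device, move the
* staged bindings into the context's persistent tracker. */
vmw_binding_state_commit(vmw_context_binding_state(ctx), staged);
}
#endif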
 
/**
* vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
*
* @cbs: Pointer to the context binding state tracker.
*
* Walks through the context binding list and rebinds all scrubbed
* resources.
*/
int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
{
struct vmw_ctx_bindinfo *entry;
int ret;
 
list_for_each_entry(entry, &cbs->list, ctx_list) {
if (likely(!entry->scrubbed))
continue;
 
if ((entry->res == NULL || entry->res->id ==
SVGA3D_INVALID_ID))
continue;
 
ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
if (unlikely(ret != 0))
return ret;
 
entry->scrubbed = false;
}
 
return vmw_binding_emit_dirty(cbs);
}
 
/**
* vmw_binding_scrub_shader - scrub a shader binding from a context.
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*/
static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
struct vmw_ctx_bindinfo_shader *binding =
container_of(bi, typeof(*binding), bi);
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdSetShader body;
} *cmd;
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for shader "
"unbinding.\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_SET_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = bi->ctx->id;
cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
}
 
/**
* vmw_binding_scrub_render_target - scrub a render target binding
* from a context.
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*/
static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
bool rebind)
{
struct vmw_ctx_bindinfo_view *binding =
container_of(bi, typeof(*binding), bi);
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdSetRenderTarget body;
} *cmd;
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for render target "
"unbinding.\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = bi->ctx->id;
cmd->body.type = binding->slot;
cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
cmd->body.target.face = 0;
cmd->body.target.mipmap = 0;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
}
 
/**
* vmw_binding_scrub_texture - scrub a texture binding from a context.
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*
* TODO: Possibly complement this function with a function that takes
* a list of texture bindings and combines them into a single command.
*/
static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
bool rebind)
{
struct vmw_ctx_bindinfo_tex *binding =
container_of(bi, typeof(*binding), bi);
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
SVGA3dCmdHeader header;
struct {
SVGA3dCmdSetTextureState c;
SVGA3dTextureState s1;
} body;
} *cmd;
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for texture "
"unbinding.\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
cmd->header.size = sizeof(cmd->body);
cmd->body.c.cid = bi->ctx->id;
cmd->body.s1.stage = binding->texture_stage;
cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
}
 
/**
* vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*/
static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
struct vmw_ctx_bindinfo_shader *binding =
container_of(bi, typeof(*binding), bi);
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetShader body;
} *cmd;
 
cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for DX shader "
"unbinding.\n");
return -ENOMEM;
}
cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
}
 
/**
* vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*/
static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
{
struct vmw_ctx_bindinfo_cb *binding =
container_of(bi, typeof(*binding), bi);
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetSingleConstantBuffer body;
} *cmd;
 
cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for DX shader "
"unbinding.\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
cmd->header.size = sizeof(cmd->body);
cmd->body.slot = binding->slot;
cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
if (rebind) {
cmd->body.offsetInBytes = binding->offset;
cmd->body.sizeInBytes = binding->size;
cmd->body.sid = bi->res->id;
} else {
cmd->body.offsetInBytes = 0;
cmd->body.sizeInBytes = 0;
cmd->body.sid = SVGA3D_INVALID_ID;
}
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
}
 
/**
* vmw_collect_view_ids - Build view id data for a view binding command
* without checking which bindings actually need to be emitted
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
* @bi: Pointer to where the binding info array is stored in @cbs
* @max_num: Maximum number of entries in the @bi array.
*
* Scans the @bi array for bindings and builds a buffer of view id data.
* Stops at the first non-existing binding in the @bi array.
* On output, @cbs->bind_cmd_count contains the number of bindings to be
* emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
* contains the command data.
*/
static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *bi,
u32 max_num)
{
const struct vmw_ctx_bindinfo_view *biv =
container_of(bi, struct vmw_ctx_bindinfo_view, bi);
unsigned long i;
 
cbs->bind_cmd_count = 0;
cbs->bind_first_slot = 0;
 
for (i = 0; i < max_num; ++i, ++biv) {
if (!biv->bi.ctx)
break;
 
cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
((biv->bi.scrubbed) ?
SVGA3D_INVALID_ID : biv->bi.res->id);
}
}
 
/**
* vmw_collect_dirty_view_ids - Build view id data for a view binding command
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
* @bi: Pointer to where the binding info array is stored in @cbs
* @dirty: Bitmap indicating which bindings need to be emitted.
* @max_num: Maximum number of entries in the @bi array.
*
* Scans the @bi array for bindings that need to be emitted and
* builds a buffer of view id data.
* On output, @cbs->bind_cmd_count contains the number of bindings to be
* emitted, @cbs->bind_first_slot indicates the index of the first emitted
* binding, and @cbs->bind_cmd_buffer contains the command data.
*/
static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *bi,
unsigned long *dirty,
u32 max_num)
{
const struct vmw_ctx_bindinfo_view *biv =
container_of(bi, struct vmw_ctx_bindinfo_view, bi);
unsigned long i, next_bit;
 
cbs->bind_cmd_count = 0;
i = find_first_bit(dirty, max_num);
next_bit = i;
cbs->bind_first_slot = i;
 
biv += i;
for (; i < max_num; ++i, ++biv) {
cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
((!biv->bi.ctx || biv->bi.scrubbed) ?
SVGA3D_INVALID_ID : biv->bi.res->id);
 
if (next_bit == i) {
next_bit = find_next_bit(dirty, max_num, i + 1);
if (next_bit >= max_num)
break;
}
}
}
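 
/*
* Worked example (editorial note): if @dirty has bits 2 and 4 set and
* @max_num is 8, the loop above starts at slot 2 and stops after slot 4,
* leaving bind_first_slot == 2 and bind_cmd_count == 3. Slot 3 is emitted
* as well even though it was clean, which keeps the emitted view-id range
* contiguous.
*/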
 
/**
* vmw_emit_set_sr - Issue delayed DX shader resource binding commands
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
* @shader_slot: Shader slot whose dirty shader-resource bindings are emitted.
*/
static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
int shader_slot)
{
const struct vmw_ctx_bindinfo *loc =
&cbs->per_shader[shader_slot].shader_res[0].bi;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetShaderResources body;
} *cmd;
size_t cmd_size, view_id_size;
const struct vmw_resource *ctx = vmw_cbs_context(cbs);
 
vmw_collect_dirty_view_ids(cbs, loc,
cbs->per_shader[shader_slot].dirty_sr,
SVGA3D_DX_MAX_SRVIEWS);
if (cbs->bind_cmd_count == 0)
return 0;
 
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for DX shader"
" resource binding.\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
cmd->header.size = sizeof(cmd->body) + view_id_size;
cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
cmd->body.startView = cbs->bind_first_slot;
 
memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
 
vmw_fifo_commit(ctx->dev_priv, cmd_size);
bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
cbs->bind_first_slot, cbs->bind_cmd_count);
 
return 0;
}
 
/**
* vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
*/
static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
{
const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetRenderTargets body;
} *cmd;
size_t cmd_size, view_id_size;
const struct vmw_resource *ctx = vmw_cbs_context(cbs);
 
vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for DX render-target"
" binding.\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
cmd->header.size = sizeof(cmd->body) + view_id_size;
 
if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
else
cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;
 
memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
 
vmw_fifo_commit(ctx->dev_priv, cmd_size);
 
return 0;
 
}
 
/**
* vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
* without checking which bindings actually need to be emitted
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
* @bi: Pointer to where the binding info array is stored in @cbs
* @max_num: Maximum number of entries in the @bi array.
*
* Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
* Stops at the first non-existing binding in the @bi array.
* On output, @cbs->bind_cmd_count contains the number of bindings to be
* emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
* contains the command data.
*/
static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *bi,
u32 max_num)
{
const struct vmw_ctx_bindinfo_so *biso =
container_of(bi, struct vmw_ctx_bindinfo_so, bi);
unsigned long i;
SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
 
cbs->bind_cmd_count = 0;
cbs->bind_first_slot = 0;
 
for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
++cbs->bind_cmd_count) {
if (!biso->bi.ctx)
break;
 
if (!biso->bi.scrubbed) {
so_buffer->sid = biso->bi.res->id;
so_buffer->offset = biso->offset;
so_buffer->sizeInBytes = biso->size;
} else {
so_buffer->sid = SVGA3D_INVALID_ID;
so_buffer->offset = 0;
so_buffer->sizeInBytes = 0;
}
}
}
 
/**
* vmw_emit_set_so - Issue delayed streamout binding commands
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
*/
static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
{
const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetSOTargets body;
} *cmd;
size_t cmd_size, so_target_size;
const struct vmw_resource *ctx = vmw_cbs_context(cbs);
 
vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
if (cbs->bind_cmd_count == 0)
return 0;
 
so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
cmd_size = sizeof(*cmd) + so_target_size;
cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for DX SO target"
" binding.\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
cmd->header.size = sizeof(cmd->body) + so_target_size;
memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
 
vmw_fifo_commit(ctx->dev_priv, cmd_size);
 
return 0;
 
}
 
/**
* vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
*
*/
static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
{
struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
u32 i;
int ret;
 
for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
continue;
 
ret = vmw_emit_set_sr(cbs, i);
if (ret)
break;
 
__clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
}
 
return 0;
}
 
/**
* vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
* SVGA3dCmdDXSetVertexBuffers command
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
* @bi: Pointer to where the binding info array is stored in @cbs
* @dirty: Bitmap indicating which bindings need to be emitted.
* @max_num: Maximum number of entries in the @bi array.
*
* Scans the @bi array for bindings that need to be emitted and
* builds a buffer of SVGA3dVertexBuffer data.
* On output, @cbs->bind_cmd_count contains the number of bindings to be
* emitted, @cbs->bind_first_slot indicates the index of the first emitted
* binding, and @cbs->bind_cmd_buffer contains the command data.
*/
static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *bi,
unsigned long *dirty,
u32 max_num)
{
const struct vmw_ctx_bindinfo_vb *biv =
container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
unsigned long i, next_bit;
SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;
 
cbs->bind_cmd_count = 0;
i = find_first_bit(dirty, max_num);
next_bit = i;
cbs->bind_first_slot = i;
 
biv += i;
for (; i < max_num; ++i, ++biv, ++vbs) {
if (!biv->bi.ctx || biv->bi.scrubbed) {
vbs->sid = SVGA3D_INVALID_ID;
vbs->stride = 0;
vbs->offset = 0;
} else {
vbs->sid = biv->bi.res->id;
vbs->stride = biv->stride;
vbs->offset = biv->offset;
}
cbs->bind_cmd_count++;
if (next_bit == i) {
next_bit = find_next_bit(dirty, max_num, i + 1);
if (next_bit >= max_num)
break;
}
}
}
 
/**
* vmw_emit_set_vb - Issue delayed vertex buffer binding commands
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
*
*/
static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
{
const struct vmw_ctx_bindinfo *loc =
&cbs->vertex_buffers[0].bi;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetVertexBuffers body;
} *cmd;
size_t cmd_size, set_vb_size;
const struct vmw_resource *ctx = vmw_cbs_context(cbs);
 
vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
SVGA3D_DX_MAX_VERTEXBUFFERS);
if (cbs->bind_cmd_count == 0)
return 0;
 
set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
cmd_size = sizeof(*cmd) + set_vb_size;
cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for DX vertex buffer"
" binding.\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
cmd->header.size = sizeof(cmd->body) + set_vb_size;
cmd->body.startBuffer = cbs->bind_first_slot;
 
memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
 
vmw_fifo_commit(ctx->dev_priv, cmd_size);
bitmap_clear(cbs->dirty_vb,
cbs->bind_first_slot, cbs->bind_cmd_count);
 
return 0;
}
 
/**
* vmw_binding_emit_dirty - Issue delayed binding commands
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
*
* This function issues the delayed binding commands that arise from
* previous scrub / unscrub calls. These binding commands are typically
* commands that batch a number of bindings and therefore it makes sense
* to delay them.
*/
static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
{
int ret = 0;
unsigned long hit = 0;
 
while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
< VMW_BINDING_NUM_BITS) {
 
switch (hit) {
case VMW_BINDING_RT_BIT:
ret = vmw_emit_set_rt(cbs);
break;
case VMW_BINDING_PS_BIT:
ret = vmw_binding_emit_dirty_ps(cbs);
break;
case VMW_BINDING_SO_BIT:
ret = vmw_emit_set_so(cbs);
break;
case VMW_BINDING_VB_BIT:
ret = vmw_emit_set_vb(cbs);
break;
default:
BUG();
}
if (ret)
return ret;
 
__clear_bit(hit, &cbs->dirty);
hit++;
}
 
return 0;
}
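 
/*
* Editorial note: this deferred path pairs with the scrub helpers below.
* For example, vmw_binding_scrub_vb() only marks the vertex buffer slot in
* @dirty_vb and sets VMW_BINDING_VB_BIT in @dirty; the actual
* SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command is not emitted until this
* function runs and calls vmw_emit_set_vb().
*/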
 
/**
* vmw_binding_scrub_sr - Schedule a dx shader resource binding
* scrub from a context
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*/
static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
{
struct vmw_ctx_bindinfo_view *biv =
container_of(bi, struct vmw_ctx_bindinfo_view, bi);
struct vmw_ctx_binding_state *cbs =
vmw_context_binding_state(bi->ctx);
 
__set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
__set_bit(VMW_BINDING_PS_SR_BIT,
&cbs->per_shader[biv->shader_slot].dirty);
__set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);
 
return 0;
}
 
/**
* vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
* scrub from a context
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*/
static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
{
struct vmw_ctx_binding_state *cbs =
vmw_context_binding_state(bi->ctx);
 
__set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);
 
return 0;
}
 
/**
* vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding
* scrub from a context
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*/
static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
{
struct vmw_ctx_binding_state *cbs =
vmw_context_binding_state(bi->ctx);
 
__set_bit(VMW_BINDING_SO_BIT, &cbs->dirty);
 
return 0;
}
 
/**
* vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
* scrub from a context
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*/
static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
{
struct vmw_ctx_bindinfo_vb *bivb =
container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
struct vmw_ctx_binding_state *cbs =
vmw_context_binding_state(bi->ctx);
 
__set_bit(bivb->slot, cbs->dirty_vb);
__set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);
 
return 0;
}
 
/**
* vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*/
static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
{
struct vmw_ctx_bindinfo_ib *binding =
container_of(bi, typeof(*binding), bi);
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetIndexBuffer body;
} *cmd;
 
cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for DX index buffer "
"binding.\n");
return -ENOMEM;
}
cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
cmd->header.size = sizeof(cmd->body);
if (rebind) {
cmd->body.sid = bi->res->id;
cmd->body.format = binding->format;
cmd->body.offset = binding->offset;
} else {
cmd->body.sid = SVGA3D_INVALID_ID;
cmd->body.format = 0;
cmd->body.offset = 0;
}
 
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
}
 
/**
* vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
* memory accounting.
*
* @dev_priv: Pointer to a device private structure.
*
* Returns a pointer to a newly allocated struct or an error pointer on error.
*/
struct vmw_ctx_binding_state *
vmw_binding_state_alloc(struct vmw_private *dev_priv)
{
struct vmw_ctx_binding_state *cbs;
int ret;
 
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
false, false);
if (ret)
return ERR_PTR(ret);
 
cbs = vzalloc(sizeof(*cbs));
if (!cbs) {
ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
return ERR_PTR(-ENOMEM);
}
 
cbs->dev_priv = dev_priv;
INIT_LIST_HEAD(&cbs->list);
 
return cbs;
}
 
/**
* vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
* memory accounting info.
*
* @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
*/
void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
{
struct vmw_private *dev_priv = cbs->dev_priv;
 
vfree(cbs);
ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
}
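 
/*
* Editorial usage sketch (not part of the driver): allocation and teardown
* are expected to pair as below. "dev_priv" is assumed to be a valid
* struct vmw_private pointer; error handling is minimal.
*/
#if 0
static int example_binding_state_lifetime(struct vmw_private *dev_priv)
{
struct vmw_ctx_binding_state *cbs;

cbs = vmw_binding_state_alloc(dev_priv);
if (IS_ERR(cbs))
return PTR_ERR(cbs);

/* ... track bindings with vmw_binding_add() ... */

vmw_binding_state_free(cbs);
return 0;
}
#endif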
 
/**
* vmw_binding_state_list - Get the binding list of a
* struct vmw_ctx_binding_state
*
* @cbs: Pointer to the struct vmw_ctx_binding_state
*
* Returns the binding list which can be used to traverse through the bindings
* and access the resource information of all bindings.
*/
struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
{
return &cbs->list;
}
 
/**
* vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
*
* @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
*
* Drops all bindings registered in @cbs. No device binding actions are
* performed.
*/
void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
{
struct vmw_ctx_bindinfo *entry, *next;
 
list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
vmw_binding_drop(entry);
}
 
/*
* This function is unused at run-time, and only used to hold various build
* asserts important for code optimization assumptions.
*/
static void vmw_binding_build_asserts(void)
{
BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX);
BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));
 
/*
* struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
* view id arrays.
*/
BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);
 
/*
* struct vmw_ctx_binding_state::bind_cmd_buffer is used for
* u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
*/
BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
VMW_MAX_VIEW_BINDINGS*sizeof(u32));
BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
VMW_MAX_VIEW_BINDINGS*sizeof(u32));
}
/drivers/video/drm/vmwgfx/vmwgfx_binding.h
0,0 → 1,209
/**************************************************************************
*
* Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef _VMWGFX_BINDING_H_
#define _VMWGFX_BINDING_H_
 
#include "device_include/svga3d_reg.h"
#include <linux/list.h>
 
#define VMW_MAX_VIEW_BINDINGS 128
 
struct vmw_private;
struct vmw_ctx_binding_state;
 
/*
* enum vmw_ctx_binding_type - abstract resource to context binding types
*/
enum vmw_ctx_binding_type {
vmw_ctx_binding_shader,
vmw_ctx_binding_rt,
vmw_ctx_binding_tex,
vmw_ctx_binding_cb,
vmw_ctx_binding_dx_shader,
vmw_ctx_binding_dx_rt,
vmw_ctx_binding_sr,
vmw_ctx_binding_ds,
vmw_ctx_binding_so,
vmw_ctx_binding_vb,
vmw_ctx_binding_ib,
vmw_ctx_binding_max
};
 
/**
* struct vmw_ctx_bindinfo - single binding metadata
*
* @ctx_list: List head for the context's list of bindings.
* @res_list: List head for a resource's list of bindings.
* @ctx: Non-refcounted pointer to the context that owns the binding. NULL
* indicates no binding present.
* @res: Non-refcounted pointer to the resource the binding points to. This
* is typically a surface or a view.
* @bt: Binding type.
* @scrubbed: Whether the binding has been scrubbed from the context.
*/
struct vmw_ctx_bindinfo {
struct list_head ctx_list;
struct list_head res_list;
struct vmw_resource *ctx;
struct vmw_resource *res;
enum vmw_ctx_binding_type bt;
bool scrubbed;
};
 
/**
* struct vmw_ctx_bindinfo_tex - texture stage binding metadata
*
* @bi: struct vmw_ctx_bindinfo we derive from.
* @texture_stage: Device data used to reconstruct binding command.
*/
struct vmw_ctx_bindinfo_tex {
struct vmw_ctx_bindinfo bi;
uint32 texture_stage;
};
 
/**
* struct vmw_ctx_bindinfo_shader - Shader binding metadata
*
* @bi: struct vmw_ctx_bindinfo we derive from.
* @shader_slot: Device data used to reconstruct binding command.
*/
struct vmw_ctx_bindinfo_shader {
struct vmw_ctx_bindinfo bi;
SVGA3dShaderType shader_slot;
};
 
/**
* struct vmw_ctx_bindinfo_cb - Constant buffer binding metadata
*
* @bi: struct vmw_ctx_bindinfo we derive from.
* @shader_slot: Device data used to reconstruct binding command.
* @offset: Device data used to reconstruct binding command.
* @size: Device data used to reconstruct binding command.
* @slot: Device data used to reconstruct binding command.
*/
struct vmw_ctx_bindinfo_cb {
struct vmw_ctx_bindinfo bi;
SVGA3dShaderType shader_slot;
uint32 offset;
uint32 size;
uint32 slot;
};
 
/**
* struct vmw_ctx_bindinfo_view - View binding metadata
*
* @bi: struct vmw_ctx_bindinfo we derive from.
* @shader_slot: Device data used to reconstruct binding command.
* @slot: Device data used to reconstruct binding command.
*/
struct vmw_ctx_bindinfo_view {
struct vmw_ctx_bindinfo bi;
SVGA3dShaderType shader_slot;
uint32 slot;
};
 
/**
* struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
*
* @bi: struct vmw_ctx_bindinfo we derive from.
* @offset: Device data used to reconstruct binding command.
* @size: Device data used to reconstruct binding command.
* @slot: Device data used to reconstruct binding command.
*/
struct vmw_ctx_bindinfo_so {
struct vmw_ctx_bindinfo bi;
uint32 offset;
uint32 size;
uint32 slot;
};
 
/**
* struct vmw_ctx_bindinfo_vb - Vertex buffer binding metadata
*
* @bi: struct vmw_ctx_bindinfo we derive from.
* @offset: Device data used to reconstruct binding command.
* @stride: Device data used to reconstruct binding command.
* @slot: Device data used to reconstruct binding command.
*/
struct vmw_ctx_bindinfo_vb {
struct vmw_ctx_bindinfo bi;
uint32 offset;
uint32 stride;
uint32 slot;
};
 
/**
* struct vmw_ctx_bindinfo_ib - Index buffer binding metadata
*
* @bi: struct vmw_ctx_bindinfo we derive from.
* @offset: Device data used to reconstruct binding command.
* @format: Device data used to reconstruct binding command.
*/
struct vmw_ctx_bindinfo_ib {
struct vmw_ctx_bindinfo bi;
uint32 offset;
uint32 format;
};
 
/**
* struct vmw_dx_shader_bindings - per shader type context binding state
*
* @shader: The shader binding for this shader type
* @const_buffers: Const buffer bindings for this shader type.
* @shader_res: Shader resource view bindings for this shader type.
* @dirty_sr: Bitmap tracking individual shader resource bindings changes
* that have not yet been emitted to the device.
* @dirty: Bitmap tracking per-binding type binding changes that have not
* yet been emitted to the device.
*/
struct vmw_dx_shader_bindings {
struct vmw_ctx_bindinfo_shader shader;
struct vmw_ctx_bindinfo_cb const_buffers[SVGA3D_DX_MAX_CONSTBUFFERS];
struct vmw_ctx_bindinfo_view shader_res[SVGA3D_DX_MAX_SRVIEWS];
DECLARE_BITMAP(dirty_sr, SVGA3D_DX_MAX_SRVIEWS);
unsigned long dirty;
};
 
extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *ci,
u32 shader_slot, u32 slot);
extern void
vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
struct vmw_ctx_binding_state *from);
extern void vmw_binding_res_list_kill(struct list_head *head);
extern void vmw_binding_res_list_scrub(struct list_head *head);
extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs);
extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs);
extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
extern struct vmw_ctx_binding_state *
vmw_binding_state_alloc(struct vmw_private *dev_priv);
extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
extern struct list_head *
vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);
 
#endif
/drivers/video/drm/vmwgfx/vmwgfx_buffer.c
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
30,33 → 30,55
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
 
static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_CACHED;
static struct ttm_place vram_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};
 
static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_NO_EVICT;
static struct ttm_place vram_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
 
static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
TTM_PL_FLAG_CACHED;
static struct ttm_place sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};
 
static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM |
TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_NO_EVICT;
static struct ttm_place sys_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
 
static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
TTM_PL_FLAG_CACHED;
static struct ttm_place gmr_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};
 
static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_NO_EVICT;
static struct ttm_place gmr_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
 
static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB |
TTM_PL_FLAG_CACHED;
static struct ttm_place mob_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};
 
struct ttm_placement vmw_vram_placement = {
static struct ttm_place mob_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
 
struct ttm_placement vmw_vram_placement = {
.num_placement = 1,
.placement = &vram_placement_flags,
.num_busy_placement = 1,
63,19 → 85,31
.busy_placement = &vram_placement_flags
};
 
static uint32_t vram_gmr_placement_flags[] = {
TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
static struct ttm_place vram_gmr_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
}, {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
}
};
 
static uint32_t gmr_vram_placement_flags[] = {
VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
static struct ttm_place gmr_vram_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
}, {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
}
};
 
struct ttm_placement vmw_vram_gmr_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 2,
.placement = vram_gmr_placement_flags,
.num_busy_placement = 1,
82,14 → 116,21
.busy_placement = &gmr_placement_flags
};
 
static uint32_t vram_gmr_ne_placement_flags[] = {
TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
static struct ttm_place vram_gmr_ne_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_NO_EVICT
}, {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_NO_EVICT
}
};
 
struct ttm_placement vmw_vram_gmr_ne_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 2,
.placement = vram_gmr_ne_placement_flags,
.num_busy_placement = 1,
97,8 → 138,6
};
 
struct ttm_placement vmw_vram_sys_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.placement = &vram_placement_flags,
.num_busy_placement = 1,
106,8 → 145,6
};
 
struct ttm_placement vmw_vram_ne_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.placement = &vram_ne_placement_flags,
.num_busy_placement = 1,
115,8 → 152,6
};
 
struct ttm_placement vmw_sys_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.placement = &sys_placement_flags,
.num_busy_placement = 1,
124,8 → 159,6
};
 
struct ttm_placement vmw_sys_ne_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.placement = &sys_ne_placement_flags,
.num_busy_placement = 1,
132,16 → 165,27
.busy_placement = &sys_ne_placement_flags
};
 
static uint32_t evictable_placement_flags[] = {
TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
static struct ttm_place evictable_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
}, {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
}, {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
}, {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
}
};
 
struct ttm_placement vmw_evictable_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 4,
.placement = evictable_placement_flags,
.num_busy_placement = 1,
149,8 → 193,6
};
 
struct ttm_placement vmw_srf_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.num_busy_placement = 2,
.placement = &gmr_placement_flags,
158,8 → 200,6
};
 
struct ttm_placement vmw_mob_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.num_busy_placement = 1,
.placement = &mob_placement_flags,
166,6 → 206,13
.busy_placement = &mob_placement_flags
};
 
struct ttm_placement vmw_mob_ne_placement = {
.num_placement = 1,
.num_busy_placement = 1,
.placement = &mob_ne_placement_flags,
.busy_placement = &mob_ne_placement_flags
};
 
struct vmw_ttm_tt {
struct ttm_dma_tt dma_ttm;
struct vmw_private *dev_priv;
768,48 → 815,10
}
 
/**
* FIXME: We're using the old vmware polling method to sync.
* Do this with fences instead.
*/
 
static void *vmw_sync_obj_ref(void *sync_obj)
{
 
return (void *)
vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
}
 
static void vmw_sync_obj_unref(void **sync_obj)
{
vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}
 
static int vmw_sync_obj_flush(void *sync_obj)
{
vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
return 0;
}
 
static bool vmw_sync_obj_signaled(void *sync_obj)
{
return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
DRM_VMW_FENCE_FLAG_EXEC);
 
}
 
static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
DRM_VMW_FENCE_FLAG_EXEC,
lazy, interruptible,
VMW_FENCE_WAIT_TIMEOUT);
}
 
/**
* vmw_move_notify - TTM move_notify_callback
*
* @bo: The TTM buffer object about to move.
* @mem: The truct ttm_mem_reg indicating to what memory
* @mem: The struct ttm_mem_reg indicating to what memory
* region the move is taking place.
*
* Calls move_notify for all subsystems needing it.
829,11 → 838,7
*/
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
 
// spin_lock(&bdev->fence_lock);
// ttm_bo_wait(bo, false, false, false);
// spin_unlock(&bdev->fence_lock);
ttm_bo_wait(bo, false, false, false);
}
 
 
846,11 → 851,6
.evict_flags = vmw_evict_flags,
.move = NULL,
.verify_access = vmw_verify_access,
.sync_obj_signaled = vmw_sync_obj_signaled,
.sync_obj_wait = vmw_sync_obj_wait,
.sync_obj_flush = vmw_sync_obj_flush,
.sync_obj_unref = vmw_sync_obj_unref,
.sync_obj_ref = vmw_sync_obj_ref,
.move_notify = vmw_move_notify,
.swap_notify = vmw_swap_notify,
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
857,237 → 857,3
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
};
 
 
struct scatterlist *sg_next(struct scatterlist *sg)
{
if (sg_is_last(sg))
return NULL;
 
sg++;
if (unlikely(sg_is_chain(sg)))
sg = sg_chain_ptr(sg);
 
return sg;
}
 
 
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
sg_free_fn *free_fn)
{
struct scatterlist *sgl, *next;
 
if (unlikely(!table->sgl))
return;
 
sgl = table->sgl;
while (table->orig_nents) {
unsigned int alloc_size = table->orig_nents;
unsigned int sg_size;
 
/*
* If we have more than max_ents segments left,
* then assign 'next' to the sg table after the current one.
* sg_size is then one less than alloc size, since the last
* element is the chain pointer.
*/
if (alloc_size > max_ents) {
next = sg_chain_ptr(&sgl[max_ents - 1]);
alloc_size = max_ents;
sg_size = alloc_size - 1;
} else {
sg_size = alloc_size;
next = NULL;
}
 
table->orig_nents -= sg_size;
kfree(sgl);
sgl = next;
}
 
table->sgl = NULL;
}
 
void sg_free_table(struct sg_table *table)
{
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL);
}
 
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
struct scatterlist *sg, *prv;
unsigned int left;
unsigned int max_ents = SG_MAX_SINGLE_ALLOC;
 
#ifndef ARCH_HAS_SG_CHAIN
BUG_ON(nents > max_ents);
#endif
 
memset(table, 0, sizeof(*table));
 
left = nents;
prv = NULL;
do {
unsigned int sg_size, alloc_size = left;
 
if (alloc_size > max_ents) {
alloc_size = max_ents;
sg_size = alloc_size - 1;
} else
sg_size = alloc_size;
 
left -= sg_size;
 
sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask);
if (unlikely(!sg)) {
/*
* Adjust entry count to reflect that the last
* entry of the previous table won't be used for
* linkage. Without this, sg_kfree() may get
* confused.
*/
if (prv)
table->nents = ++table->orig_nents;
 
goto err;
}
 
sg_init_table(sg, alloc_size);
table->nents = table->orig_nents += sg_size;
 
/*
* If this is the first mapping, assign the sg table header.
* If this is not the first mapping, chain previous part.
*/
if (prv)
sg_chain(prv, max_ents, sg);
else
table->sgl = sg;
 
/*
* If no more entries after this one, mark the end
*/
if (!left)
sg_mark_end(&sg[sg_size - 1]);
 
prv = sg;
} while (left);
 
return 0;
 
err:
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL);
 
return -ENOMEM;
}
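 
/*
 * Hedged usage sketch (not part of the driver): allocate a three-entry
 * scatter/gather table, point each entry at one page, then free the table.
 * The 'pages' array is a placeholder supplied by the caller.
 */
static int example_build_sg_table(struct page **pages)
{
	struct sg_table st;
	struct scatterlist *sg;
	int i;
	int ret;

	ret = sg_alloc_table(&st, 3, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	for_each_sg(st.sgl, sg, st.orig_nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... hand &st to dma_map_sg() or drm_clflush_sg() here ... */

	sg_free_table(&st);
	return 0;
}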
 
 
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
{
unsigned int i;
for (i = 0; i < nents; i++)
sgl[i].sg_magic = SG_MAGIC;
}
#endif
sg_mark_end(&sgl[nents - 1]);
}
 
 
void __sg_page_iter_start(struct sg_page_iter *piter,
struct scatterlist *sglist, unsigned int nents,
unsigned long pgoffset)
{
piter->__pg_advance = 0;
piter->__nents = nents;
 
piter->sg = sglist;
piter->sg_pgoffset = pgoffset;
}
 
static int sg_page_count(struct scatterlist *sg)
{
return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}
 
bool __sg_page_iter_next(struct sg_page_iter *piter)
{
if (!piter->__nents || !piter->sg)
return false;
 
piter->sg_pgoffset += piter->__pg_advance;
piter->__pg_advance = 1;
 
while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
piter->sg_pgoffset -= sg_page_count(piter->sg);
piter->sg = sg_next(piter->sg);
if (!--piter->__nents || !piter->sg)
return false;
}
 
return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
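 
/*
 * Hedged sketch: walk every page referenced by an sg_table with the
 * iterator primitives above, similar to what drm_clflush_sg() does.
 * Assumes the sg_page_iter_page() helper from scatterlist.h is available.
 */
static void example_walk_sg_pages(struct sg_table *st)
{
	struct sg_page_iter iter;

	__sg_page_iter_start(&iter, st->sgl, st->nents, 0);
	while (__sg_page_iter_next(&iter)) {
		struct page *page = sg_page_iter_page(&iter);

		/* flush, map or otherwise touch 'page' here */
		(void) page;
	}
}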
 
 
int sg_alloc_table_from_pages(struct sg_table *sgt,
struct page **pages, unsigned int n_pages,
unsigned long offset, unsigned long size,
gfp_t gfp_mask)
{
unsigned int chunks;
unsigned int i;
unsigned int cur_page;
int ret;
struct scatterlist *s;
 
/* compute number of contiguous chunks */
chunks = 1;
for (i = 1; i < n_pages; ++i)
if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
++chunks;
 
ret = sg_alloc_table(sgt, chunks, gfp_mask);
if (unlikely(ret))
return ret;
 
/* merging chunks and putting them into the scatterlist */
cur_page = 0;
for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
unsigned long chunk_size;
unsigned int j;
 
/* look for the end of the current chunk */
for (j = cur_page + 1; j < n_pages; ++j)
if (page_to_pfn(pages[j]) !=
page_to_pfn(pages[j - 1]) + 1)
break;
 
chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
size -= chunk_size;
offset = 0;
cur_page = j;
}
 
return 0;
}
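 
/*
 * Hedged sketch: build a table directly from an array of pages.  Runs of
 * physically contiguous pages are merged, so a fully contiguous buffer
 * ends up as a single scatterlist entry.  'pages' and 'n_pages' are
 * placeholders.
 */
static int example_table_from_pages(struct page **pages, unsigned int n_pages)
{
	struct sg_table st;
	int ret;

	ret = sg_alloc_table_from_pages(&st, pages, n_pages, 0,
					(unsigned long)n_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	/* ... use the table ... */
	sg_free_table(&st);
	return 0;
}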
 
int dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, int dir)
{
struct scatterlist *s;
int i;
 
for_each_sg(sglist, s, nelems, i) {
s->dma_address = (dma_addr_t)sg_phys(s);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
s->dma_length = s->length;
#endif
}
 
return nelems;
}
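 
/*
 * Hedged sketch of a typical caller of the stub above: this port simply
 * mirrors physical addresses into dma_address and ignores the direction
 * argument, so 0 is passed for it.  'dev' is a placeholder device pointer.
 */
static int example_map_for_device(struct device *dev, struct sg_table *st)
{
	int nents = dma_map_sg(dev, st->sgl, st->orig_nents, 0);

	if (nents == 0)
		return -ENOMEM;

	st->nents = nents;
	return 0;
}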
 
/drivers/video/drm/vmwgfx/vmwgfx_cmdbuf.c
0,0 → 1,1315
/**************************************************************************
*
* Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_api.h"
 
/*
* Size of inline command buffers. Try to make sure that a page size is a
* multiple of the DMA pool allocation size.
*/
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
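 
/*
 * Sizing note (an illustration, assuming sizeof(SVGACBHeader) <= 64):
 * ALIGN(sizeof(SVGACBHeader), 64) evaluates to 64, so
 * VMW_CMDBUF_INLINE_SIZE is 1024 - 64 = 960 bytes and a struct
 * vmw_cmdbuf_dheader occupies exactly 1024 bytes, letting four of them
 * share one 4 KiB DMA pool page.
 */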
 
/**
* struct vmw_cmdbuf_context - Command buffer context queues
*
* @submitted: List of command buffers that have been submitted to the
* manager but not yet submitted to hardware.
* @hw_submitted: List of command buffers submitted to hardware.
* @preempted: List of preempted command buffers.
* @num_hw_submitted: Number of buffers currently being processed by hardware
*/
struct vmw_cmdbuf_context {
struct list_head submitted;
struct list_head hw_submitted;
struct list_head preempted;
unsigned num_hw_submitted;
};
 
/**
* struct vmw_cmdbuf_man: - Command buffer manager
*
* @cur_mutex: Mutex protecting the command buffer used for incremental small
* kernel command submissions, @cur.
* @space_mutex: Mutex to protect against starvation when we allocate
* main pool buffer space.
* @work: A struct work_struct implementing command buffer error handling.
* Immutable.
* @dev_priv: Pointer to the device private struct. Immutable.
* @ctx: Array of command buffer context queues. The queues and the context
* data is protected by @lock.
* @error: List of command buffers that have caused device errors.
* Protected by @lock.
* @mm: Range manager for the command buffer space. Manager allocations and
* frees are protected by @lock.
* @cmd_space: Buffer object for the command buffer space, unless we were
* able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
* @map_obj: Mapping state for @cmd_space. Immutable.
* @map: Pointer to command buffer space. May be a mapped buffer object or
* a contiguous coherent DMA memory allocation. Immutable.
* @cur: Command buffer for small kernel command submissions. Protected by
* the @cur_mutex.
* @cur_pos: Space already used in @cur. Protected by @cur_mutex.
* @default_size: Default size for the @cur command buffer. Immutable.
* @max_hw_submitted: Max number of in-flight command buffers the device can
* handle. Immutable.
* @lock: Spinlock protecting command submission queues.
* @headers: Pool of DMA memory for device command buffer headers.
* Internal protection.
* @dheaders: Pool of DMA memory for device command buffer headers with trailing
* space for inline data. Internal protection.
* @tasklet: Tasklet struct for irq processing. Immutable.
* @alloc_queue: Wait queue for processes waiting to allocate command buffer
* space.
* @idle_queue: Wait queue for processes waiting for command buffer idle.
* @irq_on: Whether the process function has requested irq to be turned on.
* Protected by @lock.
* @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
* allocation. Immutable.
* @has_pool: Has a large pool of DMA memory which allows larger allocations.
* Typically this is false only during bootstrap.
* @handle: DMA address handle for the command buffer space if @using_mob is
* false. Immutable.
* @size: The size of the command buffer space. Immutable.
*/
struct vmw_cmdbuf_man {
struct mutex cur_mutex;
struct mutex space_mutex;
struct work_struct work;
struct vmw_private *dev_priv;
struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
struct list_head error;
struct drm_mm mm;
struct ttm_buffer_object *cmd_space;
struct ttm_bo_kmap_obj map_obj;
u8 *map;
struct vmw_cmdbuf_header *cur;
size_t cur_pos;
size_t default_size;
unsigned max_hw_submitted;
spinlock_t lock;
struct dma_pool *headers;
struct dma_pool *dheaders;
// struct tasklet_struct tasklet;
wait_queue_head_t alloc_queue;
wait_queue_head_t idle_queue;
bool irq_on;
bool using_mob;
bool has_pool;
dma_addr_t handle;
size_t size;
};
 
/**
* struct vmw_cmdbuf_header - Command buffer metadata
*
* @man: The command buffer manager.
* @cb_header: Device command buffer header, allocated from a DMA pool.
* @cb_context: The device command buffer context.
* @list: List head for attaching to the manager lists.
* @node: The range manager node.
* @handle: The DMA address of @cb_header. Handed to the device on command
* buffer submission.
* @cmd: Pointer to the command buffer space of this buffer.
* @size: Size of the command buffer space of this buffer.
* @reserved: Reserved space of this buffer.
* @inline_space: Whether inline command buffer space is used.
*/
struct vmw_cmdbuf_header {
struct vmw_cmdbuf_man *man;
SVGACBHeader *cb_header;
SVGACBContext cb_context;
struct list_head list;
struct drm_mm_node node;
dma_addr_t handle;
u8 *cmd;
size_t size;
size_t reserved;
bool inline_space;
};
 
/**
* struct vmw_cmdbuf_dheader - Device command buffer header with inline
* command buffer space.
*
* @cb_header: Device command buffer header.
* @cmd: Inline command buffer space.
*/
struct vmw_cmdbuf_dheader {
SVGACBHeader cb_header;
u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};
 
/**
* struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
*
* @page_size: Size of requested command buffer space in pages.
* @node: Pointer to the range manager node.
* @done: True if this allocation has succeeded.
*/
struct vmw_cmdbuf_alloc_info {
size_t page_size;
struct drm_mm_node *node;
bool done;
};
 
/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
++(_i), ++(_ctx))
 
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
 
 
/**
* vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
*
* @man: The command buffer manager.
* @interruptible: Whether to wait interruptible when locking.
*/
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
if (interruptible) {
if (mutex_lock_interruptible(&man->cur_mutex))
return -ERESTARTSYS;
} else {
mutex_lock(&man->cur_mutex);
}
 
return 0;
}
 
/**
* vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
*
* @man: The command buffer manager.
*/
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
mutex_unlock(&man->cur_mutex);
}
 
/**
* vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
* been used for the device context with inline command buffers.
* Need not be called locked.
*
* @header: Pointer to the header to free.
*/
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
struct vmw_cmdbuf_dheader *dheader;
 
if (WARN_ON_ONCE(!header->inline_space))
return;
 
dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
cb_header);
dma_pool_free(header->man->dheaders, dheader, header->handle);
kfree(header);
}
 
/**
* __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
* associated structures.
*
* @header: Pointer to the header to free.
*
* For internal use. Must be called with man::lock held.
*/
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
struct vmw_cmdbuf_man *man = header->man;
 
if (header->inline_space) {
vmw_cmdbuf_header_inline_free(header);
return;
}
 
drm_mm_remove_node(&header->node);
wake_up_all(&man->alloc_queue);
if (header->cb_header)
dma_pool_free(man->headers, header->cb_header,
header->handle);
kfree(header);
}
 
/**
* vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
* associated structures.
*
* @header: Pointer to the header to free.
*/
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
struct vmw_cmdbuf_man *man = header->man;
 
/* Avoid locking if inline_space */
if (header->inline_space) {
vmw_cmdbuf_header_inline_free(header);
return;
}
spin_lock(&man->lock);
__vmw_cmdbuf_header_free(header);
spin_unlock(&man->lock);
}
 
 
/**
* vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
*
* @header: The header of the buffer to submit.
*/
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
struct vmw_cmdbuf_man *man = header->man;
u32 val;
 
if (sizeof(header->handle) > 4)
val = (header->handle >> 32);
else
val = 0;
vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
 
val = (header->handle & 0xFFFFFFFFULL);
val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
 
return header->cb_header->status;
}
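 
/*
 * Worked example with hypothetical values: for a DMA handle of
 * 0x0000000123456000 submitted on SVGA_CB_CONTEXT_0, the writes above are
 *   SVGA_REG_COMMAND_HIGH <- 0x00000001
 *   SVGA_REG_COMMAND_LOW  <- 0x23456000 | SVGA_CB_CONTEXT_0
 * i.e. the low register carries both the low address bits and the context
 * id, which fits because pool-allocated headers are 64-byte aligned.
 */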
 
/**
* vmw_cmdbuf_ctx_init: Initialize a command buffer context.
*
* @ctx: The command buffer context to initialize
*/
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
INIT_LIST_HEAD(&ctx->hw_submitted);
INIT_LIST_HEAD(&ctx->submitted);
INIT_LIST_HEAD(&ctx->preempted);
ctx->num_hw_submitted = 0;
}
 
/**
* vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
* context.
*
* @man: The command buffer manager.
* @ctx: The command buffer context.
*
* Submits command buffers to hardware until there are no more command
* buffers to submit or the hardware can't handle more command buffers.
*/
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
struct vmw_cmdbuf_context *ctx)
{
while (ctx->num_hw_submitted < man->max_hw_submitted &&
!list_empty(&ctx->submitted)) {
struct vmw_cmdbuf_header *entry;
SVGACBStatus status;
 
entry = list_first_entry(&ctx->submitted,
struct vmw_cmdbuf_header,
list);
 
status = vmw_cmdbuf_header_submit(entry);
 
/* This should never happen */
if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
entry->cb_header->status = SVGA_CB_STATUS_NONE;
break;
}
 
list_del(&entry->list);
list_add_tail(&entry->list, &ctx->hw_submitted);
ctx->num_hw_submitted++;
}
 
}
 
/**
* vmw_cmdbuf_ctx_process: Process a command buffer context.
*
* @man: The command buffer manager.
* @ctx: The command buffer context.
*
* Submit command buffers to hardware if possible, and process finished
* buffers. Typically freeing them, but on preemption or error take
* appropriate action. Wake up waiters if appropriate.
*/
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
struct vmw_cmdbuf_context *ctx,
int *notempty)
{
struct vmw_cmdbuf_header *entry, *next;
 
vmw_cmdbuf_ctx_submit(man, ctx);
 
list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
SVGACBStatus status = entry->cb_header->status;
 
if (status == SVGA_CB_STATUS_NONE)
break;
 
list_del(&entry->list);
wake_up_all(&man->idle_queue);
ctx->num_hw_submitted--;
switch (status) {
case SVGA_CB_STATUS_COMPLETED:
__vmw_cmdbuf_header_free(entry);
break;
case SVGA_CB_STATUS_COMMAND_ERROR:
case SVGA_CB_STATUS_CB_HEADER_ERROR:
list_add_tail(&entry->list, &man->error);
schedule_work(&man->work);
break;
case SVGA_CB_STATUS_PREEMPTED:
list_add(&entry->list, &ctx->preempted);
break;
default:
WARN_ONCE(true, "Undefined command buffer status.\n");
__vmw_cmdbuf_header_free(entry);
break;
}
}
 
vmw_cmdbuf_ctx_submit(man, ctx);
if (!list_empty(&ctx->submitted))
(*notempty)++;
}
 
/**
* vmw_cmdbuf_man_process - Process all command buffer contexts and
* switch on and off irqs as appropriate.
*
* @man: The command buffer manager.
*
* Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
* command buffers left that are not submitted to hardware, make sure
* IRQ handling is turned on. Otherwise, make sure it's turned off.
*/
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
int notempty;
struct vmw_cmdbuf_context *ctx;
int i;
 
retry:
notempty = 0;
for_each_cmdbuf_ctx(man, i, ctx)
vmw_cmdbuf_ctx_process(man, ctx, &notempty);
 
if (man->irq_on && !notempty) {
vmw_generic_waiter_remove(man->dev_priv,
SVGA_IRQFLAG_COMMAND_BUFFER,
&man->dev_priv->cmdbuf_waiters);
man->irq_on = false;
} else if (!man->irq_on && notempty) {
vmw_generic_waiter_add(man->dev_priv,
SVGA_IRQFLAG_COMMAND_BUFFER,
&man->dev_priv->cmdbuf_waiters);
man->irq_on = true;
 
/* Rerun in case we just missed an irq. */
goto retry;
}
}
 
/**
* vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
* command buffer context
*
* @man: The command buffer manager.
* @header: The header of the buffer to submit.
* @cb_context: The command buffer context to use.
*
* This function adds @header to the "submitted" queue of the command
* buffer context identified by @cb_context. It then calls the command buffer
* manager processing to potentially submit the buffer to hardware.
* @man->lock needs to be held when calling this function.
*/
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
struct vmw_cmdbuf_header *header,
SVGACBContext cb_context)
{
if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
header->cb_header->dxContext = 0;
header->cb_context = cb_context;
list_add_tail(&header->list, &man->ctx[cb_context].submitted);
 
vmw_cmdbuf_man_process(man);
}
 
/**
* vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
* handler implemented as a tasklet.
*
* @data: Tasklet closure. A pointer to the command buffer manager cast to
* an unsigned long.
*
* The bottom half (tasklet) of the interrupt handler simply calls into the
* command buffer processor to free finished buffers and submit any
* queued buffers to hardware.
*/
static void vmw_cmdbuf_man_tasklet(unsigned long data)
{
struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
 
spin_lock(&man->lock);
vmw_cmdbuf_man_process(man);
spin_unlock(&man->lock);
}
 
/**
* vmw_cmdbuf_work_func - The deferred work function that handles
* command buffer errors.
*
* @work: The work func closure argument.
*
* Restarting the command buffer context after an error requires process
* context, so it is deferred to this work function.
*/
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
struct vmw_cmdbuf_man *man =
container_of(work, struct vmw_cmdbuf_man, work);
struct vmw_cmdbuf_header *entry, *next;
uint32_t dummy;
bool restart = false;
 
spin_lock(&man->lock);
list_for_each_entry_safe(entry, next, &man->error, list) {
restart = true;
DRM_ERROR("Command buffer error.\n");
 
list_del(&entry->list);
__vmw_cmdbuf_header_free(entry);
wake_up_all(&man->idle_queue);
}
spin_unlock(&man->lock);
 
if (restart && vmw_cmdbuf_startstop(man, true))
DRM_ERROR("Failed restarting command buffer context 0.\n");
 
/* Send a new fence in case one was removed */
vmw_fifo_send_fence(man->dev_priv, &dummy);
}
 
/**
* vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
*
* @man: The command buffer manager.
* @check_preempted: Check also the preempted queue for pending command buffers.
*
*/
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
bool check_preempted)
{
struct vmw_cmdbuf_context *ctx;
bool idle = false;
int i;
 
spin_lock(&man->lock);
vmw_cmdbuf_man_process(man);
for_each_cmdbuf_ctx(man, i, ctx) {
if (!list_empty(&ctx->submitted) ||
!list_empty(&ctx->hw_submitted) ||
(check_preempted && !list_empty(&ctx->preempted)))
goto out_unlock;
}
 
idle = list_empty(&man->error);
 
out_unlock:
spin_unlock(&man->lock);
 
return idle;
}
 
/**
* __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
* command submissions
*
* @man: The command buffer manager.
*
* Flushes the current command buffer without allocating a new one. A new one
* is automatically allocated when needed. Call with @man->cur_mutex held.
*/
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
struct vmw_cmdbuf_header *cur = man->cur;
 
WARN_ON(!mutex_is_locked(&man->cur_mutex));
 
if (!cur)
return;
 
spin_lock(&man->lock);
if (man->cur_pos == 0) {
__vmw_cmdbuf_header_free(cur);
goto out_unlock;
}
 
man->cur->cb_header->length = man->cur_pos;
vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
spin_unlock(&man->lock);
man->cur = NULL;
man->cur_pos = 0;
}
 
/**
* vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
* command submissions
*
* @man: The command buffer manager.
* @interruptible: Whether to sleep interruptible when sleeping.
*
* Flushes the current command buffer without allocating a new one. A new one
* is automatically allocated when needed.
*/
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
bool interruptible)
{
int ret = vmw_cmdbuf_cur_lock(man, interruptible);
 
if (ret)
return ret;
 
__vmw_cmdbuf_cur_flush(man);
vmw_cmdbuf_cur_unlock(man);
 
return 0;
}
 
/**
* vmw_cmdbuf_idle - Wait for command buffer manager idle.
*
* @man: The command buffer manager.
* @interruptible: Sleep interruptible while waiting.
* @timeout: Time out after this many ticks.
*
* Wait until the command buffer manager has processed all command buffers,
* or until a timeout occurs. If a timeout occurs, the function will return
* -EBUSY.
*/
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
unsigned long timeout)
{
int ret;
 
ret = vmw_cmdbuf_cur_flush(man, interruptible);
vmw_generic_waiter_add(man->dev_priv,
SVGA_IRQFLAG_COMMAND_BUFFER,
&man->dev_priv->cmdbuf_waiters);
 
if (interruptible) {
ret = wait_event_interruptible_timeout
(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
timeout);
} else {
ret = wait_event_timeout
(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
timeout);
}
vmw_generic_waiter_remove(man->dev_priv,
SVGA_IRQFLAG_COMMAND_BUFFER,
&man->dev_priv->cmdbuf_waiters);
if (ret == 0) {
if (!vmw_cmdbuf_man_idle(man, true))
ret = -EBUSY;
else
ret = 0;
}
if (ret > 0)
ret = 0;
 
return ret;
}
 
/**
* vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
*
* @man: The command buffer manager.
* @info: Allocation info. Will hold the size on entry and allocated mm node
* on successful return.
*
* Try to allocate buffer space from the main pool. Returns true if the
* allocation succeeded, in which case @info->node holds the allocated range.
*/
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
struct vmw_cmdbuf_alloc_info *info)
{
int ret;
 
if (info->done)
return true;
memset(info->node, 0, sizeof(*info->node));
spin_lock(&man->lock);
ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
0, 0,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
if (ret) {
vmw_cmdbuf_man_process(man);
ret = drm_mm_insert_node_generic(&man->mm, info->node,
info->page_size, 0, 0,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
}
 
spin_unlock(&man->lock);
info->done = !ret;
 
return info->done;
}
 
/**
* vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
*
* @man: The command buffer manager.
* @node: Pointer to pre-allocated range-manager node.
* @size: The size of the allocation.
* @interruptible: Whether to sleep interruptible while waiting for space.
*
* This function allocates buffer space from the main pool, and if there is
* no space currently available, it turns on IRQ handling and sleeps waiting for it to
* become available.
*/
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
struct drm_mm_node *node,
size_t size,
bool interruptible)
{
struct vmw_cmdbuf_alloc_info info;
 
info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
info.node = node;
info.done = false;
 
/*
* To prevent starvation of large requests, only one allocating call
* at a time waiting for space.
*/
if (interruptible) {
if (mutex_lock_interruptible(&man->space_mutex))
return -ERESTARTSYS;
} else {
mutex_lock(&man->space_mutex);
}
 
/* Try to allocate space without waiting. */
if (vmw_cmdbuf_try_alloc(man, &info))
goto out_unlock;
 
vmw_generic_waiter_add(man->dev_priv,
SVGA_IRQFLAG_COMMAND_BUFFER,
&man->dev_priv->cmdbuf_waiters);
 
if (interruptible) {
int ret;
 
ret = wait_event_interruptible
(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
if (ret) {
vmw_generic_waiter_remove
(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
&man->dev_priv->cmdbuf_waiters);
mutex_unlock(&man->space_mutex);
return ret;
}
} else {
wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
}
vmw_generic_waiter_remove(man->dev_priv,
SVGA_IRQFLAG_COMMAND_BUFFER,
&man->dev_priv->cmdbuf_waiters);
 
out_unlock:
mutex_unlock(&man->space_mutex);
 
return 0;
}
 
/**
* vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
* space from the main pool.
*
* @man: The command buffer manager.
* @header: Pointer to the header to set up.
* @size: The requested size of the buffer space.
* @interruptible: Whether to sleep interruptible while waiting for space.
*/
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
struct vmw_cmdbuf_header *header,
size_t size,
bool interruptible)
{
SVGACBHeader *cb_hdr;
size_t offset;
int ret;
 
if (!man->has_pool)
return -ENOMEM;
 
ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
 
if (ret)
return ret;
 
header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
&header->handle);
if (!header->cb_header) {
ret = -ENOMEM;
goto out_no_cb_header;
}
 
header->size = header->node.size << PAGE_SHIFT;
cb_hdr = header->cb_header;
offset = header->node.start << PAGE_SHIFT;
header->cmd = man->map + offset;
memset(cb_hdr, 0, sizeof(*cb_hdr));
if (man->using_mob) {
cb_hdr->flags = SVGA_CB_FLAG_MOB;
cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
cb_hdr->ptr.mob.mobOffset = offset;
} else {
cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
}
 
return 0;
 
out_no_cb_header:
spin_lock(&man->lock);
drm_mm_remove_node(&header->node);
spin_unlock(&man->lock);
 
return ret;
}
 
/**
* vmw_cmdbuf_space_inline - Set up a command buffer header with
* inline command buffer space.
*
* @man: The command buffer manager.
* @header: Pointer to the header to set up.
* @size: The requested size of the buffer space.
*/
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
struct vmw_cmdbuf_header *header,
int size)
{
struct vmw_cmdbuf_dheader *dheader;
SVGACBHeader *cb_hdr;
 
if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
return -ENOMEM;
 
dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
&header->handle);
if (!dheader)
return -ENOMEM;
 
header->inline_space = true;
header->size = VMW_CMDBUF_INLINE_SIZE;
cb_hdr = &dheader->cb_header;
header->cb_header = cb_hdr;
header->cmd = dheader->cmd;
memset(dheader, 0, sizeof(*dheader));
cb_hdr->status = SVGA_CB_STATUS_NONE;
cb_hdr->flags = SVGA_CB_FLAG_NONE;
cb_hdr->ptr.pa = (u64)header->handle +
(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
 
return 0;
}
 
/**
* vmw_cmdbuf_alloc - Allocate a command buffer header complete with
* command buffer space.
*
* @man: The command buffer manager.
* @size: The requested size of the buffer space.
* @interruptible: Whether to sleep interruptible while waiting for space.
* @p_header: points to a header pointer to populate on successful return.
*
* Returns a pointer to command buffer space if successful. Otherwise
* returns an error pointer. The header pointer returned in @p_header should
* be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
*/
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
size_t size, bool interruptible,
struct vmw_cmdbuf_header **p_header)
{
struct vmw_cmdbuf_header *header;
int ret = 0;
 
*p_header = NULL;
 
header = kzalloc(sizeof(*header), GFP_KERNEL);
if (!header)
return ERR_PTR(-ENOMEM);
 
if (size <= VMW_CMDBUF_INLINE_SIZE)
ret = vmw_cmdbuf_space_inline(man, header, size);
else
ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
 
if (ret) {
kfree(header);
return ERR_PTR(ret);
}
 
header->man = man;
INIT_LIST_HEAD(&header->list);
header->cb_header->status = SVGA_CB_STATUS_NONE;
*p_header = header;
 
return header->cmd;
}
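 
/*
 * Hedged usage sketch: allocate dedicated space for one command, copy the
 * command in and commit it with an immediate flush, mirroring the pattern
 * used by vmw_cmdbuf_send_device_command() below.  The payload pointer and
 * size are placeholders supplied by the caller.
 */
static int example_send_one_command(struct vmw_cmdbuf_man *man,
				    const void *payload, size_t size)
{
	struct vmw_cmdbuf_header *header;
	void *cmd = vmw_cmdbuf_alloc(man, size, true, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, payload, size);
	vmw_cmdbuf_commit(man, size, header, true);

	return 0;
}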
 
/**
* vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
* command buffer.
*
* @man: The command buffer manager.
* @size: The requested size of the commands.
* @ctx_id: The context id if any. Otherwise set to SVGA3D_REG_INVALID.
* @interruptible: Whether to sleep interruptible while waiting for space.
*
* Returns a pointer to command buffer space if successful. Otherwise
* returns an error pointer.
*/
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
size_t size,
int ctx_id,
bool interruptible)
{
struct vmw_cmdbuf_header *cur;
void *ret;
 
if (vmw_cmdbuf_cur_lock(man, interruptible))
return ERR_PTR(-ERESTARTSYS);
 
cur = man->cur;
if (cur && (size + man->cur_pos > cur->size ||
((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
ctx_id != cur->cb_header->dxContext)))
__vmw_cmdbuf_cur_flush(man);
 
if (!man->cur) {
ret = vmw_cmdbuf_alloc(man,
max_t(size_t, size, man->default_size),
interruptible, &man->cur);
if (IS_ERR(ret)) {
vmw_cmdbuf_cur_unlock(man);
return ret;
}
 
cur = man->cur;
}
 
if (ctx_id != SVGA3D_INVALID_ID) {
cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
cur->cb_header->dxContext = ctx_id;
}
 
cur->reserved = size;
 
return (void *) (man->cur->cmd + man->cur_pos);
}
 
/**
* vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
*
* @man: The command buffer manager.
* @size: The size of the commands actually written.
* @flush: Whether to flush the command buffer immediately.
*/
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
size_t size, bool flush)
{
struct vmw_cmdbuf_header *cur = man->cur;
 
WARN_ON(!mutex_is_locked(&man->cur_mutex));
 
WARN_ON(size > cur->reserved);
man->cur_pos += size;
if (!size)
cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
if (flush)
__vmw_cmdbuf_cur_flush(man);
vmw_cmdbuf_cur_unlock(man);
}
 
/**
* vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
*
* @man: The command buffer manager.
* @size: The requested size of the commands.
* @ctx_id: The context id if any. Otherwise set to SVGA3D_REG_INVALID.
* @interruptible: Whether to sleep interruptible while waiting for space.
* @header: Header of the command buffer. NULL if the current command buffer
* should be used.
*
* Returns a pointer to command buffer space if successful. Otherwise
* returns an error pointer.
*/
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
int ctx_id, bool interruptible,
struct vmw_cmdbuf_header *header)
{
if (!header)
return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
 
if (size > header->size)
return ERR_PTR(-EINVAL);
 
if (ctx_id != SVGA3D_INVALID_ID) {
header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
header->cb_header->dxContext = ctx_id;
}
 
header->reserved = size;
return header->cmd;
}
 
/**
* vmw_cmdbuf_commit - Commit commands in a command buffer.
*
* @man: The command buffer manager.
* @size: The size of the commands actually written.
* @header: Header of the command buffer. NULL if the current command buffer
* should be used.
* @flush: Whether to flush the command buffer immediately.
*/
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
struct vmw_cmdbuf_header *header, bool flush)
{
if (!header) {
vmw_cmdbuf_commit_cur(man, size, flush);
return;
}
 
(void) vmw_cmdbuf_cur_lock(man, false);
__vmw_cmdbuf_cur_flush(man);
WARN_ON(size > header->reserved);
man->cur = header;
man->cur_pos = size;
if (!size)
header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
if (flush)
__vmw_cmdbuf_cur_flush(man);
vmw_cmdbuf_cur_unlock(man);
}
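 
/*
 * Hedged sketch of the "current buffer" path: reserve space with a NULL
 * header, write the command and commit only the bytes actually used.
 * SVGA3D_INVALID_ID means no DX context is attached; the payload is a
 * placeholder.
 */
static int example_reserve_and_commit(struct vmw_cmdbuf_man *man,
				      const void *payload, size_t size)
{
	void *cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID,
				       true, NULL);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, payload, size);
	vmw_cmdbuf_commit(man, size, NULL, false);

	return 0;
}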
 
/**
* vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
*
* @man: The command buffer manager.
*/
void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
{
if (!man)
return;
 
// tasklet_schedule(&man->tasklet);
}
 
/**
* vmw_cmdbuf_send_device_command - Send a command through the device context.
*
* @man: The command buffer manager.
* @command: Pointer to the command to send.
* @size: Size of the command.
*
* Synchronously sends a device context command.
*/
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
const void *command,
size_t size)
{
struct vmw_cmdbuf_header *header;
int status;
void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
 
if (IS_ERR(cmd))
return PTR_ERR(cmd);
 
memcpy(cmd, command, size);
header->cb_header->length = size;
header->cb_context = SVGA_CB_CONTEXT_DEVICE;
spin_lock(&man->lock);
status = vmw_cmdbuf_header_submit(header);
spin_unlock(&man->lock);
vmw_cmdbuf_header_free(header);
 
if (status != SVGA_CB_STATUS_COMPLETED) {
DRM_ERROR("Device context command failed with status %d\n",
status);
return -EINVAL;
}
 
return 0;
}
 
/**
* vmw_cmdbuf_startstop - Send a start / stop command through the device
* context.
*
* @man: The command buffer manager.
* @enable: Whether to enable or disable the context.
*
* Synchronously sends a device start / stop context command.
*/
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
bool enable)
{
struct {
uint32 id;
SVGADCCmdStartStop body;
} __packed cmd;
 
cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
cmd.body.enable = (enable) ? 1 : 0;
cmd.body.context = SVGA_CB_CONTEXT_0;
 
return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}
 
/**
* vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
*
* @man: The command buffer manager.
* @size: The size of the main space pool.
* @default_size: The default size of the command buffer for small kernel
* submissions.
*
* Set the size and allocate the main command buffer space pool,
* as well as the default size of the command buffer for
* small kernel submissions. If successful, this enables large command
* submissions. Note that this function requires that rudimentary command
* submission is already available and that the MOB memory manager is alive.
* Returns 0 on success. Negative error code on failure.
*/
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
size_t size, size_t default_size)
{
struct vmw_private *dev_priv = man->dev_priv;
bool dummy;
int ret;
 
if (man->has_pool)
return -EINVAL;
 
/* First, try to allocate a huge chunk of DMA memory */
size = PAGE_ALIGN(size);
man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
&man->handle, GFP_KERNEL);
if (man->map) {
man->using_mob = false;
} else {
/*
* DMA memory failed. If we can have command buffers in a
* MOB, try to use that instead. Note that this will
* actually call into the already enabled manager, when
* binding the MOB.
*/
if (!(dev_priv->capabilities & SVGA_CAP_DX))
return -ENOMEM;
 
ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
&vmw_mob_ne_placement, 0, false, NULL,
&man->cmd_space);
if (ret)
return ret;
 
man->using_mob = true;
ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
&man->map_obj);
if (ret)
goto out_no_map;
 
man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
}
 
man->size = size;
drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
 
man->has_pool = true;
 
/*
* For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
* prevent deadlocks from happening when vmw_cmdbuf_space_pool()
* needs to wait for space and we block on further command
* submissions to be able to free up space.
*/
man->default_size = VMW_CMDBUF_INLINE_SIZE;
DRM_INFO("Using command buffers with %s pool.\n",
(man->using_mob) ? "MOB" : "DMA");
 
return 0;
 
out_no_map:
if (man->using_mob)
ttm_bo_unref(&man->cmd_space);
 
return ret;
}
 
/**
* vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
* inline command buffer submissions only.
*
* @dev_priv: Pointer to device private structure.
*
* Returns a pointer to a command buffer manager on success or an error pointer
* on failure. The command buffer manager will be enabled for submissions of
* size VMW_CMDBUF_INLINE_SIZE only.
*/
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
struct vmw_cmdbuf_man *man;
struct vmw_cmdbuf_context *ctx;
int i;
int ret;
 
if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
return ERR_PTR(-ENOSYS);
 
man = kzalloc(sizeof(*man), GFP_KERNEL);
if (!man)
return ERR_PTR(-ENOMEM);
 
man->headers = dma_pool_create("vmwgfx cmdbuf",
&dev_priv->dev->pdev->dev,
sizeof(SVGACBHeader),
64, PAGE_SIZE);
if (!man->headers) {
ret = -ENOMEM;
goto out_no_pool;
}
 
man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
&dev_priv->dev->pdev->dev,
sizeof(struct vmw_cmdbuf_dheader),
64, PAGE_SIZE);
if (!man->dheaders) {
ret = -ENOMEM;
goto out_no_dpool;
}
 
for_each_cmdbuf_ctx(man, i, ctx)
vmw_cmdbuf_ctx_init(ctx);
 
INIT_LIST_HEAD(&man->error);
spin_lock_init(&man->lock);
mutex_init(&man->cur_mutex);
mutex_init(&man->space_mutex);
// tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
// (unsigned long) man);
man->default_size = VMW_CMDBUF_INLINE_SIZE;
init_waitqueue_head(&man->alloc_queue);
init_waitqueue_head(&man->idle_queue);
man->dev_priv = dev_priv;
man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
&dev_priv->error_waiters);
ret = vmw_cmdbuf_startstop(man, true);
if (ret) {
DRM_ERROR("Failed starting command buffer context 0.\n");
vmw_cmdbuf_man_destroy(man);
return ERR_PTR(ret);
}
 
return man;
 
out_no_dpool:
dma_pool_destroy(man->headers);
out_no_pool:
kfree(man);
 
return ERR_PTR(ret);
}
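 
/*
 * Hedged lifecycle sketch: create the manager early (inline submissions
 * only), enable the large pool once MOB / DMA memory is available, and
 * tear everything down in reverse order.  The 2 MiB pool size is purely
 * illustrative.
 */
static struct vmw_cmdbuf_man *example_cmdbuf_bring_up(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man = vmw_cmdbuf_man_create(dev_priv);

	if (IS_ERR(man))
		return man;

	if (vmw_cmdbuf_set_pool_size(man, 2 * 1024 * 1024,
				     VMW_CMDBUF_INLINE_SIZE))
		DRM_INFO("Large command buffer pool unavailable.\n");

	return man;
}

static void example_cmdbuf_tear_down(struct vmw_cmdbuf_man *man)
{
	vmw_cmdbuf_remove_pool(man);
	vmw_cmdbuf_man_destroy(man);
}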
 
/**
* vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
*
* @man: Pointer to a command buffer manager.
*
* This function removes the main buffer space pool, and should be called
* before MOB memory management is removed. When this function has been called,
* only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
* less are allowed, and the default size of the command buffer for small kernel
* submissions is also set to this size.
*/
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
if (!man->has_pool)
return;
 
man->has_pool = false;
man->default_size = VMW_CMDBUF_INLINE_SIZE;
(void) vmw_cmdbuf_idle(man, false, 10*HZ);
if (man->using_mob) {
(void) ttm_bo_kunmap(&man->map_obj);
ttm_bo_unref(&man->cmd_space);
} else {
// dma_free_coherent(&man->dev_priv->dev->pdev->dev,
// man->size, man->map, man->handle);
}
}
 
/**
* vmw_cmdbuf_man_destroy - Take down a command buffer manager.
*
* @man: Pointer to a command buffer manager.
*
* This function idles and then destroys a command buffer manager.
*/
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
WARN_ON_ONCE(man->has_pool);
(void) vmw_cmdbuf_idle(man, false, 10*HZ);
if (vmw_cmdbuf_startstop(man, false))
DRM_ERROR("Failed stopping command buffer context 0.\n");
 
vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
&man->dev_priv->error_waiters);
// tasklet_kill(&man->tasklet);
// (void) cancel_work_sync(&man->work);
dma_pool_destroy(man->dheaders);
dma_pool_destroy(man->headers);
mutex_destroy(&man->cur_mutex);
mutex_destroy(&man->space_mutex);
kfree(man);
}
/drivers/video/drm/vmwgfx/vmwgfx_cmdbuf_res.c
0,0 → 1,346
/**************************************************************************
*
* Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
 
#define VMW_CMDBUF_RES_MAN_HT_ORDER 12
 
/**
* struct vmw_cmdbuf_res - Command buffer managed resource entry.
*
* @res: Refcounted pointer to a struct vmw_resource.
* @hash: Hash entry for the manager hash table.
* @head: List head used either by the staging list or the manager list
* of committed resources.
* @state: Staging state of this resource entry.
* @man: Pointer to a resource manager for this entry.
*/
struct vmw_cmdbuf_res {
struct vmw_resource *res;
struct drm_hash_item hash;
struct list_head head;
enum vmw_cmdbuf_res_state state;
struct vmw_cmdbuf_res_manager *man;
};
 
/**
* struct vmw_cmdbuf_res_manager - Command buffer resource manager.
*
* @resources: Hash table containing staged and committed command buffer
* resources
* @list: List of committed command buffer resources.
* @dev_priv: Pointer to a device private structure.
*
* @resources and @list are protected by the cmdbuf mutex for now.
*/
struct vmw_cmdbuf_res_manager {
struct drm_open_hash resources;
struct list_head list;
struct vmw_private *dev_priv;
};
 
 
/**
* vmw_cmdbuf_res_lookup - Look up a command buffer resource
*
* @man: Pointer to the command buffer resource manager
* @res_type: The resource type, which combined with the user key
* identifies the resource.
* @user_key: The user key.
*
* Returns a valid refcounted struct vmw_resource pointer on success,
* an error pointer on failure.
*/
struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key)
{
struct drm_hash_item *hash;
int ret;
unsigned long key = user_key | (res_type << 24);
 
ret = drm_ht_find_item(&man->resources, key, &hash);
if (unlikely(ret != 0))
return ERR_PTR(ret);
 
return vmw_resource_reference
(drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res);
}
 
/**
* vmw_cmdbuf_res_free - Free a command buffer resource.
*
* @man: Pointer to the command buffer resource manager
* @entry: Pointer to a struct vmw_cmdbuf_res.
*
* Frees a struct vmw_cmdbuf_res entry and drops its reference to the
* struct vmw_resource.
*/
static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
struct vmw_cmdbuf_res *entry)
{
list_del(&entry->head);
WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash));
vmw_resource_unreference(&entry->res);
kfree(entry);
}
 
/**
* vmw_cmdbuf_res_commit - Commit a list of command buffer resource actions
*
* @list: Caller's list of command buffer resource actions.
*
* This function commits a list of command buffer resource
* additions or removals.
* It is typically called when the execbuf ioctl call triggering these
* actions has committed the fifo contents to the device.
*/
void vmw_cmdbuf_res_commit(struct list_head *list)
{
struct vmw_cmdbuf_res *entry, *next;
 
list_for_each_entry_safe(entry, next, list, head) {
list_del(&entry->head);
if (entry->res->func->commit_notify)
entry->res->func->commit_notify(entry->res,
entry->state);
switch (entry->state) {
case VMW_CMDBUF_RES_ADD:
entry->state = VMW_CMDBUF_RES_COMMITTED;
list_add_tail(&entry->head, &entry->man->list);
break;
case VMW_CMDBUF_RES_DEL:
vmw_resource_unreference(&entry->res);
kfree(entry);
break;
default:
BUG();
break;
}
}
}
 
/**
* vmw_cmdbuf_res_revert - Revert a list of command buffer resource actions
*
* @man: Pointer to the command buffer resource manager
* @list: Caller's list of command buffer resource action
*
* This function reverts a list of command buffer resource
* additions or removals.
* It is typically called when the execbuf ioctl call triggering these
* actions failed for some reason, and the command stream was never
* submitted.
*/
void vmw_cmdbuf_res_revert(struct list_head *list)
{
struct vmw_cmdbuf_res *entry, *next;
int ret;
 
list_for_each_entry_safe(entry, next, list, head) {
switch (entry->state) {
case VMW_CMDBUF_RES_ADD:
vmw_cmdbuf_res_free(entry->man, entry);
break;
case VMW_CMDBUF_RES_DEL:
ret = drm_ht_insert_item(&entry->man->resources,
&entry->hash);
list_del(&entry->head);
list_add_tail(&entry->head, &entry->man->list);
entry->state = VMW_CMDBUF_RES_COMMITTED;
break;
default:
BUG();
break;
}
}
}
 
/**
* vmw_cmdbuf_res_add - Stage a command buffer managed resource for addition.
*
* @man: Pointer to the command buffer resource manager.
* @res_type: The resource type.
* @user_key: The user-space id of the resource.
* @res: Valid (refcount != 0) pointer to a struct vmw_resource.
* @list: The staging list.
*
* This function allocates a struct vmw_cmdbuf_res entry and adds the
* resource to the hash table of the manager identified by @man. The
* entry is then put on the staging list identified by @list.
*/
int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key,
struct vmw_resource *res,
struct list_head *list)
{
struct vmw_cmdbuf_res *cres;
int ret;
 
cres = kzalloc(sizeof(*cres), GFP_KERNEL);
if (unlikely(cres == NULL))
return -ENOMEM;
 
cres->hash.key = user_key | (res_type << 24);
ret = drm_ht_insert_item(&man->resources, &cres->hash);
if (unlikely(ret != 0))
goto out_invalid_key;
 
cres->state = VMW_CMDBUF_RES_ADD;
cres->res = vmw_resource_reference(res);
cres->man = man;
list_add_tail(&cres->head, list);
 
out_invalid_key:
return ret;
}
 
/**
* vmw_cmdbuf_res_remove - Stage a command buffer managed resource for removal.
*
* @man: Pointer to the command buffer resource manager.
* @res_type: The resource type.
* @user_key: The user-space id of the resource.
* @list: The staging list.
* @res_p: If the resource is in an already committed state, points to the
* struct vmw_resource on successful return. The pointer will be
* non ref-counted.
*
* This function looks up the struct vmw_cmdbuf_res entry from the manager
* hash table and, if it exists, removes it. Depending on its current staging
* state it then either removes the entry from the staging list or adds it
* to it with a staging state of removal.
*/
int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key,
struct list_head *list,
struct vmw_resource **res_p)
{
struct vmw_cmdbuf_res *entry;
struct drm_hash_item *hash;
int ret;
 
ret = drm_ht_find_item(&man->resources, user_key | (res_type << 24),
&hash);
if (unlikely(ret != 0))
return -EINVAL;
 
entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash);
 
switch (entry->state) {
case VMW_CMDBUF_RES_ADD:
vmw_cmdbuf_res_free(man, entry);
*res_p = NULL;
break;
case VMW_CMDBUF_RES_COMMITTED:
(void) drm_ht_remove_item(&man->resources, &entry->hash);
list_del(&entry->head);
entry->state = VMW_CMDBUF_RES_DEL;
list_add_tail(&entry->head, list);
*res_p = entry->res;
break;
default:
BUG();
break;
}
 
return 0;
}
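 
/*
 * Hedged sketch of the staging flow used around an execbuf call: stage an
 * addition on a local list, then commit it once the command stream has
 * reached the device, or revert it on error.  All parameters are
 * placeholders supplied by the caller.
 */
static int example_stage_resource(struct vmw_cmdbuf_res_manager *man,
				  enum vmw_cmdbuf_res_type res_type,
				  u32 user_key,
				  struct vmw_resource *res,
				  bool submit_succeeded)
{
	struct list_head staged;
	int ret;

	INIT_LIST_HEAD(&staged);
	ret = vmw_cmdbuf_res_add(man, res_type, user_key, res, &staged);
	if (unlikely(ret != 0))
		return ret;

	if (submit_succeeded)
		vmw_cmdbuf_res_commit(&staged);
	else
		vmw_cmdbuf_res_revert(&staged);

	return 0;
}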
 
/**
* vmw_cmdbuf_res_man_create - Allocate a command buffer managed resource
* manager.
*
* @dev_priv: Pointer to a struct vmw_private
*
* Allocates and initializes a command buffer managed resource manager. Returns
* an error pointer on failure.
*/
struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
{
struct vmw_cmdbuf_res_manager *man;
int ret;
 
man = kzalloc(sizeof(*man), GFP_KERNEL);
if (man == NULL)
return ERR_PTR(-ENOMEM);
 
man->dev_priv = dev_priv;
INIT_LIST_HEAD(&man->list);
ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
if (ret == 0)
return man;
 
kfree(man);
return ERR_PTR(ret);
}
 
/**
* vmw_cmdbuf_res_man_destroy - Destroy a command buffer managed resource
* manager.
*
* @man: Pointer to the manager to destroy.
*
* This function destroys a command buffer managed resource manager and
* unreferences / frees all command buffer managed resources and entries
* associated with it.
*/
void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
{
struct vmw_cmdbuf_res *entry, *next;
 
list_for_each_entry_safe(entry, next, &man->list, head)
vmw_cmdbuf_res_free(man, entry);
 
kfree(man);
}
 
/**
*
* vmw_cmdbuf_res_man_size - Return the size of a command buffer managed
* resource manager
*
* Returns the approximate allocation size of a command buffer managed
* resource manager.
*/
size_t vmw_cmdbuf_res_man_size(void)
{
static size_t res_man_size;
 
if (unlikely(res_man_size == 0))
res_man_size =
ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) +
ttm_round_pot(sizeof(struct hlist_head) <<
VMW_CMDBUF_RES_MAN_HT_ORDER);
 
return res_man_size;
}
/drivers/video/drm/vmwgfx/vmwgfx_context.c
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
27,19 → 27,19
 
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "ttm/ttm_placement.h"
 
struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
struct vmw_ctx_binding_state cbs;
struct vmw_ctx_binding_state *cbs;
struct vmw_cmdbuf_res_manager *man;
struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
spinlock_t cotable_lock;
struct vmw_dma_buffer *dx_query_mob;
};
 
 
 
typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
 
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);
51,12 → 51,14
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
bool rebind);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);
 
static uint64_t vmw_user_context_size;
 
static const struct vmw_user_resource_conv user_context_conv = {
93,15 → 95,38
.unbind = vmw_gb_context_unbind
};
 
static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
[vmw_ctx_binding_tex] = vmw_context_scrub_texture };
static const struct vmw_res_func vmw_dx_context_func = {
.res_type = vmw_res_dx_context,
.needs_backup = true,
.may_evict = true,
.type_name = "dx contexts",
.backup_placement = &vmw_mob_placement,
.create = vmw_dx_context_create,
.destroy = vmw_dx_context_destroy,
.bind = vmw_dx_context_bind,
.unbind = vmw_dx_context_unbind
};
 
/**
* Context management:
*/
 
static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
{
struct vmw_resource *res;
int i;
 
for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
spin_lock(&uctx->cotable_lock);
res = uctx->cotables[i];
uctx->cotables[i] = NULL;
spin_unlock(&uctx->cotable_lock);
 
if (res)
vmw_resource_unreference(&res);
}
}
 
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
struct vmw_user_context *uctx =
113,17 → 138,19
} *cmd;
 
 
if (res->func->destroy == vmw_gb_context_destroy) {
if (res->func->destroy == vmw_gb_context_destroy ||
res->func->destroy == vmw_dx_context_destroy) {
mutex_lock(&dev_priv->cmdbuf_mutex);
vmw_cmdbuf_res_man_destroy(uctx->man);
mutex_lock(&dev_priv->binding_mutex);
(void) vmw_context_binding_state_kill(&uctx->cbs);
(void) vmw_gb_context_destroy(res);
vmw_binding_state_kill(uctx->cbs);
(void) res->func->destroy(res);
mutex_unlock(&dev_priv->binding_mutex);
if (dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
vmw_context_cotables_unref(uctx);
return;
}
 
135,31 → 162,35
return;
}
 
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
cmd->header.size = cpu_to_le32(sizeof(cmd->body));
cmd->body.cid = cpu_to_le32(res->id);
cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
 
vmw_fifo_commit(dev_priv, sizeof(*cmd));
vmw_3d_resource_dec(dev_priv, false);
vmw_fifo_resource_dec(dev_priv);
}
 
static int vmw_gb_context_init(struct vmw_private *dev_priv,
bool dx,
struct vmw_resource *res,
void (*res_free) (struct vmw_resource *res))
{
int ret;
int ret, i;
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
 
res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
SVGA3D_CONTEXT_DATA_SIZE);
ret = vmw_resource_init(dev_priv, res, true,
res_free, &vmw_gb_context_func);
res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
res_free,
dx ? &vmw_dx_context_func :
&vmw_gb_context_func);
if (unlikely(ret != 0))
goto out_err;
 
if (dev_priv->has_mob) {
uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
if (unlikely(IS_ERR(uctx->man))) {
if (IS_ERR(uctx->man)) {
ret = PTR_ERR(uctx->man);
uctx->man = NULL;
goto out_err;
166,12 → 197,32
}
}
 
memset(&uctx->cbs, 0, sizeof(uctx->cbs));
INIT_LIST_HEAD(&uctx->cbs.list);
uctx->cbs = vmw_binding_state_alloc(dev_priv);
if (IS_ERR(uctx->cbs)) {
ret = PTR_ERR(uctx->cbs);
goto out_err;
}
 
spin_lock_init(&uctx->cotable_lock);
 
if (dx) {
for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
&uctx->res, i);
if (unlikely(uctx->cotables[i] == NULL)) {
ret = -ENOMEM;
goto out_cotables;
}
}
}
 
 
 
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
 
out_cotables:
vmw_context_cotables_unref(uctx);
out_err:
if (res_free)
res_free(res);
182,7 → 233,8
 
static int vmw_context_init(struct vmw_private *dev_priv,
struct vmw_resource *res,
void (*res_free) (struct vmw_resource *res))
void (*res_free)(struct vmw_resource *res),
bool dx)
{
int ret;
 
192,7 → 244,7
} *cmd;
 
if (dev_priv->has_mob)
return vmw_gb_context_init(dev_priv, res, res_free);
return vmw_gb_context_init(dev_priv, dx, res, res_free);
 
ret = vmw_resource_init(dev_priv, res, false,
res_free, &vmw_legacy_context_func);
215,12 → 267,12
return -ENOMEM;
}
 
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
cmd->header.size = cpu_to_le32(sizeof(cmd->body));
cmd->body.cid = cpu_to_le32(res->id);
cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
 
vmw_fifo_commit(dev_priv, sizeof(*cmd));
(void) vmw_3d_resource_inc(dev_priv, false);
vmw_fifo_resource_inc(dev_priv);
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
 
232,20 → 284,11
return ret;
}
 
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
int ret;
 
if (unlikely(res == NULL))
return NULL;
/*
* GB context.
*/
 
ret = vmw_context_init(dev_priv, res, NULL);
 
return (ret == 0) ? res : NULL;
}
 
 
static int vmw_gb_context_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
281,7 → 324,7
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
(void) vmw_3d_resource_inc(dev_priv, false);
vmw_fifo_resource_inc(dev_priv);
 
return 0;
 
309,7 → 352,6
"binding.\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
346,7 → 388,7
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
mutex_lock(&dev_priv->binding_mutex);
vmw_context_binding_state_scrub(&uctx->cbs);
vmw_binding_state_scrub(uctx->cbs);
 
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
414,12 → 456,236
if (dev_priv->query_cid == res->id)
dev_priv->query_cid_valid = false;
vmw_resource_release_id(res);
vmw_3d_resource_dec(dev_priv, false);
vmw_fifo_resource_dec(dev_priv);
 
return 0;
}
 
/*
* DX context.
*/
 
static int vmw_dx_context_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXDefineContext body;
} *cmd;
 
if (likely(res->id != -1))
return 0;
 
ret = vmw_resource_alloc_id(res);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate a context id.\n");
goto out_no_id;
}
 
if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
ret = -EBUSY;
goto out_no_fifo;
}
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for context "
"creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
 
cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
 
return 0;
 
out_no_fifo:
vmw_resource_release_id(res);
out_no_id:
return ret;
}
 
static int vmw_dx_context_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf)
{
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXBindContext body;
} *cmd;
struct ttm_buffer_object *bo = val_buf->bo;
 
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for context "
"binding.\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
cmd->body.mobid = bo->mem.start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
 
return 0;
}
 
/**
* vmw_dx_context_scrub_cotables - Scrub all bindings and
* cotables from a context
*
* @ctx: Pointer to the context resource
* @readback: Whether to save the cotable contents on scrubbing.
*
* COtables must be unbound before their context, but unbinding requires
* the backup buffer being reserved, whereas scrubbing does not.
* This function scrubs all cotables of a context, potentially reading back
* the contents into their backup buffers. However, scrubbing cotables
* also makes the device context invalid, so scrub all bindings first so
* that doesn't have to be done later with an invalid context.
*/
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
bool readback)
{
struct vmw_user_context *uctx =
container_of(ctx, struct vmw_user_context, res);
int i;
 
vmw_binding_state_scrub(uctx->cbs);
for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
struct vmw_resource *res;
 
/* Avoid racing with ongoing cotable destruction. */
spin_lock(&uctx->cotable_lock);
res = uctx->cotables[vmw_cotable_scrub_order[i]];
if (res)
res = vmw_resource_reference_unless_doomed(res);
spin_unlock(&uctx->cotable_lock);
if (!res)
continue;
 
WARN_ON(vmw_cotable_scrub(res, readback));
vmw_resource_unreference(&res);
}
}
 
static int vmw_dx_context_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf)
{
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = val_buf->bo;
struct vmw_fence_obj *fence;
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
 
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXReadbackContext body;
} *cmd1;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXBindContext body;
} *cmd2;
uint32_t submit_size;
uint8_t *cmd;
 
 
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
mutex_lock(&dev_priv->binding_mutex);
vmw_dx_context_scrub_cotables(res, readback);
 
if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
readback) {
WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
if (vmw_query_readback_all(uctx->dx_query_mob))
DRM_ERROR("Failed to read back query states\n");
}
 
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
cmd = vmw_fifo_reserve(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for context "
"unbinding.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
 
cmd2 = (void *) cmd;
if (readback) {
cmd1 = (void *) cmd;
cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.cid = res->id;
cmd2 = (void *) (&cmd1[1]);
}
cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
cmd2->header.size = sizeof(cmd2->body);
cmd2->body.cid = res->id;
cmd2->body.mobid = SVGA3D_INVALID_ID;
 
vmw_fifo_commit(dev_priv, submit_size);
mutex_unlock(&dev_priv->binding_mutex);
 
/*
* Create a fence object and fence the backup buffer.
*/
 
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
 
vmw_fence_single_bo(bo, fence);
 
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
 
return 0;
}
 
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXDestroyContext body;
} *cmd;
 
if (likely(res->id == -1))
return 0;
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for context "
"destruction.\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
if (dev_priv->query_cid == res->id)
dev_priv->query_cid_valid = false;
vmw_resource_release_id(res);
vmw_fifo_resource_dec(dev_priv);
 
return 0;
}
 
/**
* User-space context management:
*/
 
435,7 → 701,12
container_of(res, struct vmw_user_context, res);
struct vmw_private *dev_priv = res->dev_priv;
 
// ttm_base_object_kfree(ctx, base);
if (ctx->cbs)
vmw_binding_state_free(ctx->cbs);
 
(void) vmw_context_bind_dx_query(res, NULL);
 
ttm_base_object_kfree(ctx, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_context_size);
}
466,8 → 737,8
return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}
 
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
static int vmw_context_define(struct drm_device *dev, void *data,
struct drm_file *file_priv, bool dx)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_context *ctx;
477,6 → 748,10
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
 
if (!dev_priv->has_dx && dx) {
DRM_ERROR("DX contexts not supported by device.\n");
return -EINVAL;
}
 
/*
* Approximate idr memory usage with 128 bytes. It will be limited
517,7 → 792,7
* From here on, the destructor takes over resource freeing.
*/
 
ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
if (unlikely(ret != 0))
goto out_unlock;
 
536,388 → 811,106
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
 
}
#endif
 
/**
* vmw_context_scrub_shader - scrub a shader binding from a context.
* vmw_context_binding_list - Return a list of context bindings
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
* @ctx: The context resource
*
* Returns the current list of bindings of the given context. Note that
* this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
*/
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdSetShader body;
} *cmd;
struct vmw_user_context *uctx =
container_of(ctx, struct vmw_user_context, res);
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for shader "
"unbinding.\n");
return -ENOMEM;
return vmw_binding_state_list(uctx->cbs);
}
 
cmd->header.id = SVGA_3D_CMD_SET_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = bi->ctx->id;
cmd->body.type = bi->i1.shader_type;
cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
}
 
/**
* vmw_context_scrub_render_target - scrub a render target binding
* from a context.
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*/
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
bool rebind)
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdSetRenderTarget body;
} *cmd;
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for render target "
"unbinding.\n");
return -ENOMEM;
return container_of(ctx, struct vmw_user_context, res)->man;
}
 
cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = bi->ctx->id;
cmd->body.type = bi->i1.rt_type;
cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
cmd->body.target.face = 0;
cmd->body.target.mipmap = 0;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
}
 
/**
* vmw_context_scrub_texture - scrub a texture binding from a context.
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*
* TODO: Possibly complement this function with a function that takes
* a list of texture bindings and combines them to a single command.
*/
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
bool rebind)
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
SVGACOTableType cotable_type)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
SVGA3dCmdHeader header;
struct {
SVGA3dCmdSetTextureState c;
SVGA3dTextureState s1;
} body;
} *cmd;
if (cotable_type >= SVGA_COTABLE_DX10_MAX)
return ERR_PTR(-EINVAL);
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for texture "
"unbinding.\n");
return -ENOMEM;
return vmw_resource_reference
(container_of(ctx, struct vmw_user_context, res)->
cotables[cotable_type]);
}
 
 
cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
cmd->header.size = sizeof(cmd->body);
cmd->body.c.cid = bi->ctx->id;
cmd->body.s1.stage = bi->i1.texture_stage;
cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
}
 
/**
* vmw_context_binding_drop: Stop tracking a context binding
* vmw_context_binding_state -
* Return a pointer to a context binding state structure
*
* @cb: Pointer to binding tracker storage.
* @ctx: The context resource
*
* Stops tracking a context binding, and re-initializes its storage.
* Typically used when the context binding is replaced with a binding to
* another (or the same, for that matter) resource.
* Returns the current state of bindings of the given context. Note that
* this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
*/
static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
list_del(&cb->ctx_list);
if (!list_empty(&cb->res_list))
list_del(&cb->res_list);
cb->bi.ctx = NULL;
return container_of(ctx, struct vmw_user_context, res)->cbs;
}
 
/**
* vmw_context_binding_add: Start tracking a context binding
* vmw_context_bind_dx_query -
* Sets query MOB for the context. If @mob is NULL, then this function will
* remove the association between the MOB and the context. This function
* assumes the binding_mutex is held.
*
* @cbs: Pointer to the context binding state tracker.
* @bi: Information about the binding to track.
* @ctx_res: The context resource
* @mob: a reference to the query MOB
*
* Performs basic checks on the binding to make sure arguments are within
* bounds and then starts tracking the binding in the context binding
* state structure @cbs.
* Returns -EINVAL if a MOB has already been set and does not match the one
* specified in the parameter. 0 otherwise.
*/
int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *bi)
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
struct vmw_dma_buffer *mob)
{
struct vmw_ctx_binding *loc;
struct vmw_user_context *uctx =
container_of(ctx_res, struct vmw_user_context, res);
 
switch (bi->bt) {
case vmw_ctx_binding_rt:
if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
DRM_ERROR("Illegal render target type %u.\n",
(unsigned) bi->i1.rt_type);
return -EINVAL;
if (mob == NULL) {
if (uctx->dx_query_mob) {
uctx->dx_query_mob->dx_query_ctx = NULL;
vmw_dmabuf_unreference(&uctx->dx_query_mob);
uctx->dx_query_mob = NULL;
}
loc = &cbs->render_targets[bi->i1.rt_type];
break;
case vmw_ctx_binding_tex:
if (unlikely((unsigned)bi->i1.texture_stage >=
SVGA3D_NUM_TEXTURE_UNITS)) {
DRM_ERROR("Illegal texture/sampler unit %u.\n",
(unsigned) bi->i1.texture_stage);
return -EINVAL;
}
loc = &cbs->texture_units[bi->i1.texture_stage];
break;
case vmw_ctx_binding_shader:
if (unlikely((unsigned)bi->i1.shader_type >=
SVGA3D_SHADERTYPE_MAX)) {
DRM_ERROR("Illegal shader type %u.\n",
(unsigned) bi->i1.shader_type);
return -EINVAL;
}
loc = &cbs->shaders[bi->i1.shader_type];
break;
default:
BUG();
}
 
if (loc->bi.ctx != NULL)
vmw_context_binding_drop(loc);
 
loc->bi = *bi;
loc->bi.scrubbed = false;
list_add_tail(&loc->ctx_list, &cbs->list);
INIT_LIST_HEAD(&loc->res_list);
 
return 0;
}
 
/**
* vmw_context_binding_transfer: Transfer a context binding tracking entry.
*
* @cbs: Pointer to the persistent context binding state tracker.
* @bi: Information about the binding to track.
*
*/
static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *bi)
{
struct vmw_ctx_binding *loc;
/* Can only have one MOB per context for queries */
if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
return -EINVAL;
 
switch (bi->bt) {
case vmw_ctx_binding_rt:
loc = &cbs->render_targets[bi->i1.rt_type];
break;
case vmw_ctx_binding_tex:
loc = &cbs->texture_units[bi->i1.texture_stage];
break;
case vmw_ctx_binding_shader:
loc = &cbs->shaders[bi->i1.shader_type];
break;
default:
BUG();
}
mob->dx_query_ctx = ctx_res;
 
if (loc->bi.ctx != NULL)
vmw_context_binding_drop(loc);
if (!uctx->dx_query_mob)
uctx->dx_query_mob = vmw_dmabuf_reference(mob);
 
if (bi->res != NULL) {
loc->bi = *bi;
list_add_tail(&loc->ctx_list, &cbs->list);
list_add_tail(&loc->res_list, &bi->res->binding_head);
return 0;
}
}
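/*
 * Illustrative sketch (not part of the driver source): how a caller might
 * attach and later drop a DX query MOB for a context using
 * vmw_context_bind_dx_query() above. The helper name, the "ctx_res"/"mob"
 * arguments and the surrounding code are assumptions; the real callers
 * live in the command submission path.
 */
static int example_attach_query_mob(struct vmw_private *dev_priv,
				    struct vmw_resource *ctx_res,
				    struct vmw_dma_buffer *mob)
{
	int ret;

	mutex_lock(&dev_priv->binding_mutex);	/* the function assumes it held */
	ret = vmw_context_bind_dx_query(ctx_res, mob);	/* takes a mob reference */
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		return ret;	/* -EINVAL: a different MOB is already bound */

	/* ... submit query commands that use the MOB ... */

	mutex_lock(&dev_priv->binding_mutex);
	(void) vmw_context_bind_dx_query(ctx_res, NULL);	/* drop the association */
	mutex_unlock(&dev_priv->binding_mutex);

	return 0;
}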
 
/**
* vmw_context_binding_kill - Kill a binding on the device
* and stop tracking it.
* vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
*
* @cb: Pointer to binding tracker storage.
*
* Emits FIFO commands to scrub a binding represented by @cb.
* Then stops tracking the binding and re-initializes its storage.
* @ctx_res: The context resource
*/
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
if (!cb->bi.scrubbed) {
(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
cb->bi.scrubbed = true;
}
vmw_context_binding_drop(cb);
}
 
/**
* vmw_context_binding_state_kill - Kill all bindings associated with a
* struct vmw_ctx_binding state structure, and re-initialize the structure.
*
* @cbs: Pointer to the context binding state tracker.
*
* Emits commands to scrub all bindings associated with the
* context binding state tracker. Then re-initializes the whole structure.
*/
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
struct vmw_ctx_binding *entry, *next;
 
list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
vmw_context_binding_kill(entry);
}
 
/**
* vmw_context_binding_state_scrub - Scrub all bindings associated with a
* struct vmw_ctx_binding state structure.
*
* @cbs: Pointer to the context binding state tracker.
*
* Emits commands to scrub all bindings associated with the
* context binding state tracker.
*/
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
struct vmw_ctx_binding *entry;
 
list_for_each_entry(entry, &cbs->list, ctx_list) {
if (!entry->bi.scrubbed) {
(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
entry->bi.scrubbed = true;
}
}
}
 
/**
* vmw_context_binding_res_list_kill - Kill all bindings on a
* resource binding list
*
* @head: list head of resource binding list
*
* Kills all bindings associated with a specific resource. Typically
* called before the resource is destroyed.
*/
void vmw_context_binding_res_list_kill(struct list_head *head)
{
struct vmw_ctx_binding *entry, *next;
 
list_for_each_entry_safe(entry, next, head, res_list)
vmw_context_binding_kill(entry);
}
 
/**
* vmw_context_binding_res_list_scrub - Scrub all bindings on a
* resource binding list
*
* @head: list head of resource binding list
*
* Scrub all bindings associated with a specific resource. Typically
* called before the resource is evicted.
*/
void vmw_context_binding_res_list_scrub(struct list_head *head)
{
struct vmw_ctx_binding *entry;
 
list_for_each_entry(entry, head, res_list) {
if (!entry->bi.scrubbed) {
(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
entry->bi.scrubbed = true;
}
}
}
 
/**
* vmw_context_binding_state_transfer - Commit staged binding info
*
* @ctx: Pointer to context to commit the staged binding info to.
* @from: Staged binding info built during execbuf.
*
* Transfers binding info from a temporary structure to the persistent
* structure in the context. This can be done once commands have been
* submitted to hardware.
*/
void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
struct vmw_ctx_binding_state *from)
{
struct vmw_user_context *uctx =
container_of(ctx, struct vmw_user_context, res);
struct vmw_ctx_binding *entry, *next;
container_of(ctx_res, struct vmw_user_context, res);
 
list_for_each_entry_safe(entry, next, &from->list, ctx_list)
vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
return uctx->dx_query_mob;
}
 
/**
* vmw_context_rebind_all - Rebind all scrubbed bindings of a context
*
* @ctx: The context resource
*
* Walks through the context binding list and rebinds all scrubbed
* resources.
*/
int vmw_context_rebind_all(struct vmw_resource *ctx)
{
struct vmw_ctx_binding *entry;
struct vmw_user_context *uctx =
container_of(ctx, struct vmw_user_context, res);
struct vmw_ctx_binding_state *cbs = &uctx->cbs;
int ret;
 
list_for_each_entry(entry, &cbs->list, ctx_list) {
if (likely(!entry->bi.scrubbed))
continue;
 
if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
SVGA3D_INVALID_ID))
continue;
 
ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
if (unlikely(ret != 0))
return ret;
 
entry->bi.scrubbed = false;
}
 
return 0;
}
 
/**
* vmw_context_binding_list - Return a list of context bindings
*
* @ctx: The context resource
*
* Returns the current list of bindings of the given context. Note that
* this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
*/
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}
 
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
return container_of(ctx, struct vmw_user_context, res)->man;
}
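/*
 * Illustrative sketch (not part of the driver source): the accessors above
 * are only meaningful while dev_priv->binding_mutex is held, since the
 * returned binding state goes stale once the mutex is dropped. The helper
 * name and calling context are assumptions.
 */
static void example_scrub_context_bindings(struct vmw_private *dev_priv,
					   struct vmw_resource *ctx)
{
	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_state_scrub(vmw_context_binding_state(ctx));
	mutex_unlock(&dev_priv->binding_mutex);
}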
/drivers/video/drm/vmwgfx/vmwgfx_cotable.c
0,0 → 1,661
/**************************************************************************
*
* Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Treat context OTables as resources to make use of the resource
* backing MOB eviction mechanism that is used to read back the COTable
* whenever the backing MOB is evicted.
*/
 
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include <ttm/ttm_placement.h>
#include "vmwgfx_so.h"
 
/**
* struct vmw_cotable - Context Object Table resource
*
* @res: struct vmw_resource we are deriving from.
* @ctx: non-refcounted pointer to the owning context.
* @size_read_back: Size of data read back during eviction.
* @seen_entries: Seen entries in command stream for this cotable.
* @type: The cotable type.
* @scrubbed: Whether the cotable has been scrubbed.
* @resource_list: List of resources in the cotable.
*/
struct vmw_cotable {
struct vmw_resource res;
struct vmw_resource *ctx;
size_t size_read_back;
int seen_entries;
u32 type;
bool scrubbed;
struct list_head resource_list;
};
 
/**
* struct vmw_cotable_info - Static info about cotable types
*
* @min_initial_entries: Min number of initial entries at cotable allocation
* for this cotable type.
* @size: Size of each entry.
*/
struct vmw_cotable_info {
u32 min_initial_entries;
u32 size;
void (*unbind_func)(struct vmw_private *, struct list_head *,
bool);
};
 
static const struct vmw_cotable_info co_info[] = {
{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
{1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
{1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
{1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
{1, sizeof(SVGACOTableDXQueryEntry), NULL},
/* {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub} */
};
 
/*
* Cotables with bindings that we remove must be scrubbed first;
* otherwise the device will swap in an invalid context when we remove
* bindings before scrubbing a cotable...
*/
const SVGACOTableType vmw_cotable_scrub_order[] = {
SVGA_COTABLE_RTVIEW,
SVGA_COTABLE_DSVIEW,
SVGA_COTABLE_SRVIEW,
SVGA_COTABLE_DXSHADER,
SVGA_COTABLE_ELEMENTLAYOUT,
SVGA_COTABLE_BLENDSTATE,
SVGA_COTABLE_DEPTHSTENCIL,
SVGA_COTABLE_RASTERIZERSTATE,
SVGA_COTABLE_SAMPLER,
SVGA_COTABLE_STREAMOUTPUT,
SVGA_COTABLE_DXQUERY,
};
 
static int vmw_cotable_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_cotable_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_cotable_create(struct vmw_resource *res);
static int vmw_cotable_destroy(struct vmw_resource *res);
 
static const struct vmw_res_func vmw_cotable_func = {
.res_type = vmw_res_cotable,
.needs_backup = true,
.may_evict = true,
.type_name = "context guest backed object tables",
.backup_placement = &vmw_mob_placement,
.create = vmw_cotable_create,
.destroy = vmw_cotable_destroy,
.bind = vmw_cotable_bind,
.unbind = vmw_cotable_unbind,
};
 
/**
* vmw_cotable - Convert a struct vmw_resource pointer to a struct
* vmw_cotable pointer
*
* @res: Pointer to the resource.
*/
static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
{
return container_of(res, struct vmw_cotable, res);
}
 
/**
* vmw_cotable_destroy - Cotable resource destroy callback
*
* @res: Pointer to the cotable resource.
*
* There is no device cotable destroy command, so this function only
* makes sure that the resource id is set to invalid.
*/
static int vmw_cotable_destroy(struct vmw_resource *res)
{
res->id = -1;
return 0;
}
 
/**
* vmw_cotable_unscrub - Undo a cotable scrub operation
*
* @res: Pointer to the cotable resource
*
* This function issues commands to (re)bind the cotable to
* its backing mob, which needs to be validated and reserved at this point.
* This is identical to bind() except the function interface looks different.
*/
static int vmw_cotable_unscrub(struct vmw_resource *res)
{
struct vmw_cotable *vcotbl = vmw_cotable(res);
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = &res->backup->base;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetCOTable body;
} *cmd;
 
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
lockdep_assert_held(&bo->resv->lock.base);
 
cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
if (!cmd) {
DRM_ERROR("Failed reserving FIFO space for cotable "
"binding.\n");
return -ENOMEM;
}
 
WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = vcotbl->ctx->id;
cmd->body.type = vcotbl->type;
cmd->body.mobid = bo->mem.start;
cmd->body.validSizeInBytes = vcotbl->size_read_back;
 
vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
vcotbl->scrubbed = false;
 
return 0;
}
 
/**
* vmw_cotable_bind - Cotable resource bind callback
*
* @res: Pointer to the cotable resource
* @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
* for convenience / fencing.
*
* This function issues commands to (re)bind the cotable to
* its backing mob, which needs to be validated and reserved at this point.
*/
static int vmw_cotable_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf)
{
/*
* The create() callback may have changed @res->backup without
* the caller noticing, and with val_buf->bo still pointing to
* the old backup buffer. Although hackish, and not used currently,
* take the opportunity to correct the value here so that it's not
* misused in the future.
*/
val_buf->bo = &res->backup->base;
 
return vmw_cotable_unscrub(res);
}
 
/**
* vmw_cotable_scrub - Scrub the cotable from the device.
*
* @res: Pointer to the cotable resource.
* @readback: Whether to initiate a readback of the cotable data to the backup
* buffer.
*
* In some situations (context swapouts) it might be desirable to make the
* device forget about the cotable without performing a full unbind. A full
* unbind requires reserved backup buffers and it might not be possible to
* reserve them due to locking order violation issues. The vmw_cotable_scrub
* function implements a partial unbind() without that requirement but with the
* following restrictions.
* 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
* be called.
* 2) Before the cotable backing buffer is used by the CPU, or during the
* resource destruction, vmw_cotable_unbind() must be called.
*/
int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
{
struct vmw_cotable *vcotbl = vmw_cotable(res);
struct vmw_private *dev_priv = res->dev_priv;
size_t submit_size;
 
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXReadbackCOTable body;
} *cmd0;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetCOTable body;
} *cmd1;
 
if (vcotbl->scrubbed)
return 0;
 
if (co_info[vcotbl->type].unbind_func)
co_info[vcotbl->type].unbind_func(dev_priv,
&vcotbl->resource_list,
readback);
submit_size = sizeof(*cmd1);
if (readback)
submit_size += sizeof(*cmd0);
 
cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
if (!cmd1) {
DRM_ERROR("Failed reserving FIFO space for cotable "
"unbinding.\n");
return -ENOMEM;
}
 
vcotbl->size_read_back = 0;
if (readback) {
cmd0 = (void *) cmd1;
cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
cmd0->header.size = sizeof(cmd0->body);
cmd0->body.cid = vcotbl->ctx->id;
cmd0->body.type = vcotbl->type;
cmd1 = (void *) &cmd0[1];
vcotbl->size_read_back = res->backup_size;
}
cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.cid = vcotbl->ctx->id;
cmd1->body.type = vcotbl->type;
cmd1->body.mobid = SVGA3D_INVALID_ID;
cmd1->body.validSizeInBytes = 0;
vmw_fifo_commit_flush(dev_priv, submit_size);
vcotbl->scrubbed = true;
 
/* Trigger a create() on next validate. */
res->id = -1;
 
return 0;
}
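/*
 * Illustrative sketch (not part of the driver source): using
 * vmw_cotable_scrub() under the restrictions listed above, roughly as a
 * context swap-out path would. The helper name and calling context are
 * assumptions; in this file scrubbing happens under
 * dev_priv->binding_mutex, and re-binding happens later through the bind()
 * callback (vmw_cotable_bind() above), which calls vmw_cotable_unscrub()
 * with the backup MOB reserved.
 */
static int example_cotable_swapout(struct vmw_private *dev_priv,
				   struct vmw_resource *cotable_res)
{
	int ret;

	mutex_lock(&dev_priv->binding_mutex);
	/* Detach from the device, reading contents back into the backup MOB. */
	ret = vmw_cotable_scrub(cotable_res, true);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Restriction 2): before the CPU touches the backup buffer the
	 * cotable must also be unbound (vmw_cotable_unbind()), which fences
	 * the buffer.
	 */
	return ret;
}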
 
/**
* vmw_cotable_unbind - Cotable resource unbind callback
*
* @res: Pointer to the cotable resource.
* @readback: Whether to read back cotable data to the backup buffer.
* @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
* for convenience / fencing.
*
* Unbinds the cotable from the device and fences the backup buffer.
*/
static int vmw_cotable_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf)
{
struct vmw_cotable *vcotbl = vmw_cotable(res);
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = val_buf->bo;
struct vmw_fence_obj *fence;
 
if (list_empty(&res->mob_head))
return 0;
 
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
lockdep_assert_held(&bo->resv->lock.base);
 
mutex_lock(&dev_priv->binding_mutex);
if (!vcotbl->scrubbed)
vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
mutex_unlock(&dev_priv->binding_mutex);
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
vmw_fence_single_bo(bo, fence);
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
 
return 0;
}
 
/**
* vmw_cotable_readback - Read back a cotable without unbinding.
*
* @res: The cotable resource.
*
* Reads back a cotable to its backing mob without detaching the MOB from
* the cotable. The MOB is fenced for subsequent CPU access.
*/
static int vmw_cotable_readback(struct vmw_resource *res)
{
struct vmw_cotable *vcotbl = vmw_cotable(res);
struct vmw_private *dev_priv = res->dev_priv;
 
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXReadbackCOTable body;
} *cmd;
struct vmw_fence_obj *fence;
 
if (!vcotbl->scrubbed) {
cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
SVGA3D_INVALID_ID);
if (!cmd) {
DRM_ERROR("Failed reserving FIFO space for cotable "
"readback.\n");
return -ENOMEM;
}
cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = vcotbl->ctx->id;
cmd->body.type = vcotbl->type;
vcotbl->size_read_back = res->backup_size;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
 
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
vmw_fence_single_bo(&res->backup->base, fence);
vmw_fence_obj_unreference(&fence);
 
return 0;
}
 
/**
* vmw_cotable_resize - Resize a cotable.
*
* @res: The cotable resource.
* @new_size: The new size.
*
* Resizes a cotable and binds the new backup buffer.
* On failure the cotable is left intact.
* Important! This function may not fail once the MOB switch has been
* committed to hardware. That would put the device context in an
* invalid state which we can't currently recover from.
*/
static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_cotable *vcotbl = vmw_cotable(res);
struct vmw_dma_buffer *buf, *old_buf = res->backup;
struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
size_t old_size = res->backup_size;
size_t old_size_read_back = vcotbl->size_read_back;
size_t cur_size_read_back;
struct ttm_bo_kmap_obj old_map, new_map;
int ret;
size_t i;
 
ret = vmw_cotable_readback(res);
if (ret)
return ret;
 
cur_size_read_back = vcotbl->size_read_back;
vcotbl->size_read_back = old_size_read_back;
 
/*
* While the device is processing, allocate and reserve a buffer object
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
 
ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
true, vmw_dmabuf_bo_free);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
return ret;
}
 
bo = &buf->base;
WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL));
 
ret = ttm_bo_wait(old_bo, false, false, false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed waiting for cotable unbind.\n");
goto out_wait;
}
 
/*
* Do a page by page copy of COTables. This eliminates slow vmap()s.
* This should really be a TTM utility.
*/
for (i = 0; i < old_bo->num_pages; ++i) {
bool dummy;
 
ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed mapping old COTable on resize.\n");
goto out_wait;
}
ret = ttm_bo_kmap(bo, i, 1, &new_map);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed mapping new COTable on resize.\n");
goto out_map_new;
}
memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
ttm_kmap_obj_virtual(&old_map, &dummy),
PAGE_SIZE);
ttm_bo_kunmap(&new_map);
ttm_bo_kunmap(&old_map);
}
 
/* Unpin new buffer, and switch backup buffers. */
ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed validating new COTable backup buffer.\n");
goto out_wait;
}
 
res->backup = buf;
res->backup_size = new_size;
vcotbl->size_read_back = cur_size_read_back;
 
/*
* Now tell the device to switch. If this fails, then we need to
* revert the full resize.
*/
ret = vmw_cotable_unscrub(res);
if (ret) {
DRM_ERROR("Failed switching COTable backup buffer.\n");
res->backup = old_buf;
res->backup_size = old_size;
vcotbl->size_read_back = old_size_read_back;
goto out_wait;
}
 
/* Let go of the old mob. */
list_del(&res->mob_head);
list_add_tail(&res->mob_head, &buf->res_list);
vmw_dmabuf_unreference(&old_buf);
res->id = vcotbl->type;
 
return 0;
 
out_map_new:
ttm_bo_kunmap(&old_map);
out_wait:
ttm_bo_unreserve(bo);
vmw_dmabuf_unreference(&buf);
 
return ret;
}
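/*
 * Illustrative sketch (not part of the driver source): the page-by-page
 * TTM copy used in vmw_cotable_resize() above, factored out as the kind of
 * helper the comment in that function asks for. Assumes both objects are
 * reserved and idle and that @dst has at least as many pages as @src; the
 * helper name is an assumption.
 */
static int example_ttm_bo_copy_pages(struct ttm_buffer_object *src,
				     struct ttm_buffer_object *dst)
{
	struct ttm_bo_kmap_obj src_map, dst_map;
	bool dummy;
	size_t i;
	int ret;

	for (i = 0; i < src->num_pages; ++i) {
		ret = ttm_bo_kmap(src, i, 1, &src_map);
		if (ret)
			return ret;
		ret = ttm_bo_kmap(dst, i, 1, &dst_map);
		if (ret) {
			ttm_bo_kunmap(&src_map);
			return ret;
		}
		memcpy(ttm_kmap_obj_virtual(&dst_map, &dummy),
		       ttm_kmap_obj_virtual(&src_map, &dummy),
		       PAGE_SIZE);
		ttm_bo_kunmap(&dst_map);
		ttm_bo_kunmap(&src_map);
	}

	return 0;
}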
 
/**
* vmw_cotable_create - Cotable resource create callback
*
* @res: Pointer to a cotable resource.
*
* There is no separate create command for cotables, so this callback, which
* is called before bind() in the validation sequence is instead used for two
* things.
* 1) Unscrub the cotable if it is scrubbed and still attached to a backup
* buffer, that is, if @res->mob_head is non-empty.
* 2) Resize the cotable if needed.
*/
static int vmw_cotable_create(struct vmw_resource *res)
{
struct vmw_cotable *vcotbl = vmw_cotable(res);
size_t new_size = res->backup_size;
size_t needed_size;
int ret;
 
/* Check whether we need to resize the cotable */
needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
while (needed_size > new_size)
new_size *= 2;
 
if (likely(new_size <= res->backup_size)) {
if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
ret = vmw_cotable_unscrub(res);
if (ret)
return ret;
}
res->id = vcotbl->type;
return 0;
}
 
return vmw_cotable_resize(res, new_size);
}
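/*
 * Illustrative sketch (not part of the driver source): the grow-on-demand
 * size computation used by vmw_cotable_create() above, shown stand-alone.
 * The helper name and the example numbers in the comment are made up.
 */
static size_t example_cotable_new_size(size_t cur_size, int seen_entries,
				       size_t entry_size)
{
	size_t needed = (seen_entries + 1) * entry_size;
	size_t new_size = cur_size;

	while (needed > new_size)	/* double until the highest seen id fits */
		new_size *= 2;

	return new_size;	/* e.g. 4096 -> 8192 for entry 70 of 64 bytes each */
}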
 
/**
* vmw_hw_cotable_destroy - Cotable hw_destroy callback
*
* @res: Pointer to a cotable resource.
*
* The final (part of resource destruction) destroy callback.
*/
static void vmw_hw_cotable_destroy(struct vmw_resource *res)
{
(void) vmw_cotable_destroy(res);
}
 
static size_t cotable_acc_size;
 
/**
* vmw_cotable_free - Cotable resource destructor
*
* @res: Pointer to a cotable resource.
*/
static void vmw_cotable_free(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
 
kfree(res);
ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
}
 
/**
* vmw_cotable_alloc - Create a cotable resource
*
* @dev_priv: Pointer to a device private struct.
* @ctx: Pointer to the context resource.
* The cotable resource will not add a refcount.
* @type: The cotable type.
*/
struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
struct vmw_resource *ctx,
u32 type)
{
struct vmw_cotable *vcotbl;
int ret;
u32 num_entries;
 
if (unlikely(cotable_acc_size == 0))
cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
 
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
cotable_acc_size, false, true);
if (unlikely(ret))
return ERR_PTR(ret);
 
vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
if (unlikely(vcotbl == NULL)) {
ret = -ENOMEM;
goto out_no_alloc;
}
 
ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
vmw_cotable_free, &vmw_cotable_func);
if (unlikely(ret != 0))
goto out_no_init;
 
INIT_LIST_HEAD(&vcotbl->resource_list);
vcotbl->res.id = type;
vcotbl->res.backup_size = PAGE_SIZE;
num_entries = PAGE_SIZE / co_info[type].size;
if (num_entries < co_info[type].min_initial_entries) {
vcotbl->res.backup_size = co_info[type].min_initial_entries *
co_info[type].size;
vcotbl->res.backup_size =
(vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
}
 
vcotbl->scrubbed = true;
vcotbl->seen_entries = -1;
vcotbl->type = type;
vcotbl->ctx = ctx;
 
vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);
 
return &vcotbl->res;
 
out_no_init:
kfree(vcotbl);
out_no_alloc:
ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
return ERR_PTR(ret);
}
 
/**
* vmw_cotable_notify - Notify the cotable about an item creation
*
* @res: Pointer to a cotable resource.
* @id: Item id.
*/
int vmw_cotable_notify(struct vmw_resource *res, int id)
{
struct vmw_cotable *vcotbl = vmw_cotable(res);
 
if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
(unsigned) vcotbl->type, id);
return -EINVAL;
}
 
if (vcotbl->seen_entries < id) {
/* Trigger a call to create() on next validate */
res->id = -1;
vcotbl->seen_entries = id;
}
 
return 0;
}
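/*
 * Illustrative sketch (not part of the driver source): typical cotable
 * notification, as done from the command submission path when user-space
 * defines a new per-context object. The helper name and "view_id" are
 * assumptions; vmw_context_cotable() and vmw_cotable_notify() are the
 * functions shown in this patch.
 */
static int example_notify_cotable(struct vmw_resource *ctx, int view_id)
{
	struct vmw_resource *cotable;
	int ret;

	cotable = vmw_context_cotable(ctx, SVGA_COTABLE_RTVIEW);
	if (IS_ERR(cotable))
		return PTR_ERR(cotable);

	/* A high id marks the cotable for a resize on the next validate. */
	ret = vmw_cotable_notify(cotable, view_id);
	vmw_resource_unreference(&cotable);

	return ret;
}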
 
/**
* vmw_cotable_add_resource - Add a resource to the cotable's list of active
* resources.
*
* @res: Pointer to the struct vmw_resource representing the cotable.
* @head: pointer to the struct list_head member of the resource, dedicated
* to the cotable active resource list.
*/
void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
{
struct vmw_cotable *vcotbl =
container_of(res, struct vmw_cotable, res);
 
list_add_tail(head, &vcotbl->resource_list);
}
/drivers/video/drm/vmwgfx/vmwgfx_dmabuf.c
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
32,22 → 32,17
 
 
/**
* vmw_dmabuf_to_placement - Validate a buffer to placement.
* vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @pin: Pin buffer if true.
* @placement: The placement to pin it.
* @interruptible: Use interruptible wait.
*
* May only be called by the current master since it assumes that the
* master lock is the current master's lock.
* This function takes the master's lock in write mode.
* Flushes and unpins the query bo to avoid failures.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
struct ttm_placement *placement,
bool interruptible)
55,9 → 50,9
struct ttm_buffer_object *bo = &buf->base;
int ret;
 
// ret = ttm_write_lock(&vmaster->lock, interruptible);
// if (unlikely(ret != 0))
// return ret;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
 
vmw_execbuf_release_pinned_bo(dev_priv);
 
66,21 → 61,21
goto err;
 
ret = ttm_bo_validate(bo, placement, interruptible, false);
if (!ret)
vmw_bo_pin_reserved(buf, true);
 
ttm_bo_unreserve(bo);
 
err:
// ttm_write_unlock(&vmaster->lock);
ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
 
/**
* vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
* vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
*
* May only be called by the current master since it assumes that the
* master lock is the current master's lock.
* This function takes the master's lock in write mode.
* Flushes and unpins the query bo if @pin == true to avoid failures.
* This function takes the reservation_sem in write mode.
* Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
90,19 → 85,17
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool pin, bool interruptible)
bool interruptible)
{
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement *placement;
int ret;
 
// ret = ttm_write_lock(&vmaster->lock, interruptible);
// if (unlikely(ret != 0))
// return ret;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
 
if (pin)
vmw_execbuf_release_pinned_bo(dev_priv);
 
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
109,114 → 102,87
if (unlikely(ret != 0))
goto err;
 
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
* If there is no space in VRAM and GMR ids are all used up,
* start evicting GMRs to make room. If the DMA buffer can't be
* used as a GMR, this will return -ENOMEM.
*/
 
if (pin)
placement = &vmw_vram_gmr_ne_placement;
else
placement = &vmw_vram_gmr_placement;
 
ret = ttm_bo_validate(bo, placement, interruptible, false);
ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
false);
if (likely(ret == 0) || ret == -ERESTARTSYS)
goto err_unreserve;
goto out_unreserve;
 
ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
 
/**
* If that failed, try VRAM again, this time evicting
* previous contents.
*/
out_unreserve:
if (!ret)
vmw_bo_pin_reserved(buf, true);
 
if (pin)
placement = &vmw_vram_ne_placement;
else
placement = &vmw_vram_placement;
 
ret = ttm_bo_validate(bo, placement, interruptible, false);
 
err_unreserve:
ttm_bo_unreserve(bo);
err:
// ttm_write_unlock(&vmaster->lock);
ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
 
/**
* vmw_dmabuf_to_vram - Move a buffer to vram.
* vmw_dmabuf_pin_in_vram - Move a buffer to vram.
*
* May only be called by the current master since it assumes that the
* master lock is the current master's lock.
* This function takes the master's lock in write mode.
* This function takes the reservation_sem in write mode.
* Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @pin: Pin buffer in vram if true.
* @interruptible: Use interruptible wait.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool pin, bool interruptible)
bool interruptible)
{
struct ttm_placement *placement;
 
if (pin)
placement = &vmw_vram_ne_placement;
else
placement = &vmw_vram_placement;
 
return vmw_dmabuf_to_placement(dev_priv, buf,
placement,
return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
interruptible);
}
 
/**
* vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
* vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
*
* May only be called by the current master since it assumes that the
* master lock is the current master's lock.
* This function takes the master's lock in write mode.
* Flushes and unpins the query bo if @pin == true to avoid failures.
* This function takes the reservation_sem in write mode.
* Flushes and unpins the query bo to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @pin: Pin buffer in vram if true.
* @buf: DMA buffer to pin.
* @interruptible: Use interruptible wait.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool pin, bool interruptible)
bool interruptible)
{
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement placement;
struct ttm_place place;
int ret = 0;
 
if (pin)
placement = vmw_vram_ne_placement;
else
placement = vmw_vram_placement;
placement.lpfn = bo->num_pages;
place = vmw_vram_placement.placement[0];
place.lpfn = bo->num_pages;
placement.num_placement = 1;
placement.placement = &place;
placement.num_busy_placement = 1;
placement.busy_placement = &place;
 
// ret = ttm_write_lock(&vmaster->lock, interruptible);
// if (unlikely(ret != 0))
// return ret;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
 
if (pin)
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err_unlock;
 
/* Is this buffer already in vram but not at the start of it? */
/*
* Is this buffer already in vram but not at the start of it?
* In that case, evict it first because TTM isn't good at handling
* that situation.
*/
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
bo->mem.start > 0)
224,23 → 190,22
 
ret = ttm_bo_validate(bo, &placement, interruptible, false);
 
/* For some reason we didn't up at the start of vram */
/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->offset != 0);
if (!ret)
vmw_bo_pin_reserved(buf, true);
 
ttm_bo_unreserve(bo);
err_unlock:
// ttm_write_unlock(&vmaster->lock);
ttm_write_unlock(&dev_priv->reservation_sem);
 
return ret;
}
 
 
/**
* vmw_dmabuf_upin - Unpin the buffer given buffer, does not move the buffer.
* vmw_dmabuf_unpin - Unpin the given buffer without moving it.
*
* May only be called by the current master since it assumes that the
* master lock is the current master's lock.
* This function takes the master's lock in write mode.
* This function takes the reservation_sem in write mode.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to unpin.
253,17 → 218,26
struct vmw_dma_buffer *buf,
bool interruptible)
{
/*
* We could in theory early out if the buffer is
* unpinned but we need to lock and reserve the buffer
* anyways so we don't gain much by that.
*/
return vmw_dmabuf_to_placement(dev_priv, buf,
&vmw_evictable_placement,
interruptible);
struct ttm_buffer_object *bo = &buf->base;
int ret;
 
ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
return ret;
 
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err;
 
vmw_bo_pin_reserved(buf, false);
 
ttm_bo_unreserve(bo);
 
err:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
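/*
 * Illustrative sketch (not part of the driver source): pairing one of the
 * pin helpers above with vmw_dmabuf_unpin(). The helper name, the scanout
 * use case and the reduced error handling are assumptions.
 */
static int example_pin_for_scanout(struct vmw_private *dev_priv,
				   struct vmw_dma_buffer *buf)
{
	int ret;

	ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true /* interruptible */);
	if (ret)
		return ret;	/* may be -ERESTARTSYS if interrupted by a signal */

	/* ... scan out from, or otherwise rely on, the pinned buffer ... */

	return vmw_dmabuf_unpin(dev_priv, buf, false);
}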
 
 
/**
* vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
* of a buffer.
285,29 → 259,41
 
 
/**
* vmw_bo_pin - Pin or unpin a buffer object without moving it.
* vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
*
* @bo: The buffer object. Must be reserved.
* @vbo: The buffer object. Must be reserved.
* @pin: Whether to pin or unpin.
*
*/
void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
{
uint32_t pl_flags;
struct ttm_place pl;
struct ttm_placement placement;
struct ttm_buffer_object *bo = &vbo->base;
uint32_t old_mem_type = bo->mem.mem_type;
int ret;
 
lockdep_assert_held(&bo->resv->lock.base);
 
pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
if (pin) {
if (vbo->pin_count++ > 0)
return;
} else {
WARN_ON(vbo->pin_count <= 0);
if (--vbo->pin_count > 0)
return;
}
 
pl.fpfn = 0;
pl.lpfn = 0;
pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
if (pin)
pl_flags |= TTM_PL_FLAG_NO_EVICT;
pl.flags |= TTM_PL_FLAG_NO_EVICT;
 
memset(&placement, 0, sizeof(placement));
placement.num_placement = 1;
placement.placement = &pl_flags;
placement.placement = &pl;
 
ret = ttm_bo_validate(bo, &placement, false, true);
 
/drivers/video/drm/vmwgfx/vmwgfx_drv.c
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
28,10 → 28,11
 
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
//#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>
 
#define VMWGFX_DRIVER_NAME "vmwgfx"
127,6 → 128,9
#define DRM_IOCTL_VMW_SYNCCPU \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
struct drm_vmw_context_arg)
 
/**
* The core DRM version of this macro doesn't account for
134,7 → 138,7
*/
 
#define VMW_IOCTL_DEF(ioctl, func, flags) \
[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
 
/**
* Ioctl definitions.
142,70 → 146,73
 
static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
vmw_kms_cursor_bypass_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
DRM_MASTER | DRM_CONTROL_ALLOW),
 
VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
DRM_MASTER | DRM_CONTROL_ALLOW),
VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
DRM_MASTER | DRM_CONTROL_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
DRM_MASTER | DRM_CONTROL_ALLOW),
 
VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
vmw_fence_obj_signaled_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
 
/* these allow direct access to the framebuffers; mark as master only */
VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
DRM_MASTER | DRM_AUTH),
VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
vmw_present_readback_ioctl,
DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
DRM_MASTER | DRM_AUTH),
VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
vmw_kms_update_layout_ioctl,
DRM_MASTER | DRM_UNLOCKED),
DRM_MASTER),
VMW_IOCTL_DEF(VMW_CREATE_SHADER,
vmw_shader_define_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_SHADER,
vmw_shader_destroy_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
vmw_gb_surface_define_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
vmw_gb_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_SYNCCPU,
vmw_user_dmabuf_synccpu_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
vmw_extended_context_define_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
};
#endif
 
276,6 → 283,8
DRM_INFO(" Command Buffers 2.\n");
if (capabilities & SVGA_CAP_GBOBJECTS)
DRM_INFO(" Guest Backed Resources.\n");
if (capabilities & SVGA_CAP_DX)
DRM_INFO(" DX Features.\n");
}
 
/**
294,30 → 303,31
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
int ret;
struct ttm_buffer_object *bo;
struct vmw_dma_buffer *vbo;
struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result;
bool dummy;
 
/*
* Create the bo as pinned, so that a tryreserve will
* Create the vbo as pinned, so that a tryreserve will
* immediately succeed. This is because we're the only
* user of the bo currently.
*/
ret = ttm_bo_create(&dev_priv->bdev,
PAGE_SIZE,
ttm_bo_type_device,
&vmw_sys_ne_placement,
0, false, NULL,
&bo);
vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
if (!vbo)
return -ENOMEM;
 
ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
&vmw_sys_ne_placement, false,
&vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
return ret;
 
ret = ttm_bo_reserve(bo, false, true, false, NULL);
ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
BUG_ON(ret != 0);
vmw_bo_pin_reserved(vbo, true);
 
ret = ttm_bo_kmap(bo, 0, 1, &map);
ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
result->totalSize = sizeof(*result);
325,18 → 335,55
result->result32 = 0xff;
ttm_bo_kunmap(&map);
}
vmw_bo_pin(bo, false);
ttm_bo_unreserve(bo);
vmw_bo_pin_reserved(vbo, false);
ttm_bo_unreserve(&vbo->base);
 
if (unlikely(ret != 0)) {
DRM_ERROR("Dummy query buffer map failed.\n");
ttm_bo_unref(&bo);
vmw_dmabuf_unreference(&vbo);
} else
dev_priv->dummy_query_bo = bo;
dev_priv->dummy_query_bo = vbo;
 
return ret;
}
 
/**
* vmw_request_device_late - Perform late device setup
*
* @dev_priv: Pointer to device private.
*
* This function performs setup of otables and enables large command
* buffer submission. These tasks are split out to a separate function
* because it reverts vmw_release_device_early and is intended to be used
* by an error path in the hibernation code.
*/
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
int ret;
 
if (dev_priv->has_mob) {
ret = vmw_otables_setup(dev_priv);
if (unlikely(ret != 0)) {
DRM_ERROR("Unable to initialize "
"guest Memory OBjects.\n");
return ret;
}
}
 
if (dev_priv->cman) {
ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
256*4096, 2*4096);
if (ret) {
struct vmw_cmdbuf_man *man = dev_priv->cman;
 
dev_priv->cman = NULL;
vmw_cmdbuf_man_destroy(man);
}
}
 
return 0;
}
 
static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
346,23 → 393,47
DRM_ERROR("Unable to initialize FIFO.\n");
return ret;
}
// vmw_fence_fifo_up(dev_priv->fman);
// ret = vmw_dummy_query_bo_create(dev_priv);
// if (unlikely(ret != 0))
// goto out_no_query_bo;
// vmw_dummy_query_bo_prepare(dev_priv);
vmw_fence_fifo_up(dev_priv->fman);
dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
if (IS_ERR(dev_priv->cman)) {
dev_priv->cman = NULL;
dev_priv->has_dx = false;
}
 
ret = vmw_request_device_late(dev_priv);
if (ret)
goto out_no_mob;
 
ret = vmw_dummy_query_bo_create(dev_priv);
if (unlikely(ret != 0))
goto out_no_query_bo;
 
return 0;
 
out_no_query_bo:
if (dev_priv->cman)
vmw_cmdbuf_remove_pool(dev_priv->cman);
if (dev_priv->has_mob) {
// (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
vmw_otables_takedown(dev_priv);
}
if (dev_priv->cman)
vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
return ret;
}
 
static void vmw_release_device(struct vmw_private *dev_priv)
/**
* vmw_release_device_early - Early part of fifo takedown.
*
* @dev_priv: Pointer to device private struct.
*
* This is the first part of command submission takedown, to be called before
* buffer management is taken down.
*/
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
/*
* Previous destructions should've released
371,70 → 442,33
 
BUG_ON(dev_priv->pinned_bo != NULL);
 
ttm_bo_unref(&dev_priv->dummy_query_bo);
vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
if (dev_priv->cman)
vmw_cmdbuf_remove_pool(dev_priv->cman);
 
 
/**
* Increase the 3d resource refcount.
* If the count was previously zero, initialize the fifo, switching to svga
* mode. Note that the master holds a ref as well, and may request an
* explicit switch to svga mode if fb is not running, using @unhide_svga.
*/
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
bool unhide_svga)
{
int ret = 0;
 
mutex_lock(&dev_priv->release_mutex);
if (unlikely(dev_priv->num_3d_resources++ == 0)) {
ret = vmw_request_device(dev_priv);
if (unlikely(ret != 0))
--dev_priv->num_3d_resources;
} else if (unhide_svga) {
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) &
~SVGA_REG_ENABLE_HIDE);
mutex_unlock(&dev_priv->hw_mutex);
if (dev_priv->has_mob) {
ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
vmw_otables_takedown(dev_priv);
}
 
mutex_unlock(&dev_priv->release_mutex);
return ret;
}
 
/**
* Decrease the 3d resource refcount.
* If the count reaches zero, disable the fifo, switching to vga mode.
* Note that the master holds a refcount as well, and may request an
* explicit switch to vga mode when it releases its refcount to account
* for the situation of an X server vt switch to VGA with 3d resources
* active.
* vmw_release_device_late - Late part of fifo takedown.
*
* @dev_priv: Pointer to device private struct.
*
* This is the last part of the command submission takedown, to be called when
* command submission is no longer needed. It may wait on pending fences.
*/
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
bool hide_svga)
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
int32_t n3d;
vmw_fence_fifo_down(dev_priv->fman);
if (dev_priv->cman)
vmw_cmdbuf_man_destroy(dev_priv->cman);
 
mutex_lock(&dev_priv->release_mutex);
if (unlikely(--dev_priv->num_3d_resources == 0))
vmw_release_device(dev_priv);
else if (hide_svga) {
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) |
SVGA_REG_ENABLE_HIDE);
mutex_unlock(&dev_priv->hw_mutex);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
 
n3d = (int32_t) dev_priv->num_3d_resources;
mutex_unlock(&dev_priv->release_mutex);
 
BUG_ON(n3d < 0);
}
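/*
 * Illustrative sketch only: the early/late split documented above exists so
 * that the buffer managers can be torn down in between the two calls, mirroring
 * the unload path later in this diff. The wrapper name is hypothetical.
 */
#if 0
static void example_teardown_order(struct vmw_private *dev_priv)
{
vmw_release_device_early(dev_priv);
if (dev_priv->has_mob)
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
(void) ttm_bo_device_release(&dev_priv->bdev);
vmw_release_device_late(dev_priv);
}
#endif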
 
/**
* Sets the initial_[width|height] fields on the given vmw_private.
*
490,10 → 524,55
[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
[vmw_dma_map_populate] = "Keeping DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_enabled) {
dev_priv->map_mode = vmw_dma_map_populate;
goto out_fixup;
}
#endif
 
if (!(vmw_force_iommu || vmw_force_coherent)) {
dev_priv->map_mode = vmw_dma_phys;
DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
return 0;
}
 
dev_priv->map_mode = vmw_dma_map_populate;
 
if (dma_ops->sync_single_for_cpu)
dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl() == 0)
dev_priv->map_mode = vmw_dma_map_populate;
#endif
 
#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
if (dev_priv->map_mode == vmw_dma_map_populate &&
vmw_restrict_iommu)
dev_priv->map_mode = vmw_dma_map_bind;
 
if (vmw_force_coherent)
dev_priv->map_mode = vmw_dma_alloc_coherent;
 
#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
/*
* No coherent page pool
*/
if (dev_priv->map_mode == vmw_dma_alloc_coherent)
return -EINVAL;
#endif
 
#else /* CONFIG_X86 */
dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */
 
DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 
return 0;
}
 
543,12 → 622,15
dev_priv->dev = dev;
dev_priv->vmw_chipset = chipset;
dev_priv->last_read_seqno = (uint32_t) -100;
mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
spin_lock_init(&dev_priv->hw_lock);
spin_lock_init(&dev_priv->waiter_lock);
spin_lock_init(&dev_priv->cap_lock);
spin_lock_init(&dev_priv->svga_lock);
 
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
559,7 → 641,7
init_waitqueue_head(&dev_priv->fence_queue);
init_waitqueue_head(&dev_priv->fifo_queue);
dev_priv->fence_queue_waiters = 0;
atomic_set(&dev_priv->fifo_queue_waiters, 0);
dev_priv->fifo_queue_waiters = 0;
 
dev_priv->used_memory_size = 0;
 
569,14 → 651,11
 
dev_priv->enable_fb = enable_fbdev;
 
mutex_lock(&dev_priv->hw_mutex);
 
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
svga_id = vmw_read(dev_priv, SVGA_REG_ID);
if (svga_id != SVGA_ID_2) {
ret = -ENOSYS;
DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
mutex_unlock(&dev_priv->hw_mutex);
goto out_err0;
}
 
622,22 → 701,31
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
dev_priv->max_mob_size =
vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
} else
dev_priv->stdu_max_width =
vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
dev_priv->stdu_max_height =
vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
 
vmw_write(dev_priv, SVGA_REG_DEV_CAP,
SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
dev_priv->texture_max_width = vmw_read(dev_priv,
SVGA_REG_DEV_CAP);
vmw_write(dev_priv, SVGA_REG_DEV_CAP,
SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
dev_priv->texture_max_height = vmw_read(dev_priv,
SVGA_REG_DEV_CAP);
} else {
dev_priv->texture_max_width = 8192;
dev_priv->texture_max_height = 8192;
dev_priv->prim_bb_mem = dev_priv->vram_size;
}
 
vmw_print_capabilities(dev_priv->capabilities);
 
ret = vmw_dma_masks(dev_priv);
if (unlikely(ret != 0)) {
mutex_unlock(&dev_priv->hw_mutex);
if (unlikely(ret != 0))
goto out_err0;
}
 
if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
dev_priv->prim_bb_mem = dev_priv->vram_size;
 
mutex_unlock(&dev_priv->hw_mutex);
 
vmw_print_capabilities(dev_priv->capabilities);
 
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max GMR ids is %u\n",
(unsigned)dev_priv->max_gmr_ids);
661,43 → 749,6
vmw_master_init(&dev_priv->fbdev_master);
dev_priv->active_master = &dev_priv->fbdev_master;
 
 
ret = ttm_bo_device_init(&dev_priv->bdev,
dev_priv->bo_global_ref.ref.object,
&vmw_bo_driver,
NULL,
VMWGFX_FILE_PAGE_OFFSET,
false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
goto out_err1;
}
 
ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
(dev_priv->vram_size >> PAGE_SHIFT));
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing memory manager for VRAM.\n");
goto out_err2;
}
 
dev_priv->has_gmr = true;
if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
VMW_PL_GMR) != 0) {
DRM_INFO("No GMR memory available. "
"Graphics memory resources are very limited.\n");
dev_priv->has_gmr = false;
}
 
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
dev_priv->has_mob = true;
if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
VMW_PL_MOB) != 0) {
DRM_INFO("No MOB memory available. "
"3D will be disabled.\n");
dev_priv->has_mob = false;
}
}
dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
dev_priv->mmio_size);
 
728,7 → 779,22
dev->dev_private = dev_priv;
 
#if 0
ret = pci_request_regions(dev->pdev, "vmwgfx probe");
dev_priv->stealth = (ret != 0);
if (dev_priv->stealth) {
/**
* Request at least the mmio PCI resource.
*/
 
DRM_INFO("It appears like vesafb is loaded. "
"Ignore above error if any.\n");
ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
if (unlikely(ret != 0)) {
DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
goto out_no_device;
}
}
 
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
ret = drm_irq_install(dev, dev->pdev->irq);
if (ret != 0) {
736,6 → 802,7
goto out_no_irq;
}
}
#endif
 
dev_priv->fman = vmw_fence_manager_init(dev_priv);
if (unlikely(dev_priv->fman == NULL)) {
743,30 → 810,91
goto out_no_fman;
}
 
vmw_kms_save_vga(dev_priv);
#endif
ret = ttm_bo_device_init(&dev_priv->bdev,
dev_priv->bo_global_ref.ref.object,
&vmw_bo_driver,
NULL,
VMWGFX_FILE_PAGE_OFFSET,
false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
goto out_no_bdev;
}
 
/* Start kms and overlay systems, needs fifo. */
/*
* Enable VRAM, but initially don't use it until SVGA is enabled and
* unhidden.
*/
ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
(dev_priv->vram_size >> PAGE_SHIFT));
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing memory manager for VRAM.\n");
goto out_no_vram;
}
dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
 
dev_priv->has_gmr = true;
if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
VMW_PL_GMR) != 0) {
DRM_INFO("No GMR memory available. "
"Graphics memory resources are very limited.\n");
dev_priv->has_gmr = false;
}
 
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
dev_priv->has_mob = true;
if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
VMW_PL_MOB) != 0) {
DRM_INFO("No MOB memory available. "
"3D will be disabled.\n");
dev_priv->has_mob = false;
}
}
 
if (dev_priv->has_mob) {
spin_lock(&dev_priv->cap_lock);
vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
spin_unlock(&dev_priv->cap_lock);
}
 
 
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
vmw_overlay_init(dev_priv);
 
if (dev_priv->enable_fb) {
ret = vmw_3d_resource_inc(dev_priv, true);
if (unlikely(ret != 0))
ret = vmw_request_device(dev_priv);
if (ret)
goto out_no_fifo;
// vmw_fb_init(dev_priv);
}
 
DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
 
system_wq = alloc_ordered_workqueue("vmwgfx", 0);
main_device = dev;
 
if (dev_priv->enable_fb) {
vmw_fifo_resource_inc(dev_priv);
vmw_svga_enable(dev_priv);
vmw_fb_init(dev_priv);
}
LINE();
 
return 0;
 
out_no_fifo:
// vmw_overlay_close(dev_priv);
// vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
vmw_kms_close(dev_priv);
out_no_kms:
// vmw_kms_restore_vga(dev_priv);
// if (dev_priv->has_mob)
// (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
// if (dev_priv->has_gmr)
// (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
// (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
// (void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
// vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
// if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
779,20 → 907,15
out_no_device:
// ttm_object_device_release(&dev_priv->tdev);
out_err4:
// iounmap(dev_priv->mmio_virt);
// memunmap(dev_priv->mmio_virt);
out_err3:
// arch_phys_wc_del(dev_priv->mmio_mtrr);
// if (dev_priv->has_gmr)
// (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
// (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
// (void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
// vmw_ttm_global_release(dev_priv);
out_err0:
// for (i = vmw_res_context; i < vmw_res_max; ++i)
// idr_destroy(&dev_priv->res_idr[i]);
 
// if (dev_priv->ctx.staged_bindings)
// vmw_binding_state_free(dev_priv->ctx.staged_bindings);
kfree(dev_priv);
return ret;
}
807,15 → 930,26
 
if (dev_priv->ctx.res_ht_initialized)
drm_ht_remove(&dev_priv->ctx.res_ht);
if (dev_priv->ctx.cmd_bounce)
vfree(dev_priv->ctx.cmd_bounce);
if (dev_priv->enable_fb) {
vmw_fb_off(dev_priv);
vmw_fb_close(dev_priv);
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, false);
vmw_fifo_resource_dec(dev_priv);
vmw_svga_disable(dev_priv);
}
 
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
 
if (dev_priv->has_gmr)
(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 
vmw_release_device_early(dev_priv);
if (dev_priv->has_mob)
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
(void) ttm_bo_device_release(&dev_priv->bdev);
vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
drm_irq_uninstall(dev_priv->dev);
825,14 → 959,9
pci_release_regions(dev->pdev);
 
ttm_object_device_release(&dev_priv->tdev);
iounmap(dev_priv->mmio_virt);
arch_phys_wc_del(dev_priv->mmio_mtrr);
if (dev_priv->has_mob)
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
if (dev_priv->has_gmr)
(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
(void)ttm_bo_device_release(&dev_priv->bdev);
memunmap(dev_priv->mmio_virt);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
vmw_ttm_global_release(dev_priv);
 
for (i = vmw_res_context; i < vmw_res_max; ++i)
897,12 → 1026,67
}
 
#if 0
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
static struct vmw_master *vmw_master_check(struct drm_device *dev,
struct drm_file *file_priv,
unsigned int flags)
{
int ret;
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
struct vmw_master *vmaster;
 
if (file_priv->minor->type != DRM_MINOR_LEGACY ||
!(flags & DRM_AUTH))
return NULL;
 
ret = mutex_lock_interruptible(&dev->master_mutex);
if (unlikely(ret != 0))
return ERR_PTR(-ERESTARTSYS);
 
if (file_priv->is_master) {
mutex_unlock(&dev->master_mutex);
return NULL;
}
 
/*
* Check if we were previously master, but now dropped. In that
* case, allow at least render node functionality.
*/
if (vmw_fp->locked_master) {
mutex_unlock(&dev->master_mutex);
 
if (flags & DRM_RENDER_ALLOW)
return NULL;
 
DRM_ERROR("Dropped master trying to access ioctl that "
"requires authentication.\n");
return ERR_PTR(-EACCES);
}
mutex_unlock(&dev->master_mutex);
 
/*
* Take the TTM lock. Possibly sleep waiting for the authenticating
* master to become master again, or for a SIGTERM if the
* authenticating master exits.
*/
vmaster = vmw_master(file_priv->master);
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
vmaster = ERR_PTR(ret);
 
return vmaster;
}
 
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg,
long (*ioctl_func)(struct file *, unsigned int,
unsigned long))
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
unsigned int nr = DRM_IOCTL_NR(cmd);
struct vmw_master *vmaster;
unsigned int flags;
long ret;
 
/*
* Do extra checking on driver private ioctls.
913,43 → 1097,70
const struct drm_ioctl_desc *ioctl =
&vmw_ioctls[nr - DRM_COMMAND_BASE];
 
if (unlikely(ioctl->cmd_drv != cmd)) {
if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
if (unlikely(ret != 0))
return ret;
 
if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
goto out_io_encoding;
 
return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
_IOC_SIZE(cmd));
}
 
if (unlikely(ioctl->cmd != cmd))
goto out_io_encoding;
 
flags = ioctl->flags;
} else if (!drm_ioctl_flags(nr, &flags))
return -EINVAL;
 
vmaster = vmw_master_check(dev, file_priv, flags);
if (IS_ERR(vmaster)) {
ret = PTR_ERR(vmaster);
 
if (ret != -ERESTARTSYS)
DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
nr, ret);
return ret;
}
 
ret = ioctl_func(filp, cmd, arg);
if (vmaster)
ttm_read_unlock(&vmaster->lock);
 
return ret;
 
out_io_encoding:
DRM_ERROR("Invalid command format, ioctl %d\n",
nr - DRM_COMMAND_BASE);
 
return -EINVAL;
}
 
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}
 
return drm_ioctl(filp, cmd, arg);
#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif
 
static void vmw_lastclose(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_mode_set set;
int ret;
 
set.x = 0;
set.y = 0;
set.fb = NULL;
set.mode = NULL;
set.connectors = NULL;
set.num_connectors = 0;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
set.crtc = crtc;
ret = drm_mode_set_config_internal(&set);
WARN_ON(ret != 0);
}
 
}
#endif
 
static void vmw_master_init(struct vmw_master *vmaster)
{
ttm_lock_init(&vmaster->lock);
INIT_LIST_HEAD(&vmaster->fb_surf);
mutex_init(&vmaster->fb_surf_mutex);
}
 
static int vmw_master_create(struct drm_device *dev,
988,29 → 1199,13
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret = 0;
 
if (!dev_priv->enable_fb) {
ret = vmw_3d_resource_inc(dev_priv, true);
if (unlikely(ret != 0))
return ret;
vmw_kms_save_vga(dev_priv);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 0);
mutex_unlock(&dev_priv->hw_mutex);
}
 
if (active) {
BUG_ON(active != &dev_priv->fbdev_master);
ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
if (unlikely(ret != 0))
goto out_no_active_lock;
return ret;
 
ttm_lock_set_kill(&active->lock, true, SIGTERM);
ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
if (unlikely(ret != 0)) {
DRM_ERROR("Unable to clean VRAM on "
"master drop.\n");
}
 
dev_priv->active_master = NULL;
}
 
1024,17 → 1219,7
dev_priv->active_master = vmaster;
 
return 0;
 
out_no_active_lock:
if (!dev_priv->enable_fb) {
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
}
return ret;
}
 
static void vmw_master_drop(struct drm_device *dev,
struct drm_file *file_priv,
1052,23 → 1237,15
 
vmw_fp->locked_master = drm_master_get(file_priv->master);
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
vmw_kms_legacy_hotspot_clear(dev_priv);
if (unlikely((ret != 0))) {
DRM_ERROR("Unable to lock TTM at VT switch.\n");
drm_master_put(&vmw_fp->locked_master);
}
 
vmw_execbuf_release_pinned_bo(dev_priv);
 
if (!dev_priv->enable_fb) {
ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
if (unlikely(ret != 0))
DRM_ERROR("Unable to clean VRAM on master drop.\n");
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
}
if (!dev_priv->enable_fb)
vmw_svga_disable(dev_priv);
 
dev_priv->active_master = &dev_priv->fbdev_master;
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
1077,8 → 1254,36
if (dev_priv->enable_fb)
vmw_fb_on(dev_priv);
}
#endif
/**
* __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
*
* @dev_priv: Pointer to device private struct.
* Needs the reservation sem to be held in non-exclusive mode.
*/
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
spin_lock(&dev_priv->svga_lock);
if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
}
spin_unlock(&dev_priv->svga_lock);
}
 
/**
* vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
*
* @dev_priv: Pointer to device private struct.
*/
void vmw_svga_enable(struct vmw_private *dev_priv)
{
ttm_read_lock(&dev_priv->reservation_sem, false);
__vmw_svga_enable(dev_priv);
ttm_read_unlock(&dev_priv->reservation_sem);
}
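/*
 * Illustrative sketch only: fbdev bring-up pairs a fifo resource reference
 * with enabling SVGA before touching the framebuffer, mirroring the
 * enable_fb branch in the device-load code earlier in this diff. The helper
 * name is hypothetical.
 */
#if 0
static void example_enable_fbdev(struct vmw_private *dev_priv)
{
vmw_fifo_resource_inc(dev_priv);
vmw_svga_enable(dev_priv);
vmw_fb_init(dev_priv);
}
#endif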
 
#if 0
static void vmw_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
1094,23 → 1299,26
 
switch (val) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
if (dev_priv->enable_fb)
vmw_fb_off(dev_priv);
ttm_suspend_lock(&dev_priv->reservation_sem);
 
/**
/*
* This empties VRAM and unbinds all GMR bindings.
* Buffer contents are moved to swappable memory.
*/
vmw_execbuf_release_pinned_bo(dev_priv);
vmw_resource_evict_all(dev_priv);
vmw_release_device_early(dev_priv);
ttm_bo_swapout_all(&dev_priv->bdev);
 
vmw_fence_fifo_down(dev_priv->fman);
break;
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
case PM_POST_RESTORE:
vmw_fence_fifo_up(dev_priv->fman);
ttm_suspend_unlock(&dev_priv->reservation_sem);
 
if (dev_priv->enable_fb)
vmw_fb_on(dev_priv);
break;
case PM_RESTORE_PREPARE:
break;
1120,20 → 1328,13
return 0;
}
 
/**
* These might not be needed with the virtual SVGA device.
*/
 
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
 
if (dev_priv->num_3d_resources != 0) {
DRM_INFO("Can't suspend or hibernate "
"while 3D resources are active.\n");
if (dev_priv->refuse_hibernation)
return -EBUSY;
}
 
pci_save_state(pdev);
pci_disable_device(pdev);
1165,31 → 1366,30
return vmw_pci_resume(pdev);
}
 
static int vmw_pm_prepare(struct device *kdev)
static int vmw_pm_freeze(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
 
/**
* Release 3d reference held by fbdev and potentially
* stop fifo.
*/
dev_priv->suspended = true;
if (dev_priv->enable_fb)
vmw_3d_resource_dec(dev_priv, true);
vmw_fifo_resource_dec(dev_priv);
 
if (dev_priv->num_3d_resources != 0) {
 
DRM_INFO("Can't suspend or hibernate "
"while 3D resources are active.\n");
 
if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
DRM_ERROR("Can't hibernate while 3D resources are active.\n");
if (dev_priv->enable_fb)
vmw_3d_resource_inc(dev_priv, true);
vmw_fifo_resource_inc(dev_priv);
WARN_ON(vmw_request_device_late(dev_priv));
dev_priv->suspended = false;
return -EBUSY;
}
 
if (dev_priv->enable_fb)
__vmw_svga_disable(dev_priv);
vmw_release_device_late(dev_priv);
 
return 0;
}
 
1205,9 → 1405,9
.irq_postinstall = vmw_irq_postinstall,
// .irq_uninstall = vmw_irq_uninstall,
.irq_handler = vmw_irq_handler,
// .get_vblank_counter = vmw_get_vblank_counter,
// .enable_vblank = vmw_enable_vblank,
// .disable_vblank = vmw_disable_vblank,
.get_vblank_counter = vmw_get_vblank_counter,
.enable_vblank = vmw_enable_vblank,
.disable_vblank = vmw_disable_vblank,
// .ioctls = vmw_ioctls,
// .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
.open = vmw_driver_open,
/drivers/video/drm/vmwgfx/vmwgfx_drv.h
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
32,7 → 32,6
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <linux/scatterlist.h>
//#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
41,10 → 40,10
//#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
 
#define VMWGFX_DRIVER_DATE "20140704"
#define VMWGFX_DRIVER_DATE "20150810"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 6
#define VMWGFX_DRIVER_PATCHLEVEL 1
#define VMWGFX_DRIVER_MINOR 9
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
51,7 → 50,7
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
 
/*
* Perhaps we should have sysfs entries for these.
60,6 → 59,8
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
VMWGFX_NUM_GB_SHADER +\
VMWGFX_NUM_GB_SURFACE +\
76,8 → 77,6
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4
 
#define ioread32(addr) readl(addr)
 
static inline void outl(u32 v, u16 port)
{
asm volatile("outl %0,%1" : : "a" (v), "dN" (port));
88,10 → 87,8
asm volatile("inl %1,%0" : "=a" (v) : "dN" (port));
return v;
}
 
 
struct vmw_fpriv {
// struct drm_master *locked_master;
struct drm_master *locked_master;
struct ttm_object_file *tfile;
struct list_head fence_events;
bool gb_aware;
100,6 → 97,9
struct vmw_dma_buffer {
struct ttm_buffer_object base;
struct list_head res_list;
s32 pin_count;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
};
 
/**
128,6 → 128,7
bool backup_dirty; /* Protected by backup buffer reserved */
struct vmw_dma_buffer *backup;
unsigned long backup_offset;
unsigned long pin_count; /* Protected by resource reserved */
const struct vmw_res_func *func;
struct list_head lru_head; /* Protected by the resource lock */
struct list_head mob_head; /* Protected by @backup reserved */
145,6 → 146,9
vmw_res_surface,
vmw_res_stream,
vmw_res_shader,
vmw_res_dx_context,
vmw_res_cotable,
vmw_res_view,
vmw_res_max
};
 
152,7 → 156,8
* Resources that are managed using command streams.
*/
enum vmw_cmdbuf_res_type {
vmw_cmdbuf_res_compat_shader
vmw_cmdbuf_res_shader,
vmw_cmdbuf_res_view
};
 
struct vmw_cmdbuf_res_manager;
175,11 → 180,13
struct drm_vmw_size *sizes;
uint32_t num_sizes;
bool scanout;
uint32_t array_size;
/* TODO so far just an extra pointer */
struct vmw_cursor_snooper snooper;
struct vmw_surface_offset *offsets;
SVGA3dTextureFilter autogen_filter;
uint32_t multisample_count;
struct list_head view_list;
};
 
struct vmw_marker_queue {
191,8 → 198,8
 
struct vmw_fifo_state {
unsigned long reserved_size;
__le32 *dynamic_buffer;
__le32 *static_buffer;
u32 *dynamic_buffer;
u32 *static_buffer;
unsigned long static_buffer_size;
bool using_bounce_buffer;
uint32_t capabilities;
199,6 → 206,7
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
struct vmw_marker_queue marker_queue;
bool dx;
};
 
struct vmw_relocation {
242,7 → 250,7
* device-specific information.
*
* @sgt: Pointer to a struct sg_table with binding information
* @num_regions: Number of regions with device-address contigous pages
* @num_regions: Number of regions with device-address contiguous pages
*/
struct vmw_sg_table {
enum vmw_dma_map_mode mode;
279,71 → 287,16
};
 
/*
* enum vmw_ctx_binding_type - abstract resource to context binding types
* enum vmw_display_unit_type - Describes the display unit
*/
enum vmw_ctx_binding_type {
vmw_ctx_binding_shader,
vmw_ctx_binding_rt,
vmw_ctx_binding_tex,
vmw_ctx_binding_max
enum vmw_display_unit_type {
vmw_du_invalid = 0,
vmw_du_legacy,
vmw_du_screen_object,
vmw_du_screen_target
};
 
/**
* struct vmw_ctx_bindinfo - structure representing a single context binding
*
* @ctx: Pointer to the context structure. NULL means the binding is not
* active.
* @res: Non ref-counted pointer to the bound resource.
* @bt: The binding type.
* @i1: Union of information needed to unbind.
*/
struct vmw_ctx_bindinfo {
struct vmw_resource *ctx;
struct vmw_resource *res;
enum vmw_ctx_binding_type bt;
bool scrubbed;
union {
SVGA3dShaderType shader_type;
SVGA3dRenderTargetType rt_type;
uint32 texture_stage;
} i1;
};
 
/**
* struct vmw_ctx_binding - structure representing a single context binding
* - suitable for tracking in a context
*
* @ctx_list: List head for context.
* @res_list: List head for bound resource.
* @bi: Binding info
*/
struct vmw_ctx_binding {
struct list_head ctx_list;
struct list_head res_list;
struct vmw_ctx_bindinfo bi;
};
 
 
/**
* struct vmw_ctx_binding_state - context binding state
*
* @list: linked list of individual bindings.
* @render_targets: Render target bindings.
* @texture_units: Texture units/samplers bindings.
* @shaders: Shader bindings.
*
* Note that this structure also provides storage space for the individual
* struct vmw_ctx_binding objects, so that no dynamic allocation is needed
* for individual bindings.
*
*/
struct vmw_ctx_binding_state {
struct list_head list;
struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
};
 
struct vmw_sw_context{
struct drm_open_hash res_ht;
bool res_ht_initialized;
357,8 → 310,8
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
struct list_head resource_list;
uint32_t fence_flags;
struct ttm_buffer_object *cur_query_bo;
struct list_head ctx_resource_list; /* For contexts and cotables */
struct vmw_dma_buffer *cur_query_bo;
struct list_head res_relocations;
uint32_t *buf_start;
struct vmw_res_cache_entry res_cache[vmw_res_max];
365,8 → 318,13
struct vmw_resource *last_query_ctx;
bool needs_post_query_barrier;
struct vmw_resource *error_resource;
struct vmw_ctx_binding_state staged_bindings;
struct vmw_ctx_binding_state *staged_bindings;
bool staged_bindings_inuse;
struct list_head staged_cmd_res;
struct vmw_resource_val_node *dx_ctx_node;
struct vmw_dma_buffer *dx_query_mob;
struct vmw_resource *dx_query_ctx;
struct vmw_cmdbuf_res_manager *man;
};
 
struct vmw_legacy_display;
374,8 → 332,6
 
struct vmw_master {
struct ttm_lock lock;
struct mutex fb_surf_mutex;
struct list_head fb_surf;
};
 
struct vmw_vga_topology_state {
386,6 → 342,26
uint32_t pos_y;
};
 
 
/*
* struct vmw_otable - Guest Memory OBject table metadata
*
* @size: Size of the table (page-aligned).
* @page_table: Pointer to a struct vmw_mob holding the page table.
*/
struct vmw_otable {
unsigned long size;
struct vmw_mob *page_table;
bool enabled;
};
 
struct vmw_otable_batch {
unsigned num_otables;
struct vmw_otable *otables;
struct vmw_resource *context;
struct ttm_buffer_object *otable_bo;
};
 
struct vmw_private {
struct ttm_bo_device bdev;
struct ttm_bo_global_ref bo_global_ref;
403,10 → 379,13
uint32_t mmio_size;
uint32_t fb_max_width;
uint32_t fb_max_height;
uint32_t texture_max_width;
uint32_t texture_max_height;
uint32_t stdu_max_width;
uint32_t stdu_max_height;
uint32_t initial_width;
uint32_t initial_height;
__le32 __iomem *mmio_virt;
int mmio_mtrr;
u32 *mmio_virt;
uint32_t capabilities;
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
415,7 → 394,9
uint32_t memory_size;
bool has_gmr;
bool has_mob;
struct mutex hw_mutex;
spinlock_t hw_lock;
spinlock_t cap_lock;
bool has_dx;
 
/*
* VGA registers.
435,6 → 416,7
*/
 
void *fb_info;
enum vmw_display_unit_type active_display_unit;
struct vmw_legacy_display *ldu_priv;
struct vmw_screen_object_display *sou_priv;
struct vmw_overlay *overlay_priv;
465,13 → 447,15
atomic_t marker_seq;
wait_queue_head_t fence_queue;
wait_queue_head_t fifo_queue;
int fence_queue_waiters; /* Protected by hw_mutex */
int goal_queue_waiters; /* Protected by hw_mutex */
atomic_t fifo_queue_waiters;
spinlock_t waiter_lock;
int fence_queue_waiters; /* Protected by waiter_lock */
int goal_queue_waiters; /* Protected by waiter_lock */
int cmdbuf_waiters; /* Protected by waiter_lock */
int error_waiters; /* Protected by waiter_lock */
int fifo_queue_waiters; /* Protected by waiter_lock */
uint32_t last_read_seqno;
spinlock_t irq_lock;
struct vmw_fence_manager *fman;
uint32_t irq_mask;
uint32_t irq_mask; /* Updates protected by waiter_lock */
 
/*
* Device state
498,6 → 482,7
 
bool stealth;
bool enable_fb;
spinlock_t svga_lock;
 
/**
* Master management.
505,11 → 490,11
 
struct vmw_master *active_master;
struct vmw_master fbdev_master;
// struct notifier_block pm_nb;
bool suspended;
bool refuse_hibernation;
 
struct mutex release_mutex;
uint32_t num_3d_resources;
atomic_t num_fifo_resources;
 
/*
* Replace this with an rwsem as soon as we have down_xx_interruptible()
521,8 → 506,8
* are protected by the cmdbuf mutex.
*/
 
struct ttm_buffer_object *dummy_query_bo;
struct ttm_buffer_object *pinned_bo;
struct vmw_dma_buffer *dummy_query_bo;
struct vmw_dma_buffer *pinned_bo;
uint32_t query_cid;
uint32_t query_cid_valid;
bool dummy_query_bo_pinned;
545,8 → 530,9
/*
* Guest Backed stuff
*/
struct ttm_buffer_object *otable_bo;
struct vmw_otable *otables;
struct vmw_otable_batch otable_batch;
 
struct vmw_cmdbuf_man *cman;
};
 
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
569,26 → 555,42
return (struct vmw_master *) master->driver_priv;
}
 
/*
* The locking here is fine-grained, so that it is performed once
* for every read- and write operation. This is of course costly, but we
* don't perform much register access in the timing critical paths anyway.
* Instead we have the extra benefit of being sure that we don't forget
* the hw lock around register accesses.
*/
static inline void vmw_write(struct vmw_private *dev_priv,
unsigned int offset, uint32_t value)
{
unsigned long irq_flags;
 
spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}
 
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
unsigned int offset)
{
uint32_t val;
unsigned long irq_flags;
u32 val;
 
spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
 
return val;
}
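/*
 * Illustrative sketch only: all register accesses go through vmw_write() and
 * vmw_read(), so hw_lock is taken around each index/value port pair and
 * callers need no extra locking. This mirrors the SVGA_REG_ID negotiation in
 * the device-load code earlier in this diff; the helper name is hypothetical.
 */
#if 0
static inline bool example_svga_id_is_v2(struct vmw_private *dev_priv)
{
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
return vmw_read(dev_priv, SVGA_REG_ID) == SVGA_ID_2;
}
#endif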
 
int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);
 
 
/**
* GMR utilities - vmwgfx_gmr.c
*/
609,7 → 611,8
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
635,7 → 638,8
uint32_t size,
bool shareable,
uint32_t *handle,
struct vmw_dma_buffer **p_dma_buf);
struct vmw_dma_buffer **p_dma_buf,
struct ttm_base_object **p_base);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
struct vmw_dma_buffer *dma_buf,
uint32_t *handle);
649,7 → 653,8
uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t id, struct vmw_dma_buffer **out);
uint32_t id, struct vmw_dma_buffer **out,
struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
659,10 → 664,14
uint32_t *inout_id,
struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
bool switch_backup,
struct vmw_dma_buffer *new_backup,
unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
670,25 → 679,25
/**
* DMA buffer helper routines - vmwgfx_dmabuf.c
*/
extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo,
struct ttm_placement *placement,
bool interruptible);
extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool pin, bool interruptible);
extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
bool interruptible);
extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool pin, bool interruptible);
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
bool interruptible);
extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo,
bool pin, bool interruptible);
bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo,
bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr);
extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
 
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
702,6 → 711,8
struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern unsigned int vmw_fops_poll(struct file *filp,
struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
 
714,14 → 725,20
extern void vmw_fifo_release(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
uint32_t cid);
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
bool interruptible);
 
/**
* TTM glue - vmwgfx_ttm_glue.c
746,6 → 763,7
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
796,8 → 814,8
* Command submission - vmwgfx_execbuf.c
*/
 
extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
struct drm_file *file_priv, size_t size);
extern int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands,
804,6 → 822,7
void *kernel_commands,
uint32_t command_size,
uint64_t throttle_us,
uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj **out_fence);
822,7 → 841,12
*user_fence_rep,
struct vmw_fence_obj *fence,
uint32_t fence_handle);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo,
bool interruptible,
bool validate_as_mob);
 
 
/**
* IRQs and waiting - vmwgfx_irq.c
*/
848,6 → 872,10
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count);
 
/**
* Rudimentary fence-like objects currently used only for throttling -
894,9 → 922,9
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
int vmw_enable_vblank(struct drm_device *dev, int crtc);
void vmw_disable_vblank(struct drm_device *dev, int crtc);
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
904,14 → 932,9
uint32_t sid, int32_t destX, int32_t destY,
struct drm_vmw_rect *clips,
uint32_t num_clips);
int vmw_kms_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct drm_vmw_rect *clips,
uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
 
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
923,6 → 946,10
int vmw_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
 
/**
* Overlay control - vmwgfx_overlay.c
*/
946,6 → 973,18
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
 
/**
* Prime - vmwgfx_prime.c
*/
 
extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv,
int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t handle, uint32_t flags,
int *prime_fd);
 
/*
* MemoryOBject management - vmwgfx_mob.c
*/
966,8 → 1005,6
 
extern const struct vmw_user_resource_conv *user_context_converter;
 
extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
 
extern int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
int id,
974,18 → 1011,27
struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *ci);
extern void
vmw_context_binding_state_transfer(struct vmw_resource *res,
struct vmw_ctx_binding_state *cbs);
extern void vmw_context_binding_res_list_kill(struct list_head *head);
extern void vmw_context_binding_res_list_scrub(struct list_head *head);
extern int vmw_context_rebind_all(struct vmw_resource *ctx);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
SVGACOTableType cotable_type);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
struct vmw_dma_buffer *mob);
extern struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
 
 
/*
* Surface management - vmwgfx_surface.c
*/
993,11 → 1039,31
extern const struct vmw_user_resource_conv *user_surface_converter;
 
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
uint32_t user_accounting_size,
uint32_t svga3d_flags,
SVGA3dSurfaceFormat format,
bool for_scanout,
uint32_t num_mip_levels,
uint32_t multisample_count,
uint32_t array_size,
struct drm_vmw_size size,
struct vmw_surface **srf_out);
 
/*
* Shader management - vmwgfx_shader.c
1004,6 → 1070,37
*/
 
extern const struct vmw_user_resource_conv *user_shader_converter;
 
extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
struct vmw_cmdbuf_res_manager *man,
u32 user_key, const void *bytecode,
SVGA3dShaderType shader_type,
size_t size,
struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
u32 user_key, SVGA3dShaderType shader_type,
struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_resource *ctx,
u32 user_key,
SVGA3dShaderType shader_type,
struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
struct list_head *list,
bool readback);
 
extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
u32 user_key, SVGA3dShaderType shader_type);
 
/*
* Command buffer managed resources - vmwgfx_cmdbuf_res.c
*/
 
extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
1022,9 → 1119,50
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key,
struct list_head *list);
struct list_head *list,
struct vmw_resource **res);
 
/*
* COTable management - vmwgfx_cotable.c
*/
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
struct vmw_resource *ctx,
u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
struct list_head *head);
 
/*
* Command buffer management - vmwgfx_cmdbuf.c
*/
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;
 
extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
int ctx_id, bool interruptible,
struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
struct vmw_cmdbuf_header *header,
bool flush);
extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
size_t size, bool interruptible,
struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
bool interruptible);
 
 
/**
* Inline helper functions
*/
1068,15 → 1206,39
return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}
 
extern struct drm_device *main_device;
extern struct drm_file *drm_file_handlers[256];
static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
atomic_inc(&dev_priv->num_fifo_resources);
}
 
typedef struct
static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
int width;
int height;
int bpp;
int freq;
}videomode_t;
atomic_dec(&dev_priv->num_fifo_resources);
}
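/*
 * Illustrative sketch only: suspend-type paths drop the fbdev fifo reference
 * and back out if other references are still held, as vmw_pm_freeze() does in
 * vmwgfx_drv.c. The helper name is hypothetical.
 */
#if 0
static inline int example_drop_fbdev_fifo_ref(struct vmw_private *dev_priv)
{
vmw_fifo_resource_dec(dev_priv);
if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
vmw_fifo_resource_inc(dev_priv);
return -EBUSY;
}
return 0;
}
#endif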
 
/**
* vmw_mmio_read - Perform a MMIO read from volatile memory
*
* @addr: The address to read from
*
* This function is intended to be equivalent to ioread32() on
* memremap'd memory, but without byteswapping.
*/
static inline u32 vmw_mmio_read(u32 *addr)
{
return READ_ONCE(*addr);
}
 
/**
* vmw_mmio_write - Perform a MMIO write to volatile memory
*
* @addr: The address to write to
*
* This function is intended to be equivalent to iowrite32 on
* memremap'd memory, but without byteswapping.
*/
static inline void vmw_mmio_write(u32 value, u32 *addr)
{
WRITE_ONCE(*addr, value);
}
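/*
 * Illustrative sketch only: with mmio_virt now a plain u32 pointer to
 * memremap'd memory, FIFO registers are read through vmw_mmio_read() rather
 * than ioread32(). SVGA_FIFO_MIN is assumed to come from the SVGA register
 * headers; the helper name is hypothetical.
 */
#if 0
static inline u32 example_read_fifo_min(struct vmw_private *dev_priv)
{
u32 *fifo_mem = dev_priv->mmio_virt;

return vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
}
#endif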
#endif
/drivers/video/drm/vmwgfx/vmwgfx_execbuf.c
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
29,6 → 29,8
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
 
#define VMW_RES_HT_ORDER 12
 
59,8 → 61,11
* @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
* @first_usage: Set to true the first time the resource is referenced in
* the command stream.
* @no_buffer_needed: Resources do not need to allocate buffer backup on
* reservation. The command stream will provide one.
* @switching_backup: The command stream provides a new backup buffer for a
* resource.
* @no_buffer_needed: This means @switching_backup is true on first buffer
* reference. So resource reservation does not need to allocate a backup
* buffer for the resource.
*/
struct vmw_resource_val_node {
struct list_head head;
69,8 → 74,9
struct vmw_dma_buffer *new_backup;
struct vmw_ctx_binding_state *staged_bindings;
unsigned long new_backup_offset;
bool first_usage;
bool no_buffer_needed;
u32 first_usage : 1;
u32 switching_backup : 1;
u32 no_buffer_needed : 1;
};
 
/**
92,22 → 98,40
[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
(_gb_disable), (_gb_enable)}
 
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct vmw_dma_buffer *vbo,
bool validate_as_mob,
uint32_t *p_val_node);
 
 
/**
* vmw_resource_unreserve - unreserve resources previously reserved for
* vmw_resources_unreserve - unreserve resources previously reserved for
* command submission.
*
* @list_head: list of resources to unreserve.
* @sw_context: pointer to the software context
* @backoff: Whether command submission failed.
*/
static void vmw_resource_list_unreserve(struct list_head *list,
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
bool backoff)
{
struct vmw_resource_val_node *val;
struct list_head *list = &sw_context->resource_list;
 
if (sw_context->dx_query_mob && !backoff)
vmw_context_bind_dx_query(sw_context->dx_query_ctx,
sw_context->dx_query_mob);
 
list_for_each_entry(val, list, head) {
struct vmw_resource *res = val->res;
struct vmw_dma_buffer *new_backup =
backoff ? NULL : val->new_backup;
bool switch_backup =
(backoff) ? false : val->switching_backup;
 
/*
* Transfer staged context bindings to the
115,19 → 139,72
*/
if (unlikely(val->staged_bindings)) {
if (!backoff) {
vmw_context_binding_state_transfer
(val->res, val->staged_bindings);
vmw_binding_state_commit
(vmw_context_binding_state(val->res),
val->staged_bindings);
}
kfree(val->staged_bindings);
 
if (val->staged_bindings != sw_context->staged_bindings)
vmw_binding_state_free(val->staged_bindings);
else
sw_context->staged_bindings_inuse = false;
val->staged_bindings = NULL;
}
vmw_resource_unreserve(res, new_backup,
vmw_resource_unreserve(res, switch_backup, val->new_backup,
val->new_backup_offset);
vmw_dmabuf_unreference(&val->new_backup);
}
}
 
/**
* vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
* added to the validate list.
*
* @dev_priv: Pointer to the device private.
* @sw_context: The validation context.
* @node: The validation node holding this context.
*/
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
struct vmw_resource_val_node *node)
{
int ret;
 
ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
if (unlikely(ret != 0))
goto out_err;
 
if (!sw_context->staged_bindings) {
sw_context->staged_bindings =
vmw_binding_state_alloc(dev_priv);
if (IS_ERR(sw_context->staged_bindings)) {
DRM_ERROR("Failed to allocate context binding "
"information.\n");
ret = PTR_ERR(sw_context->staged_bindings);
sw_context->staged_bindings = NULL;
goto out_err;
}
}
 
if (sw_context->staged_bindings_inuse) {
node->staged_bindings = vmw_binding_state_alloc(dev_priv);
if (IS_ERR(node->staged_bindings)) {
DRM_ERROR("Failed to allocate context binding "
"information.\n");
ret = PTR_ERR(node->staged_bindings);
node->staged_bindings = NULL;
goto out_err;
}
} else {
node->staged_bindings = sw_context->staged_bindings;
sw_context->staged_bindings_inuse = true;
}
 
return 0;
out_err:
return ret;
}
 
/**
* vmw_resource_val_add - Add a resource to the software context's
* resource list if it's not already on it.
141,6 → 218,7
struct vmw_resource *res,
struct vmw_resource_val_node **p_node)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_resource_val_node *node;
struct drm_hash_item *hash;
int ret;
169,17 → 247,93
kfree(node);
return ret;
}
list_add_tail(&node->head, &sw_context->resource_list);
node->res = vmw_resource_reference(res);
node->first_usage = true;
 
if (unlikely(p_node != NULL))
*p_node = node;
 
if (!dev_priv->has_mob) {
list_add_tail(&node->head, &sw_context->resource_list);
return 0;
}
 
switch (vmw_res_type(res)) {
case vmw_res_context:
case vmw_res_dx_context:
list_add(&node->head, &sw_context->ctx_resource_list);
ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
break;
case vmw_res_cotable:
list_add_tail(&node->head, &sw_context->ctx_resource_list);
break;
default:
list_add_tail(&node->head, &sw_context->resource_list);
break;
}
 
return ret;
}
 
/**
* vmw_view_res_val_add - Add a view and the surface it's pointing to
* to the validation list
*
* @sw_context: The software context holding the validation list.
* @view: Pointer to the view resource.
*
* Returns 0 if success, negative error code otherwise.
*/
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
struct vmw_resource *view)
{
int ret;
 
/*
* First add the resource the view is pointing to, otherwise
* it may be swapped out when the view is validated.
*/
ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
if (ret)
return ret;
 
return vmw_resource_val_add(sw_context, view, NULL);
}
 
/**
* vmw_view_id_val_add - Look up a view and add it, and the surface it's
* pointing to, to the validation list.
*
* @sw_context: The software context holding the validation list.
* @view_type: The view type to look up.
* @id: view id of the view.
*
* The view is represented by a view id and the DX context it's created on,
* or scheduled for creation on. If there is no DX context set, the function
* will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
*/
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
enum vmw_view_type view_type, u32 id)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *view;
int ret;
 
if (!ctx_node) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
 
view = vmw_view_lookup(sw_context->man, view_type, id);
if (IS_ERR(view))
return PTR_ERR(view);
 
ret = vmw_view_res_val_add(sw_context, view);
vmw_resource_unreference(&view);
 
return ret;
}
 
/**
* vmw_resource_context_res_add - Put resources previously bound to a context on
* the validation list
*
195,24 → 349,56
struct vmw_resource *ctx)
{
struct list_head *binding_list;
struct vmw_ctx_binding *entry;
struct vmw_ctx_bindinfo *entry;
int ret = 0;
struct vmw_resource *res;
u32 i;
 
/* Add all cotables to the validation list. */
if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
res = vmw_context_cotable(ctx, i);
if (IS_ERR(res))
continue;
 
ret = vmw_resource_val_add(sw_context, res, NULL);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
return ret;
}
}
 
 
/* Add all resources bound to the context to the validation list */
mutex_lock(&dev_priv->binding_mutex);
binding_list = vmw_context_binding_list(ctx);
 
list_for_each_entry(entry, binding_list, ctx_list) {
res = vmw_resource_reference_unless_doomed(entry->bi.res);
/* entry->res is not refcounted */
res = vmw_resource_reference_unless_doomed(entry->res);
if (unlikely(res == NULL))
continue;
 
ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
if (vmw_res_type(entry->res) == vmw_res_view)
ret = vmw_view_res_val_add(sw_context, entry->res);
else
ret = vmw_resource_val_add(sw_context, entry->res,
NULL);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
break;
}
 
if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
struct vmw_dma_buffer *dx_query_mob;
 
dx_query_mob = vmw_context_get_dx_query_mob(ctx);
if (dx_query_mob)
ret = vmw_bo_to_validate_list(sw_context,
dx_query_mob,
true, NULL);
}
 
mutex_unlock(&dev_priv->binding_mutex);
return ret;
}
308,7 → 494,7
* submission is reached.
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct ttm_buffer_object *bo,
struct vmw_dma_buffer *vbo,
bool validate_as_mob,
uint32_t *p_val_node)
{
318,7 → 504,7
struct drm_hash_item *hash;
int ret;
 
if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
&hash) == 0)) {
vval_buf = container_of(hash, struct vmw_validate_buffer,
hash);
336,7 → 522,7
return -EINVAL;
}
vval_buf = &sw_context->val_bufs[val_node];
vval_buf->hash.key = (unsigned long) bo;
vval_buf->hash.key = (unsigned long) vbo;
ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to initialize a buffer validation "
345,14 → 531,12
}
++sw_context->cur_val_buf;
val_buf = &vval_buf->base;
val_buf->bo = ttm_bo_reference(bo);
val_buf->reserved = false;
val_buf->bo = ttm_bo_reference(&vbo->base);
val_buf->shared = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
vval_buf->validate_as_mob = validate_as_mob;
}
 
sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
 
if (p_val_node)
*p_val_node = val_node;
 
372,20 → 556,20
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
int ret;
int ret = 0;
 
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
 
ret = vmw_resource_reserve(res, val->no_buffer_needed);
ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
if (unlikely(ret != 0))
return ret;
 
if (res->backup) {
struct ttm_buffer_object *bo = &res->backup->base;
struct vmw_dma_buffer *vbo = res->backup;
 
ret = vmw_bo_to_validate_list
(sw_context, bo,
(sw_context, vbo,
vmw_resource_needs_backup(res), NULL);
 
if (unlikely(ret != 0))
392,9 → 576,21
return ret;
}
}
return 0;
 
if (sw_context->dx_query_mob) {
struct vmw_dma_buffer *expected_dx_query_mob;
 
expected_dx_query_mob =
vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
if (expected_dx_query_mob &&
expected_dx_query_mob != sw_context->dx_query_mob) {
ret = -EINVAL;
}
}
 
return ret;
}
 
/**
* vmw_resources_validate - Validate all resources on the sw_context's
* resource list.
411,6 → 607,7
 
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
struct vmw_dma_buffer *backup = res->backup;
 
ret = vmw_resource_validate(res);
if (unlikely(ret != 0)) {
418,11 → 615,23
DRM_ERROR("Failed to validate resource.\n");
return ret;
}
 
/* Check if the resource switched backup buffer */
if (backup && res->backup && (backup != res->backup)) {
struct vmw_dma_buffer *vbo = res->backup;
 
ret = vmw_bo_to_validate_list
(sw_context, vbo,
vmw_resource_needs_backup(res), NULL);
if (ret) {
ttm_bo_unreserve(&vbo->base);
return ret;
}
}
}
return 0;
}
 
 
/**
* vmw_cmd_res_reloc_add - Add a resource to a software context's
* relocation- and validation lists.
429,7 → 638,6
*
* @dev_priv: Pointer to a struct vmw_private identifying the device.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @id_loc: Pointer to where the id that needs translation is located.
* @res: Valid pointer to a struct vmw_resource.
* @p_val: If non null, a pointer to the struct vmw_resource_validate_node
437,7 → 645,6
*/
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
enum vmw_res_type res_type,
uint32_t *id_loc,
struct vmw_resource *res,
struct vmw_resource_val_node **p_val)
450,40 → 657,16
res,
id_loc - sw_context->buf_start);
if (unlikely(ret != 0))
goto out_err;
return ret;
 
ret = vmw_resource_val_add(sw_context, res, &node);
if (unlikely(ret != 0))
goto out_err;
return ret;
 
if (res_type == vmw_res_context && dev_priv->has_mob &&
node->first_usage) {
 
/*
* Put contexts first on the list to be able to exit
* list traversal for contexts early.
*/
list_del(&node->head);
list_add(&node->head, &sw_context->resource_list);
 
ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
if (unlikely(ret != 0))
goto out_err;
node->staged_bindings =
kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
if (node->staged_bindings == NULL) {
DRM_ERROR("Failed to allocate context binding "
"information.\n");
goto out_err;
}
INIT_LIST_HEAD(&node->staged_bindings->list);
}
 
if (p_val)
*p_val = node;
 
out_err:
return ret;
return 0;
}
 
 
549,7 → 732,7
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n",
(unsigned) *id_loc);
dump_stack();
// dump_stack();
return ret;
}
 
557,7 → 740,7
rcache->res = res;
rcache->handle = *id_loc;
 
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
res, &node);
if (unlikely(ret != 0))
goto out_no_reloc;
576,6 → 759,46
}
 
/**
* vmw_rebind_all_dx_query - Rebind the DX query associated with the context
*
* @ctx_res: context the query belongs to
*
* This function assumes binding_mutex is held.
*/
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
struct vmw_private *dev_priv = ctx_res->dev_priv;
struct vmw_dma_buffer *dx_query_mob;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXBindAllQuery body;
} *cmd;
 
 
dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
 
if (!dx_query_mob || dx_query_mob->dx_query_ctx)
return 0;
 
cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
 
if (cmd == NULL) {
DRM_ERROR("Failed to rebind queries.\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = ctx_res->id;
cmd->body.mobid = dx_query_mob->base.mem.start;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
vmw_context_bind_dx_query(ctx_res, dx_query_mob);
 
return 0;
}
 
/**
* vmw_rebind_contexts - Rebind all resources previously bound to
* referenced contexts.
*
592,12 → 815,17
if (unlikely(!val->staged_bindings))
break;
 
ret = vmw_context_rebind_all(val->res);
ret = vmw_binding_rebind_all
(vmw_context_binding_state(val->res));
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to rebind context.\n");
return ret;
}
 
ret = vmw_rebind_all_dx_query(val->res);
if (ret != 0)
return ret;
}
 
return 0;
604,6 → 832,69
}
 
/**
* vmw_view_bindings_add - Add an array of view bindings to a context
* binding state tracker.
*
* @sw_context: The execbuf state used for this command.
* @view_type: View type for the bindings.
* @binding_type: Binding type for the bindings.
* @shader_slot: The shader slot to use for the bindings.
* @view_ids: Array of view ids to be bound.
* @num_views: Number of view ids in @view_ids.
* @first_slot: The binding slot to be used for the first view id in @view_ids.
*/
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
enum vmw_view_type view_type,
enum vmw_ctx_binding_type binding_type,
uint32 shader_slot,
uint32 view_ids[], u32 num_views,
u32 first_slot)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_cmdbuf_res_manager *man;
u32 i;
int ret;
 
if (!ctx_node) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
 
man = sw_context->man;
for (i = 0; i < num_views; ++i) {
struct vmw_ctx_bindinfo_view binding;
struct vmw_resource *view = NULL;
 
if (view_ids[i] != SVGA3D_INVALID_ID) {
view = vmw_view_lookup(man, view_type, view_ids[i]);
if (IS_ERR(view)) {
DRM_ERROR("View not found.\n");
return PTR_ERR(view);
}
 
ret = vmw_view_res_val_add(sw_context, view);
if (ret) {
DRM_ERROR("Could not add view to "
"validation list.\n");
vmw_resource_unreference(&view);
return ret;
}
}
binding.bi.ctx = ctx_node->res;
binding.bi.res = view;
binding.bi.bt = binding_type;
binding.shader_slot = shader_slot;
binding.slot = first_slot + i;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
shader_slot, binding.slot);
if (view)
vmw_resource_unreference(&view);
}
 
return 0;
}
 
/**
* vmw_cmd_cid_check - Check a command header for valid context information.
*
* @dev_priv: Pointer to a device private structure.
641,6 → 932,12
 
cmd = container_of(header, struct vmw_sid_cmd, header);
 
if (cmd->body.type >= SVGA3D_RT_MAX) {
DRM_ERROR("Illegal render target type %u.\n",
(unsigned) cmd->body.type);
return -EINVAL;
}
 
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
&ctx_node);
654,13 → 951,14
return ret;
 
if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo bi;
struct vmw_ctx_bindinfo_view binding;
 
bi.ctx = ctx_node->res;
bi.res = res_node ? res_node->res : NULL;
bi.bt = vmw_ctx_binding_rt;
bi.i1.rt_type = cmd->body.type;
return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
binding.bi.ctx = ctx_node->res;
binding.bi.res = res_node ? res_node->res : NULL;
binding.bi.bt = vmw_ctx_binding_rt;
binding.slot = cmd->body.type;
vmw_binding_add(ctx_node->staged_bindings,
&binding.bi, 0, binding.slot);
}
 
return 0;
677,16 → 975,62
int ret;
 
cmd = container_of(header, struct vmw_sid_cmd, header);
 
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
if (ret)
return ret;
 
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.dest.sid, NULL);
}
 
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXBufferCopy body;
} *cmd;
int ret;
 
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.src, NULL);
if (ret != 0)
return ret;
 
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.dest, NULL);
}
 
static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXPredCopyRegion body;
} *cmd;
int ret;
 
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.srcSid, NULL);
if (ret != 0)
return ret;
 
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.dstSid, NULL);
}
 
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
755,7 → 1099,7
* command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
struct ttm_buffer_object *new_query_bo,
struct vmw_dma_buffer *new_query_bo,
struct vmw_sw_context *sw_context)
{
struct vmw_res_cache_entry *ctx_entry =
767,7 → 1111,7
 
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
if (unlikely(new_query_bo->num_pages > 4)) {
if (unlikely(new_query_bo->base.num_pages > 4)) {
DRM_ERROR("Query buffer too large.\n");
return -EINVAL;
}
836,12 → 1180,12
 
if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
if (dev_priv->pinned_bo) {
vmw_bo_pin(dev_priv->pinned_bo, false);
ttm_bo_unref(&dev_priv->pinned_bo);
vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}
 
if (!sw_context->needs_post_query_barrier) {
vmw_bo_pin(sw_context->cur_query_bo, true);
vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
 
/*
* We pin also the dummy_query_bo buffer so that we
849,14 → 1193,17
* dummy queries in context destroy paths.
*/
 
vmw_bo_pin(dev_priv->dummy_query_bo, true);
if (!dev_priv->dummy_query_bo_pinned) {
vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
true);
dev_priv->dummy_query_bo_pinned = true;
}
 
BUG_ON(sw_context->last_query_ctx == NULL);
dev_priv->query_cid = sw_context->last_query_ctx->id;
dev_priv->query_cid_valid = true;
dev_priv->pinned_bo =
ttm_bo_reference(sw_context->cur_query_bo);
vmw_dmabuf_reference(sw_context->cur_query_bo);
}
}
}
885,17 → 1232,17
struct vmw_dma_buffer **vmw_bo_p)
{
struct vmw_dma_buffer *vmw_bo = NULL;
struct ttm_buffer_object *bo;
uint32_t handle = *id;
struct vmw_relocation *reloc;
int ret;
 
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
return -EINVAL;
ret = -EINVAL;
goto out_no_reloc;
}
bo = &vmw_bo->base;
 
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
DRM_ERROR("Max number relocations per submission"
908,7 → 1255,7
reloc->mob_loc = id;
reloc->location = NULL;
 
ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
 
917,7 → 1264,7
 
out_no_reloc:
vmw_dmabuf_unreference(&vmw_bo);
vmw_bo_p = NULL;
*vmw_bo_p = NULL;
return ret;
}
 
946,17 → 1293,17
struct vmw_dma_buffer **vmw_bo_p)
{
struct vmw_dma_buffer *vmw_bo = NULL;
struct ttm_buffer_object *bo;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
 
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
return -EINVAL;
ret = -EINVAL;
goto out_no_reloc;
}
bo = &vmw_bo->base;
 
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
DRM_ERROR("Max number relocations per submission"
968,7 → 1315,7
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
 
ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
 
977,11 → 1324,103
 
out_no_reloc:
vmw_dmabuf_unreference(&vmw_bo);
vmw_bo_p = NULL;
*vmw_bo_p = NULL;
return ret;
}
 
 
 
/**
* vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
* @header: Pointer to the command header in the command stream.
*
* This function adds the new query into the query COTABLE.
*/
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_dx_define_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdDXDefineQuery q;
} *cmd;
 
int ret;
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *cotable_res;
 
 
if (ctx_node == NULL) {
DRM_ERROR("DX Context not set for query.\n");
return -EINVAL;
}
 
cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
 
if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
return -EINVAL;
 
cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
vmw_resource_unreference(&cotable_res);
 
return ret;
}
 
 
 
/**
* vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
* @header: Pointer to the command header in the command stream.
*
* The query bind operation will eventually associate the query ID
* with its backing MOB. In this function, we take the user mode
* MOB ID and use vmw_translate_mob_ptr() to translate it to its
* kernel mode equivalent.
*/
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_dx_bind_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdDXBindQuery q;
} *cmd;
 
struct vmw_dma_buffer *vmw_bo;
int ret;
 
 
cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
 
/*
* Look up the buffer pointed to by q.mobid, put it on the relocation
* list so its kernel mode MOB ID can be filled in later
*/
ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
&vmw_bo);
 
if (ret != 0)
return ret;
 
sw_context->dx_query_mob = vmw_bo;
sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
 
vmw_dmabuf_unreference(&vmw_bo);
 
return ret;
}
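 
vmw_translate_mob_ptr() above does not patch the command stream immediately: it records where the user-space MOB id lives (reloc->mob_loc) together with the index of the buffer on the validation list, and the id is only overwritten once the buffer has been validated (see vmw_apply_relocations() used by the submit paths further down). A minimal standalone sketch of that record-now-patch-later idea, with made-up names:
 
/* Illustrative sketch only -- not vmwgfx code. */
#include <stdio.h>
#include <stdint.h>
 
struct reloc {
	uint32_t *loc;       /* where in the command stream the id is stored */
	uint32_t  final_id;  /* value that is only known after validation    */
};
 
int main(void)
{
	uint32_t cmds[4]       = { 0x1001, 0xdeadbeef, 0x1002, 0xdeadbeef };
	struct reloc relocs[2] = { { &cmds[1], 7 }, { &cmds[3], 9 } };
 
	/* Later "apply relocations" pass, analogous to vmw_apply_relocations(). */
	for (unsigned i = 0; i < 2; i++)
		*relocs[i].loc = relocs[i].final_id;
 
	for (unsigned i = 0; i < 4; i++)
		printf("cmd[%u] = 0x%x\n", i, cmds[i]);
	return 0;
}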
 
 
 
/**
* vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
1075,7 → 1514,7
if (unlikely(ret != 0))
return ret;
 
ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
vmw_dmabuf_unreference(&vmw_bo);
return ret;
1129,7 → 1568,7
if (unlikely(ret != 0))
return ret;
 
ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
vmw_dmabuf_unreference(&vmw_bo);
return ret;
1363,6 → 1802,12
if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
continue;
 
if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
DRM_ERROR("Illegal texture/sampler unit %u.\n",
(unsigned) cur_state->stage);
return -EINVAL;
}
 
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cur_state->value, &res_node);
1370,14 → 1815,14
return ret;
 
if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo bi;
struct vmw_ctx_bindinfo_tex binding;
 
bi.ctx = ctx_node->res;
bi.res = res_node ? res_node->res : NULL;
bi.bt = vmw_ctx_binding_tex;
bi.i1.texture_stage = cur_state->stage;
vmw_context_binding_add(ctx_node->staged_bindings,
&bi);
binding.bi.ctx = ctx_node->res;
binding.bi.res = res_node ? res_node->res : NULL;
binding.bi.bt = vmw_ctx_binding_tex;
binding.texture_stage = cur_state->stage;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
0, binding.texture_stage);
}
}
 
1407,7 → 1852,48
return ret;
}
 
 
/**
* vmw_cmd_res_switch_backup - Utility function to handle backup buffer
* switching
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @val_node: The validation node representing the resource.
* @buf_id: Pointer to the user-space backup buffer handle in the command
* stream.
* @backup_offset: Offset of backup into MOB.
*
* This function prepares for registering a switch of backup buffers
* in the resource metadata just prior to unreserving.
*/
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
struct vmw_resource_val_node *val_node,
uint32_t *buf_id,
unsigned long backup_offset)
{
struct vmw_dma_buffer *dma_buf;
int ret;
 
ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
if (ret)
return ret;
 
val_node->switching_backup = true;
if (val_node->first_usage)
val_node->no_buffer_needed = true;
 
vmw_dmabuf_unreference(&val_node->new_backup);
val_node->new_backup = dma_buf;
val_node->new_backup_offset = backup_offset;
 
return 0;
}
 
 
/**
* vmw_cmd_switch_backup - Utility function to handle backup buffer switching
*
* @dev_priv: Pointer to a device private struct.
1420,7 → 1906,8
* @backup_offset: Offset of backup into MOB.
*
* This function prepares for registering a switch of backup buffers
* in the resource metadata just prior to unreserving.
* in the resource metadata just prior to unreserving. It's basically a wrapper
* around vmw_cmd_res_switch_backup with a different interface.
*/
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
1431,27 → 1918,16
uint32_t *buf_id,
unsigned long backup_offset)
{
struct vmw_resource_val_node *val_node;
int ret;
struct vmw_dma_buffer *dma_buf;
struct vmw_resource_val_node *val_node;
 
ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
converter, res_id, &val_node);
if (unlikely(ret != 0))
if (ret)
return ret;
 
ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
if (unlikely(ret != 0))
return ret;
 
if (val_node->first_usage)
val_node->no_buffer_needed = true;
 
vmw_dmabuf_unreference(&val_node->new_backup);
val_node->new_backup = dma_buf;
val_node->new_backup_offset = backup_offset;
 
return 0;
return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
buf_id, backup_offset);
}
 
/**
1623,8 → 2099,101
&cmd->body.sid, NULL);
}
 
#if 0
 
/**
* vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
* command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_shader_define_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdDefineShader body;
} *cmd;
int ret;
size_t size;
struct vmw_resource_val_node *val;
 
cmd = container_of(header, struct vmw_shader_define_cmd,
header);
 
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
&val);
if (unlikely(ret != 0))
return ret;
 
if (unlikely(!dev_priv->has_mob))
return 0;
 
size = cmd->header.size - sizeof(cmd->body);
ret = vmw_compat_shader_add(dev_priv,
vmw_context_res_man(val->res),
cmd->body.shid, cmd + 1,
cmd->body.type, size,
&sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
 
return vmw_resource_relocation_add(&sw_context->res_relocations,
NULL, &cmd->header.id -
sw_context->buf_start);
}
 
/**
* vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
* command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_shader_destroy_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdDestroyShader body;
} *cmd;
int ret;
struct vmw_resource_val_node *val;
 
cmd = container_of(header, struct vmw_shader_destroy_cmd,
header);
 
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
&val);
if (unlikely(ret != 0))
return ret;
 
if (unlikely(!dev_priv->has_mob))
return 0;
 
ret = vmw_shader_remove(vmw_context_res_man(val->res),
cmd->body.shid,
cmd->body.type,
&sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
 
return vmw_resource_relocation_add(&sw_context->res_relocations,
NULL, &cmd->header.id -
sw_context->buf_start);
}
 
/**
* vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
* command
*
1641,7 → 2210,7
SVGA3dCmdSetShader body;
} *cmd;
struct vmw_resource_val_node *ctx_node, *res_node = NULL;
struct vmw_ctx_bindinfo bi;
struct vmw_ctx_bindinfo_shader binding;
struct vmw_resource *res = NULL;
int ret;
 
1648,6 → 2217,12
cmd = container_of(header, struct vmw_set_shader_cmd,
header);
 
if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
DRM_ERROR("Illegal shader type %u.\n",
(unsigned) cmd->body.type);
return -EINVAL;
}
 
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
&ctx_node);
1658,14 → 2233,12
return 0;
 
if (cmd->body.shid != SVGA3D_INVALID_ID) {
res = vmw_compat_shader_lookup
(vmw_context_res_man(ctx_node->res),
res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
cmd->body.shid,
cmd->body.type);
 
if (!IS_ERR(res)) {
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
vmw_res_shader,
&cmd->body.shid, res,
&res_node);
vmw_resource_unreference(&res);
1683,13 → 2256,14
return ret;
}
 
bi.ctx = ctx_node->res;
bi.res = res_node ? res_node->res : NULL;
bi.bt = vmw_ctx_binding_shader;
bi.i1.shader_type = cmd->body.type;
return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
binding.bi.ctx = ctx_node->res;
binding.bi.res = res_node ? res_node->res : NULL;
binding.bi.bt = vmw_ctx_binding_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
binding.shader_slot, 0);
return 0;
}
#endif
 
/**
* vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
1724,7 → 2298,6
return 0;
}
 
#if 0
/**
* vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
* command
1750,8 → 2323,691
&cmd->body.shid, &cmd->body.mobid,
cmd->body.offsetInBytes);
}
#endif
 
/**
* vmw_cmd_dx_set_single_constant_buffer - Validate an
* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetSingleConstantBuffer body;
} *cmd;
struct vmw_resource_val_node *res_node = NULL;
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_cb binding;
int ret;
 
if (unlikely(ctx_node == NULL)) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
 
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.sid, &res_node);
if (unlikely(ret != 0))
return ret;
 
binding.bi.ctx = ctx_node->res;
binding.bi.res = res_node ? res_node->res : NULL;
binding.bi.bt = vmw_ctx_binding_cb;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
binding.offset = cmd->body.offsetInBytes;
binding.size = cmd->body.sizeInBytes;
binding.slot = cmd->body.slot;
 
if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
(unsigned) cmd->body.type,
(unsigned) binding.slot);
return -EINVAL;
}
 
vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
binding.shader_slot, binding.slot);
 
return 0;
}
 
/**
* vmw_cmd_dx_set_shader_res - Validate an
* SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetShaderResources body;
} *cmd = container_of(header, typeof(*cmd), header);
u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dShaderResourceViewId);
 
if ((u64) cmd->body.startView + (u64) num_sr_view >
(u64) SVGA3D_DX_MAX_SRVIEWS ||
cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
DRM_ERROR("Invalid shader binding.\n");
return -EINVAL;
}
 
return vmw_view_bindings_add(sw_context, vmw_view_sr,
vmw_ctx_binding_sr,
cmd->body.type - SVGA3D_SHADERTYPE_MIN,
(void *) &cmd[1], num_sr_view,
cmd->body.startView);
}
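 
The handler above (and the vertex-buffer handler further down) derives the number of trailing array elements from header.size and then bounds-checks the start slot plus that count after widening both operands to u64, so a large 32-bit start value cannot wrap the addition and slip past the limit. A small standalone illustration of the same overflow-safe range check; MAX_SLOTS and range_ok are invented names:
 
/* Illustrative sketch only -- not vmwgfx code. */
#include <stdio.h>
#include <stdint.h>
 
#define MAX_SLOTS 64u
 
/* Returns 1 when [start, start + count) fits inside MAX_SLOTS without wrapping. */
static int range_ok(uint32_t start, uint32_t count)
{
	return (uint64_t)start + (uint64_t)count <= (uint64_t)MAX_SLOTS;
}
 
int main(void)
{
	/* A plain 32-bit addition of these would wrap to 1 and wrongly pass. */
	printf("%d\n", range_ok(0xffffffffu, 2u));  /* 0: rejected */
	printf("%d\n", range_ok(60u, 4u));          /* 1: accepted */
	return 0;
}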
 
/**
* vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
* command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetShader body;
} *cmd;
struct vmw_resource *res = NULL;
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_shader binding;
int ret = 0;
 
if (unlikely(ctx_node == NULL)) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
 
cmd = container_of(header, typeof(*cmd), header);
 
if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
DRM_ERROR("Illegal shader type %u.\n",
(unsigned) cmd->body.type);
return -EINVAL;
}
 
if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
if (IS_ERR(res)) {
DRM_ERROR("Could not find shader for binding.\n");
return PTR_ERR(res);
}
 
ret = vmw_resource_val_add(sw_context, res, NULL);
if (ret)
goto out_unref;
}
 
binding.bi.ctx = ctx_node->res;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_dx_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
 
vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
binding.shader_slot, 0);
out_unref:
if (res)
vmw_resource_unreference(&res);
 
return ret;
}
 
/**
* vmw_cmd_dx_set_vertex_buffers - Validates an
* SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_vb binding;
struct vmw_resource_val_node *res_node;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetVertexBuffers body;
SVGA3dVertexBuffer buf[];
} *cmd;
int i, ret, num;
 
if (unlikely(ctx_node == NULL)) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
 
cmd = container_of(header, typeof(*cmd), header);
num = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dVertexBuffer);
if ((u64)num + (u64)cmd->body.startBuffer >
(u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
DRM_ERROR("Invalid number of vertex buffers.\n");
return -EINVAL;
}
 
for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->buf[i].sid, &res_node);
if (unlikely(ret != 0))
return ret;
 
binding.bi.ctx = ctx_node->res;
binding.bi.bt = vmw_ctx_binding_vb;
binding.bi.res = ((res_node) ? res_node->res : NULL);
binding.offset = cmd->buf[i].offset;
binding.stride = cmd->buf[i].stride;
binding.slot = i + cmd->body.startBuffer;
 
vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
0, binding.slot);
}
 
return 0;
}
 
/**
* vmw_cmd_dx_set_index_buffer - Validate an
* SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_ib binding;
struct vmw_resource_val_node *res_node;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetIndexBuffer body;
} *cmd;
int ret;
 
if (unlikely(ctx_node == NULL)) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
 
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.sid, &res_node);
if (unlikely(ret != 0))
return ret;
 
binding.bi.ctx = ctx_node->res;
binding.bi.res = ((res_node) ? res_node->res : NULL);
binding.bi.bt = vmw_ctx_binding_ib;
binding.offset = cmd->body.offset;
binding.format = cmd->body.format;
 
vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
 
return 0;
}
 
/**
* vmw_cmd_dx_set_rendertargets - Validate an
* SVGA_3D_CMD_DX_SET_RENDERTARGETS command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetRenderTargets body;
} *cmd = container_of(header, typeof(*cmd), header);
int ret;
u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dRenderTargetViewId);
 
if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
DRM_ERROR("Invalid DX Rendertarget binding.\n");
return -EINVAL;
}
 
ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
vmw_ctx_binding_ds, 0,
&cmd->body.depthStencilViewId, 1, 0);
if (ret)
return ret;
 
return vmw_view_bindings_add(sw_context, vmw_view_rt,
vmw_ctx_binding_dx_rt, 0,
(void *)&cmd[1], num_rt_view, 0);
}
 
/**
* vmw_cmd_dx_clear_rendertarget_view - Validate an
* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXClearRenderTargetView body;
} *cmd = container_of(header, typeof(*cmd), header);
 
return vmw_view_id_val_add(sw_context, vmw_view_rt,
cmd->body.renderTargetViewId);
}
 
/**
* vmw_cmd_dx_clear_depthstencil_view - Validate an
* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXClearDepthStencilView body;
} *cmd = container_of(header, typeof(*cmd), header);
 
return vmw_view_id_val_add(sw_context, vmw_view_ds,
cmd->body.depthStencilViewId);
}
 
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource_val_node *srf_node;
struct vmw_resource *res;
enum vmw_view_type view_type;
int ret;
/*
* This is based on the fact that all affected define commands have
* the same initial command body layout.
*/
struct {
SVGA3dCmdHeader header;
uint32 defined_id;
uint32 sid;
} *cmd;
 
if (unlikely(ctx_node == NULL)) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
 
view_type = vmw_view_cmd_to_type(header->id);
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->sid, &srf_node);
if (unlikely(ret != 0))
return ret;
 
res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
ret = vmw_cotable_notify(res, cmd->defined_id);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
return ret;
 
return vmw_view_add(sw_context->man,
ctx_node->res,
srf_node->res,
view_type,
cmd->defined_id,
header,
header->size + sizeof(*header),
&sw_context->staged_cmd_res);
}
 
/**
* vmw_cmd_dx_set_so_targets - Validate an
* SVGA_3D_CMD_DX_SET_SOTARGETS command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_so binding;
struct vmw_resource_val_node *res_node;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetSOTargets body;
SVGA3dSoTarget targets[];
} *cmd;
int i, ret, num;
 
if (unlikely(ctx_node == NULL)) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
 
cmd = container_of(header, typeof(*cmd), header);
num = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dSoTarget);
 
if (num > SVGA3D_DX_MAX_SOTARGETS) {
DRM_ERROR("Invalid DX SO binding.\n");
return -EINVAL;
}
 
for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->targets[i].sid, &res_node);
if (unlikely(ret != 0))
return ret;
 
binding.bi.ctx = ctx_node->res;
binding.bi.res = ((res_node) ? res_node->res : NULL);
binding.bi.bt = vmw_ctx_binding_so;
binding.offset = cmd->targets[i].offset;
binding.size = cmd->targets[i].sizeInBytes;
binding.slot = i;
 
vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
0, binding.slot);
}
 
return 0;
}
 
static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *res;
/*
* This is based on the fact that all affected define commands have
* the same initial command body layout.
*/
struct {
SVGA3dCmdHeader header;
uint32 defined_id;
} *cmd;
enum vmw_so_type so_type;
int ret;
 
if (unlikely(ctx_node == NULL)) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
 
so_type = vmw_so_cmd_to_type(header->id);
res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cotable_notify(res, cmd->defined_id);
vmw_resource_unreference(&res);
 
return ret;
}
 
/**
* vmw_cmd_dx_check_subresource - Validate an
* SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct {
SVGA3dCmdHeader header;
union {
SVGA3dCmdDXReadbackSubResource r_body;
SVGA3dCmdDXInvalidateSubResource i_body;
SVGA3dCmdDXUpdateSubResource u_body;
SVGA3dSurfaceId sid;
};
} *cmd;
 
BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
offsetof(typeof(*cmd), sid));
BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
offsetof(typeof(*cmd), sid));
BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
offsetof(typeof(*cmd), sid));
 
cmd = container_of(header, typeof(*cmd), header);
 
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->sid, NULL);
}
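 
vmw_cmd_dx_check_subresource() handles several commands with one function by overlaying their bodies in a union and reading only the shared leading sid field; the BUILD_BUG_ON() lines turn the "sid is at the same offset in every body" assumption into a compile-time check. A standalone C11 sketch of the same trick using _Static_assert; the struct and field names here are invented:
 
/* Illustrative sketch only -- not vmwgfx code. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
 
struct cmd_readback   { uint32_t sid; uint32_t subresource; };
struct cmd_invalidate { uint32_t sid; uint32_t flags; };
 
struct any_cmd {
	uint32_t id;
	union {
		struct cmd_readback   r_body;
		struct cmd_invalidate i_body;
		uint32_t              sid;   /* shared prefix of every body */
	};
};
 
/* Compile-time equivalent of the BUILD_BUG_ON() checks above. */
_Static_assert(offsetof(struct any_cmd, r_body.sid) == offsetof(struct any_cmd, sid),
	       "sid must be the first field of every body");
_Static_assert(offsetof(struct any_cmd, i_body.sid) == offsetof(struct any_cmd, sid),
	       "sid must be the first field of every body");
 
int main(void)
{
	struct any_cmd c = { .id = 1, .r_body = { .sid = 7, .subresource = 0 } };
 
	printf("sid via shared prefix: %u\n", c.sid);  /* prints 7 */
	return 0;
}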
 
static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
 
if (unlikely(ctx_node == NULL)) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
 
return 0;
}
 
/**
* vmw_cmd_dx_view_remove - validate a view remove command and
* schedule the view resource for removal.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*
* Check that the view exists, and if it was not created using this
* command batch, make sure it's validated (present in the device) so that
* the remove command will not confuse the device.
*/
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct {
SVGA3dCmdHeader header;
union vmw_view_destroy body;
} *cmd = container_of(header, typeof(*cmd), header);
enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
struct vmw_resource *view;
int ret;
 
if (!ctx_node) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
 
ret = vmw_view_remove(sw_context->man,
cmd->body.view_id, view_type,
&sw_context->staged_cmd_res,
&view);
if (ret || !view)
return ret;
 
/*
* Add view to the validate list iff it was not created using this
* command batch.
*/
return vmw_view_res_val_add(sw_context, view);
}
 
/**
* vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
* command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXDefineShader body;
} *cmd = container_of(header, typeof(*cmd), header);
int ret;
 
if (!ctx_node) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
 
res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
ret = vmw_cotable_notify(res, cmd->body.shaderId);
vmw_resource_unreference(&res);
if (ret)
return ret;
 
return vmw_dx_shader_add(sw_context->man, ctx_node->res,
cmd->body.shaderId, cmd->body.type,
&sw_context->staged_cmd_res);
}
 
/**
* vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
* command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXDestroyShader body;
} *cmd = container_of(header, typeof(*cmd), header);
int ret;
 
if (!ctx_node) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
 
ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
&sw_context->staged_cmd_res);
if (ret)
DRM_ERROR("Could not find shader to remove.\n");
 
return ret;
}
 
/**
* vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
* command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node;
struct vmw_resource_val_node *res_node;
struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXBindShader body;
} *cmd = container_of(header, typeof(*cmd), header);
int ret;
 
if (cmd->body.cid != SVGA3D_INVALID_ID) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter,
&cmd->body.cid, &ctx_node);
if (ret)
return ret;
} else {
ctx_node = sw_context->dx_ctx_node;
if (!ctx_node) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
}
 
res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
cmd->body.shid, 0);
if (IS_ERR(res)) {
DRM_ERROR("Could not find shader to bind.\n");
return PTR_ERR(res);
}
 
ret = vmw_resource_val_add(sw_context, res, &res_node);
if (ret) {
DRM_ERROR("Error creating resource validation node.\n");
goto out_unref;
}
 
 
ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
&cmd->body.mobid,
cmd->body.offsetInBytes);
out_unref:
vmw_resource_unreference(&res);
 
return ret;
}
 
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
1759,7 → 3015,7
uint32_t size_remaining = *size;
uint32_t cmd_id;
 
cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
cmd_id = ((uint32_t *)buf)[0];
switch (cmd_id) {
case SVGA_CMD_UPDATE:
*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
1890,7 → 3146,7
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
false, false, true),
1975,14 → 3231,14
const struct vmw_cmd_entry *entry;
bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
 
cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
cmd_id = ((uint32_t *)buf)[0];
/* Handle any non-3D commands */
if (unlikely(cmd_id < SVGA_CMD_MAX))
return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
 
 
cmd_id = le32_to_cpu(header->id);
*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
cmd_id = header->id;
*size = header->size + sizeof(SVGA3dCmdHeader);
 
cmd_id -= SVGA_3D_CMD_BASE;
if (unlikely(*size > size_remaining))
2094,7 → 3350,8
*
* @list: The resource list.
*/
static void vmw_resource_list_unreference(struct list_head *list)
static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
struct list_head *list)
{
struct vmw_resource_val_node *val, *val_next;
 
2105,8 → 3362,15
list_for_each_entry_safe(val, val_next, list, head) {
list_del_init(&val->head);
vmw_resource_unreference(&val->res);
if (unlikely(val->staged_bindings))
kfree(val->staged_bindings);
 
if (val->staged_bindings) {
if (val->staged_bindings != sw_context->staged_bindings)
vmw_binding_state_free(val->staged_bindings);
else
sw_context->staged_bindings_inuse = false;
val->staged_bindings = NULL;
}
 
kfree(val);
}
}
2132,24 → 3396,21
(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
 
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
int vmw_validate_single_buffer(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo,
bool interruptible,
bool validate_as_mob)
{
struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
base);
int ret;
 
 
/*
* Don't validate pinned buffers.
*/
 
if (bo == dev_priv->pinned_bo ||
(bo == dev_priv->dummy_query_bo &&
dev_priv->dummy_query_bo_pinned))
if (vbo->pin_count > 0)
return 0;
 
if (validate_as_mob)
return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
false);
 
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
2158,7 → 3419,8
* used as a GMR, this will return -ENOMEM.
*/
 
ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
false);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
 
2167,8 → 3429,7
* previous contents.
*/
 
DRM_INFO("Falling through to VRAM.\n");
ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
return ret;
}
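 
vmw_validate_single_buffer() above tries the preferred VRAM-or-GMR placement first and only falls back to plain VRAM when the first attempt fails with something other than success or -ERESTARTSYS. A minimal standalone sketch of that try-then-fall-back loop; try_place(), the placement values and the locally defined ERESTARTSYS are stand-ins for illustration only:
 
/* Illustrative sketch only -- not vmwgfx code. */
#include <stdio.h>
#include <errno.h>
 
#define ERESTARTSYS 512  /* kernel-internal errno; defined here just for the sketch */
 
/* Stand-in for ttm_bo_validate(): pretend only placement 2 (plain VRAM) works. */
static int try_place(int placement)
{
	return (placement == 2) ? 0 : -ENOMEM;
}
 
static int place_with_fallback(void)
{
	int placements[] = { 1 /* VRAM or GMR */, 2 /* VRAM only */ };
	int ret = -ENOMEM;
 
	for (unsigned i = 0; i < 2; i++) {
		ret = try_place(placements[i]);
		if (ret == 0 || ret == -ERESTARTSYS)  /* success, or interrupted: stop */
			break;
	}
	return ret;
}
 
int main(void)
{
	printf("placement result: %d\n", place_with_fallback());  /* prints 0 */
	return 0;
}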
 
2180,6 → 3441,7
 
list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
true,
entry->validate_as_mob);
if (unlikely(ret != 0))
return ret;
2247,13 → 3509,9
 
if (p_handle != NULL)
ret = vmw_user_fence_create(file_priv, dev_priv->fman,
sequence,
DRM_VMW_FENCE_FLAG_EXEC,
p_fence, p_handle);
sequence, p_fence, p_handle);
else
ret = vmw_fence_create(dev_priv->fman, sequence,
DRM_VMW_FENCE_FLAG_EXEC,
p_fence);
ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
 
if (unlikely(ret != 0 && !synced)) {
(void) vmw_fallback_wait(dev_priv, false, false,
2305,7 → 3563,7
BUG_ON(fence == NULL);
 
fence_rep.handle = fence_handle;
fence_rep.seqno = fence->seqno;
fence_rep.seqno = fence->base.seqno;
vmw_update_seqno(dev_priv, &dev_priv->fifo);
fence_rep.passed_seqno = dev_priv->last_read_seqno;
}
2315,8 → 3573,8
* seeing fence_rep::error filled in. Typically
* user-space would have pre-set that member to -EFAULT.
*/
// ret = copy_to_user(user_fence_rep, &fence_rep,
// sizeof(fence_rep));
ret = copy_to_user(user_fence_rep, &fence_rep,
sizeof(fence_rep));
 
/*
* User-space lost the fence object. We need to sync
2326,14 → 3584,170
ttm_ref_object_base_unref(vmw_fp->tfile,
fence_handle, TTM_REF_USAGE);
DRM_ERROR("Fence copy error. Syncing.\n");
(void) vmw_fence_obj_wait(fence, fence->signal_mask,
false, false,
(void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
}
}
 
/**
* vmw_execbuf_submit_fifo - Patch a command batch and submit it using
* the fifo.
*
* @dev_priv: Pointer to a device private structure.
* @kernel_commands: Pointer to the unpatched command batch.
* @command_size: Size of the unpatched command batch.
* @sw_context: Structure holding the relocation lists.
*
* Side effects: If this function returns 0, then the command batch
* pointed to by @kernel_commands will have been modified.
*/
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
void *kernel_commands,
u32 command_size,
struct vmw_sw_context *sw_context)
{
void *cmd;
 
if (sw_context->dx_ctx_node)
cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
sw_context->dx_ctx_node->res->id);
else
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (!cmd) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
return -ENOMEM;
}
 
vmw_apply_relocations(sw_context);
memcpy(cmd, kernel_commands, command_size);
vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_fifo_commit(dev_priv, command_size);
 
return 0;
}
 
/**
* vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
* the command buffer manager.
*
* @dev_priv: Pointer to a device private structure.
* @header: Opaque handle to the command buffer allocation.
* @command_size: Size of the unpatched command batch.
* @sw_context: Structure holding the relocation lists.
*
* Side effects: If this function returns 0, then the command buffer
* represented by @header will have been modified.
*/
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
struct vmw_cmdbuf_header *header,
u32 command_size,
struct vmw_sw_context *sw_context)
{
u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
SVGA3D_INVALID_ID);
void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
id, false, header);
 
vmw_apply_relocations(sw_context);
vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
 
return 0;
}
 
/**
* vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
* submission using a command buffer.
*
* @dev_priv: Pointer to a device private structure.
* @user_commands: User-space pointer to the commands to be submitted.
* @command_size: Size of the unpatched command batch.
* @header: Out parameter returning the opaque pointer to the command buffer.
*
* This function checks whether we can use the command buffer manager for
* submission and if so, creates a command buffer of suitable size and
* copies the user data into that buffer.
*
* On successful return, the function returns a pointer to the data in the
* command buffer and *@header is set to non-NULL.
* If command buffers could not be used, the function will return the value
* of @kernel_commands on function call. That value may be NULL. In that case,
* the value of *@header will be set to NULL.
* If an error is encountered, the function will return a pointer error value.
* If the function is interrupted by a signal while sleeping, it will return
* -ERESTARTSYS casted to a pointer error value.
*/
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
void __user *user_commands,
void *kernel_commands,
u32 command_size,
struct vmw_cmdbuf_header **header)
{
size_t cmdbuf_size;
int ret;
 
*header = NULL;
if (!dev_priv->cman || kernel_commands)
return kernel_commands;
 
if (command_size > SVGA_CB_MAX_SIZE) {
DRM_ERROR("Command buffer is too large.\n");
return ERR_PTR(-EINVAL);
}
 
/* If possible, add a little space for fencing. */
cmdbuf_size = command_size + 512;
cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
true, header);
if (IS_ERR(kernel_commands))
return kernel_commands;
 
ret = copy_from_user(kernel_commands, user_commands,
command_size);
if (ret) {
DRM_ERROR("Failed copying commands.\n");
vmw_cmdbuf_header_free(*header);
*header = NULL;
return ERR_PTR(-EFAULT);
}
 
return kernel_commands;
}
 
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
uint32_t handle)
{
struct vmw_resource_val_node *ctx_node;
struct vmw_resource *res;
int ret;
 
if (handle == SVGA3D_INVALID_ID)
return 0;
 
ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
handle, user_context_converter,
&res);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or user DX context 0x%08x.\n",
(unsigned) handle);
return ret;
}
 
ret = vmw_resource_val_add(sw_context, res, &ctx_node);
if (unlikely(ret != 0))
goto out_err;
 
sw_context->dx_ctx_node = ctx_node;
sw_context->man = vmw_context_res_man(res);
out_err:
vmw_resource_unreference(&res);
return ret;
}
 
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands,
2340,6 → 3754,7
void *kernel_commands,
uint32_t command_size,
uint64_t throttle_us,
uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj **out_fence)
{
2347,19 → 3762,33
struct vmw_fence_obj *fence = NULL;
struct vmw_resource *error_resource;
struct list_head resource_list;
struct vmw_cmdbuf_header *header;
struct ww_acquire_ctx ticket;
uint32_t handle;
void *cmd;
int ret;
 
if (throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
throttle_us);
 
if (ret)
return ret;
}
 
kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
kernel_commands, command_size,
&header);
if (IS_ERR(kernel_commands))
return PTR_ERR(kernel_commands);
 
ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
if (unlikely(ret != 0))
return -ERESTARTSYS;
if (ret) {
ret = -ERESTARTSYS;
goto out_free_header;
}
 
/*
sw_context->kernel = false;
if (kernel_commands == NULL) {
sw_context->kernel = false;
 
ret = vmw_resize_cmd_bounce(sw_context, command_size);
if (unlikely(ret != 0))
goto out_unlock;
2374,20 → 3803,26
goto out_unlock;
}
kernel_commands = sw_context->cmd_bounce;
} else */
} else if (!header)
sw_context->kernel = true;
 
sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0;
INIT_LIST_HEAD(&sw_context->resource_list);
INIT_LIST_HEAD(&sw_context->ctx_resource_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
sw_context->last_query_ctx = NULL;
sw_context->needs_post_query_barrier = false;
sw_context->dx_ctx_node = NULL;
sw_context->dx_query_mob = NULL;
sw_context->dx_query_ctx = NULL;
memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->validate_nodes);
INIT_LIST_HEAD(&sw_context->res_relocations);
if (sw_context->staged_bindings)
vmw_binding_state_reset(sw_context->staged_bindings);
 
if (!sw_context->res_ht_initialized) {
ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
if (unlikely(ret != 0))
2395,10 → 3830,24
sw_context->res_ht_initialized = true;
}
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
INIT_LIST_HEAD(&resource_list);
ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
if (unlikely(ret != 0)) {
list_splice_init(&sw_context->ctx_resource_list,
&sw_context->resource_list);
goto out_err_nores;
}
 
INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
/*
* Merge the resource lists before checking the return status
* from vmw_cmd_check_all so that all the open hashtabs will
* be handled properly even if vmw_cmd_check_all fails.
*/
list_splice_init(&sw_context->ctx_resource_list,
&sw_context->resource_list);
 
if (unlikely(ret != 0))
goto out_err_nores;
 
2406,9 → 3855,10
if (unlikely(ret != 0))
goto out_err_nores;
 
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
true, NULL);
if (unlikely(ret != 0))
goto out_err;
goto out_err_nores;
 
ret = vmw_validate_buffers(dev_priv, sw_context);
if (unlikely(ret != 0))
2418,14 → 3868,6
if (unlikely(ret != 0))
goto out_err;
 
if (throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
throttle_us);
 
if (unlikely(ret != 0))
goto out_err;
}
 
ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
if (unlikely(ret != 0)) {
ret = -ERESTARTSYS;
2438,21 → 3880,18
goto out_unlock_binding;
}
 
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
ret = -ENOMEM;
goto out_unlock_binding;
if (!header) {
ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
command_size, sw_context);
} else {
ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
sw_context);
header = NULL;
}
mutex_unlock(&dev_priv->binding_mutex);
if (ret)
goto out_err;
 
vmw_apply_relocations(sw_context);
memcpy(cmd, kernel_commands, command_size);
 
vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
vmw_resource_relocations_free(&sw_context->res_relocations);
 
vmw_fifo_commit(dev_priv, command_size);
 
vmw_query_bo_switch_commit(dev_priv, sw_context);
ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
&fence,
2466,8 → 3905,7
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
 
vmw_resource_list_unreserve(&sw_context->resource_list, false);
mutex_unlock(&dev_priv->binding_mutex);
vmw_resources_unreserve(sw_context, false);
 
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
2496,7 → 3934,7
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
vmw_resource_list_unreference(&resource_list);
vmw_resource_list_unreference(sw_context, &resource_list);
 
return 0;
 
2505,7 → 3943,7
out_err:
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
vmw_resource_list_unreserve(&sw_context->resource_list, true);
vmw_resources_unreserve(sw_context, true);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
vmw_clear_validations(sw_context);
2523,9 → 3961,12
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
vmw_resource_list_unreference(&resource_list);
vmw_resource_list_unreference(sw_context, &resource_list);
if (unlikely(error_resource != NULL))
vmw_resource_unreference(&error_resource);
out_free_header:
if (header)
vmw_cmdbuf_header_free(header);
 
return ret;
}
2544,10 → 3985,12
DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
 
(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
vmw_bo_pin(dev_priv->pinned_bo, false);
vmw_bo_pin(dev_priv->dummy_query_bo, false);
vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
if (dev_priv->dummy_query_bo_pinned) {
vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
dev_priv->dummy_query_bo_pinned = false;
}
}
 
 
/**
2588,16 → 4031,16
 
INIT_LIST_HEAD(&validate_list);
 
pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
pinned_val.shared = false;
list_add_tail(&pinned_val.head, &validate_list);
 
query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
query_val.shared = false;
list_add_tail(&query_val.head, &validate_list);
 
do {
ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
} while (ret == -ERESTARTSYS);
 
ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
false, NULL);
if (unlikely(ret != 0)) {
vmw_execbuf_unpin_panic(dev_priv);
goto out_no_reserve;
2613,10 → 4056,11
dev_priv->query_cid_valid = false;
}
 
vmw_bo_pin(dev_priv->pinned_bo, false);
vmw_bo_pin(dev_priv->dummy_query_bo, false);
vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
if (dev_priv->dummy_query_bo_pinned) {
vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
dev_priv->dummy_query_bo_pinned = false;
 
}
if (fence == NULL) {
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
NULL);
2628,7 → 4072,9
 
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
ttm_bo_unref(&dev_priv->pinned_bo);
vmw_dmabuf_unreference(&dev_priv->pinned_bo);
DRM_INFO("Dummy query bo pin count: %d\n",
dev_priv->dummy_query_bo->pin_count);
 
out_unlock:
return;
2638,7 → 4084,7
out_no_reserve:
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
ttm_bo_unref(&dev_priv->pinned_bo);
vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}
 
/**
2667,44 → 4113,74
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
 
 
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
struct drm_file *file_priv, size_t size)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
struct drm_vmw_execbuf_arg arg;
int ret;
static const size_t copy_offset[] = {
offsetof(struct drm_vmw_execbuf_arg, context_handle),
sizeof(struct drm_vmw_execbuf_arg)};
 
if (unlikely(size < copy_offset[0])) {
DRM_ERROR("Invalid command size, ioctl %d\n",
DRM_VMW_EXECBUF);
return -EINVAL;
}
 
if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
return -EFAULT;
 
/*
* This will allow us to extend the ioctl argument while
* Extend the ioctl argument while
* maintaining backwards compatibility:
* We take different code paths depending on the value of
* arg->version.
* arg.version.
*/
 
if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
arg.version == 0)) {
DRM_ERROR("Incorrect execbuf version.\n");
DRM_ERROR("You're running outdated experimental "
"vmwgfx user-space drivers.");
return -EINVAL;
}
 
if (arg.version > 1 &&
copy_from_user(&arg.context_handle,
(void __user *) (data + copy_offset[0]),
copy_offset[arg.version - 1] -
copy_offset[0]) != 0)
return -EFAULT;
 
switch (arg.version) {
case 1:
arg.context_handle = (uint32_t) -1;
break;
case 2:
if (arg.pad64 != 0) {
DRM_ERROR("Unused IOCTL data not set to zero.\n");
return -EINVAL;
}
break;
default:
break;
}
 
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
 
ret = vmw_execbuf_process(file_priv, dev_priv,
(void __user *)(unsigned long)arg->commands,
NULL, arg->command_size, arg->throttle_us,
(void __user *)(unsigned long)arg->fence_rep,
(void __user *)(unsigned long)arg.commands,
NULL, arg.command_size, arg.throttle_us,
arg.context_handle,
(void __user *)(unsigned long)arg.fence_rep,
NULL);
 
ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0))
goto out_unlock;
return ret;
 
// vmw_kms_cursor_post_execbuf(dev_priv);
 
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
return 0;
}
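
The copy_offset[] handling above lets the kernel accept older, shorter versions of the execbuf argument structure without breaking newer user space: only the fields the caller's version actually contains are copied, and the rest are defaulted. A minimal user-space sketch of the same pattern follows; the demo_arg structures and field names are hypothetical stand-ins, not the real drm_vmw_execbuf_arg layout.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_arg_v1 { uint64_t commands; uint32_t command_size; };
struct demo_arg_v2 { uint64_t commands; uint32_t command_size; uint32_t context_handle; };

#define DEMO_ARG_VERSION 2

static const size_t copy_offset[] = {
	offsetof(struct demo_arg_v2, context_handle),  /* everything a v1 caller provides */
	sizeof(struct demo_arg_v2),                    /* everything a v2 caller provides */
};

static int demo_copy_arg(const void *user, size_t user_size, unsigned version,
			 struct demo_arg_v2 *arg)
{
	if (version == 0 || version > DEMO_ARG_VERSION ||
	    user_size < copy_offset[version - 1])
		return -1;                             /* unknown or truncated argument */

	memcpy(arg, user, copy_offset[0]);             /* fields common to all versions */
	if (version > 1)
		memcpy((char *)arg + copy_offset[0],
		       (const char *)user + copy_offset[0],
		       copy_offset[version - 1] - copy_offset[0]);
	else
		arg->context_handle = (uint32_t)-1;    /* default for old callers */
	return 0;
}

int main(void)
{
	struct demo_arg_v1 v1 = { 0x1000, 64 };
	struct demo_arg_v2 arg;

	if (demo_copy_arg(&v1, sizeof(v1), 1, &arg) == 0)
		printf("v1 caller: size=%u ctx=%#x\n", arg.command_size, arg.context_handle);
	return 0;
}
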
/drivers/video/drm/vmwgfx/vmwgfx_fb.c
0,0 → 1,835
/**************************************************************************
*
* Copyright © 2007 David Airlie
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#include <linux/export.h>
 
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"
 
#include <drm/ttm/ttm_placement.h>
 
#define VMW_DIRTY_DELAY 0
 
struct vmw_fb_par {
struct vmw_private *vmw_priv;
 
void *vmalloc;
 
struct mutex bo_mutex;
struct vmw_dma_buffer *vmw_bo;
struct ttm_bo_kmap_obj map;
void *bo_ptr;
unsigned bo_size;
struct drm_framebuffer *set_fb;
struct drm_display_mode *set_mode;
u32 fb_x;
u32 fb_y;
bool bo_iowrite;
 
u32 pseudo_palette[17];
 
unsigned max_width;
unsigned max_height;
 
struct {
spinlock_t lock;
bool active;
unsigned x1;
unsigned y1;
unsigned x2;
unsigned y2;
} dirty;
 
struct drm_crtc *crtc;
struct drm_connector *con;
struct delayed_work local_work;
};
 
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
struct vmw_fb_par *par = info->par;
u32 *pal = par->pseudo_palette;
 
if (regno > 15) {
DRM_ERROR("Bad regno %u.\n", regno);
return 1;
}
 
switch (par->set_fb->depth) {
case 24:
case 32:
pal[regno] = ((red & 0xff00) << 8) |
(green & 0xff00) |
((blue & 0xff00) >> 8);
break;
default:
DRM_ERROR("Bad depth %u, bpp %u.\n", par->set_fb->depth,
par->set_fb->bits_per_pixel);
return 1;
}
 
return 0;
}
 
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
int depth = var->bits_per_pixel;
struct vmw_fb_par *par = info->par;
struct vmw_private *vmw_priv = par->vmw_priv;
 
switch (var->bits_per_pixel) {
case 32:
depth = (var->transp.length > 0) ? 32 : 24;
break;
default:
DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
return -EINVAL;
}
 
switch (depth) {
case 24:
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.length = 0;
var->transp.offset = 0;
break;
case 32:
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.length = 8;
var->transp.offset = 24;
break;
default:
DRM_ERROR("Bad depth %u.\n", depth);
return -EINVAL;
}
 
if ((var->xoffset + var->xres) > par->max_width ||
(var->yoffset + var->yres) > par->max_height) {
DRM_ERROR("Requested geom can not fit in framebuffer\n");
return -EINVAL;
}
 
if (!vmw_kms_validate_mode_vram(vmw_priv,
var->xres * var->bits_per_pixel/8,
var->yoffset + var->yres)) {
DRM_ERROR("Requested geom can not fit in framebuffer\n");
return -EINVAL;
}
 
return 0;
}
 
static int vmw_fb_blank(int blank, struct fb_info *info)
{
return 0;
}
 
/*
* Dirty code
*/
 
static void vmw_fb_dirty_flush(struct work_struct *work)
{
struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
local_work.work);
struct vmw_private *vmw_priv = par->vmw_priv;
struct fb_info *info = vmw_priv->fb_info;
unsigned long irq_flags;
s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
u32 cpp, max_x, max_y;
struct drm_clip_rect clip;
struct drm_framebuffer *cur_fb;
u8 *src_ptr, *dst_ptr;
 
if (vmw_priv->suspended)
return;
 
mutex_lock(&par->bo_mutex);
cur_fb = par->set_fb;
if (!cur_fb)
goto out_unlock;
 
spin_lock_irqsave(&par->dirty.lock, irq_flags);
if (!par->dirty.active) {
spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
goto out_unlock;
}
 
/*
* Handle panning when copying from vmalloc to framebuffer.
* Clip dirty area to framebuffer.
*/
cpp = (cur_fb->bits_per_pixel + 7) / 8;
max_x = par->fb_x + cur_fb->width;
max_y = par->fb_y + cur_fb->height;
 
dst_x1 = par->dirty.x1 - par->fb_x;
dst_y1 = par->dirty.y1 - par->fb_y;
dst_x1 = max_t(s32, dst_x1, 0);
dst_y1 = max_t(s32, dst_y1, 0);
 
dst_x2 = par->dirty.x2 - par->fb_x;
dst_y2 = par->dirty.y2 - par->fb_y;
dst_x2 = min_t(s32, dst_x2, max_x);
dst_y2 = min_t(s32, dst_y2, max_y);
w = dst_x2 - dst_x1;
h = dst_y2 - dst_y1;
w = max_t(s32, 0, w);
h = max_t(s32, 0, h);
 
par->dirty.x1 = par->dirty.x2 = 0;
par->dirty.y1 = par->dirty.y2 = 0;
spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
 
if (w && h) {
dst_ptr = (u8 *)par->bo_ptr +
(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
src_ptr = (u8 *)par->vmalloc +
((dst_y1 + par->fb_y) * info->fix.line_length +
(dst_x1 + par->fb_x) * cpp);
 
// while (h-- > 0) {
// memcpy(dst_ptr, src_ptr, w*cpp);
// dst_ptr += par->set_fb->pitches[0];
// src_ptr += info->fix.line_length;
// }
 
clip.x1 = dst_x1;
clip.x2 = dst_x2;
clip.y1 = dst_y1;
clip.y2 = dst_y2;
 
WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
&clip, 1));
vmw_fifo_flush(vmw_priv, false);
}
out_unlock:
mutex_unlock(&par->bo_mutex);
}
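
The flush worker above translates the accumulated dirty rectangle from fbdev coordinates into KMS framebuffer coordinates by subtracting the pan offset and then clamping so the copy stays inside the framebuffer. Below is a simplified stand-alone model of that pan-and-clip step; the in-kernel code derives its clip bounds slightly differently, and the numbers here are made up.

#include <stdio.h>

static int clamp_lo(int v)         { return v < 0 ? 0 : v; }
static int clamp_hi(int v, int hi) { return v > hi ? hi : v; }

int main(void)
{
	int fb_x = 100, fb_y = 50;                 /* pan offset */
	int fb_w = 640, fb_h = 480;                /* KMS framebuffer size */
	int x1 = 90, y1 = 40, x2 = 800, y2 = 600;  /* dirty rect, fbdev coords */

	int dst_x1 = clamp_lo(x1 - fb_x);
	int dst_y1 = clamp_lo(y1 - fb_y);
	int dst_x2 = clamp_hi(x2 - fb_x, fb_w);
	int dst_y2 = clamp_hi(y2 - fb_y, fb_h);
	int w = dst_x2 > dst_x1 ? dst_x2 - dst_x1 : 0;
	int h = dst_y2 > dst_y1 ? dst_y2 - dst_y1 : 0;

	/* Prints: dst (0,0)-(640,480) w=640 h=480 */
	printf("dst (%d,%d)-(%d,%d) w=%d h=%d\n",
	       dst_x1, dst_y1, dst_x2, dst_y2, w, h);
	return 0;
}
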
 
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
unsigned x1, unsigned y1,
unsigned width, unsigned height)
{
unsigned long flags;
unsigned x2 = x1 + width;
unsigned y2 = y1 + height;
 
spin_lock_irqsave(&par->dirty.lock, flags);
if (par->dirty.x1 == par->dirty.x2) {
par->dirty.x1 = x1;
par->dirty.y1 = y1;
par->dirty.x2 = x2;
par->dirty.y2 = y2;
/* If we are active, start the dirty work;
* we share the work with the defio system */
if (par->dirty.active)
schedule_delayed_work(&par->local_work,
VMW_DIRTY_DELAY);
} else {
if (x1 < par->dirty.x1)
par->dirty.x1 = x1;
if (y1 < par->dirty.y1)
par->dirty.y1 = y1;
if (x2 > par->dirty.x2)
par->dirty.x2 = x2;
if (y2 > par->dirty.y2)
par->dirty.y2 = y2;
}
spin_unlock_irqrestore(&par->dirty.lock, flags);
}
 
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct vmw_fb_par *par = info->par;
 
if ((var->xoffset + var->xres) > var->xres_virtual ||
(var->yoffset + var->yres) > var->yres_virtual) {
DRM_ERROR("Requested panning can not fit in framebuffer\n");
return -EINVAL;
}
 
mutex_lock(&par->bo_mutex);
par->fb_x = var->xoffset;
par->fb_y = var->yoffset;
if (par->set_fb)
vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
par->set_fb->height);
mutex_unlock(&par->bo_mutex);
 
return 0;
}
 
#if 0
static void vmw_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
struct vmw_fb_par *par = info->par;
unsigned long start, end, min, max;
unsigned long flags;
struct page *page;
int y1, y2;
 
min = ULONG_MAX;
max = 0;
list_for_each_entry(page, pagelist, lru) {
start = page->index << PAGE_SHIFT;
end = start + PAGE_SIZE - 1;
min = min(min, start);
max = max(max, end);
}
 
if (min < max) {
y1 = min / info->fix.line_length;
y2 = (max / info->fix.line_length) + 1;
 
spin_lock_irqsave(&par->dirty.lock, flags);
par->dirty.x1 = 0;
par->dirty.y1 = y1;
par->dirty.x2 = info->var.xres;
par->dirty.y2 = y2;
spin_unlock_irqrestore(&par->dirty.lock, flags);
 
/*
* Since we've already waited on this work once, try to
* execute asap.
*/
cancel_delayed_work(&par->local_work);
schedule_delayed_work(&par->local_work, 0);
}
};
 
static struct fb_deferred_io vmw_defio = {
.delay = VMW_DIRTY_DELAY,
.deferred_io = vmw_deferred_io,
};
#endif
 
/*
* Draw code
*/
 
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
cfb_fillrect(info, rect);
vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
rect->width, rect->height);
}
 
static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
cfb_copyarea(info, region);
vmw_fb_dirty_mark(info->par, region->dx, region->dy,
region->width, region->height);
}
 
static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
cfb_imageblit(info, image);
vmw_fb_dirty_mark(info->par, image->dx, image->dy,
image->width, image->height);
}
 
/*
* Bring up code
*/
 
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
size_t size, struct vmw_dma_buffer **out)
{
struct vmw_dma_buffer *vmw_bo;
int ret;
 
(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
 
vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
if (!vmw_bo) {
ret = -ENOMEM;
goto err_unlock;
}
 
ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
&vmw_sys_placement,
false,
&vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
goto err_unlock; /* init frees the buffer on failure */
 
*out = vmw_bo;
ttm_write_unlock(&vmw_priv->reservation_sem);
 
return 0;
 
err_unlock:
ttm_write_unlock(&vmw_priv->reservation_sem);
return ret;
}
 
static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
int *depth)
{
switch (var->bits_per_pixel) {
case 32:
*depth = (var->transp.length > 0) ? 32 : 24;
break;
default:
DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
return -EINVAL;
}
 
return 0;
}
 
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
bool detach_bo,
bool unref_bo)
{
struct drm_framebuffer *cur_fb = par->set_fb;
int ret;
 
/* Detach the KMS framebuffer from crtcs */
if (par->set_mode) {
struct drm_mode_set set;
 
set.crtc = par->crtc;
set.x = 0;
set.y = 0;
set.mode = NULL;
set.fb = NULL;
set.num_connectors = 1;
set.connectors = &par->con;
ret = drm_mode_set_config_internal(&set);
if (ret) {
DRM_ERROR("Could not unset a mode.\n");
return ret;
}
drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
par->set_mode = NULL;
}
 
if (cur_fb) {
drm_framebuffer_unreference(cur_fb);
par->set_fb = NULL;
}
 
if (par->vmw_bo && detach_bo) {
if (par->bo_ptr) {
ttm_bo_kunmap(&par->map);
par->bo_ptr = NULL;
}
if (unref_bo)
vmw_dmabuf_unreference(&par->vmw_bo);
else
vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
}
 
return 0;
}
 
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
struct drm_mode_fb_cmd mode_cmd;
struct vmw_fb_par *par = info->par;
struct fb_var_screeninfo *var = &info->var;
struct drm_framebuffer *cur_fb;
struct vmw_framebuffer *vfb;
int ret = 0;
size_t new_bo_size;
 
ret = vmw_fb_compute_depth(var, &mode_cmd.depth);
if (ret)
return ret;
 
mode_cmd.width = var->xres;
mode_cmd.height = var->yres;
mode_cmd.bpp = var->bits_per_pixel;
mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width;
 
cur_fb = par->set_fb;
if (cur_fb && cur_fb->width == mode_cmd.width &&
cur_fb->height == mode_cmd.height &&
cur_fb->bits_per_pixel == mode_cmd.bpp &&
cur_fb->depth == mode_cmd.depth &&
cur_fb->pitches[0] == mode_cmd.pitch)
return 0;
 
/* Need new buffer object ? */
new_bo_size = (size_t) mode_cmd.pitch * (size_t) mode_cmd.height;
ret = vmw_fb_kms_detach(par,
par->bo_size < new_bo_size ||
par->bo_size > 2*new_bo_size,
true);
if (ret)
return ret;
 
if (!par->vmw_bo) {
ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
&par->vmw_bo);
if (ret) {
DRM_ERROR("Failed creating a buffer object for "
"fbdev.\n");
return ret;
}
par->bo_size = new_bo_size;
}
 
vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
true, &mode_cmd);
if (IS_ERR(vfb))
return PTR_ERR(vfb);
 
par->set_fb = &vfb->base;
 
if (!par->bo_ptr) {
/*
* Pin before mapping. Since we don't know in what placement
* to pin, call into KMS to do it for us.
*/
ret = vfb->pin(vfb);
if (ret) {
DRM_ERROR("Could not pin the fbdev framebuffer.\n");
return ret;
}
 
ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
par->vmw_bo->base.num_pages, &par->map);
if (ret) {
vfb->unpin(vfb);
DRM_ERROR("Could not map the fbdev framebuffer.\n");
return ret;
}
 
par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
}
 
return 0;
}
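
vmw_fb_kms_framebuffer() above only reallocates the backing buffer when the existing one is too small for the new mode, or more than twice as large as needed, which avoids churn on frequent mode changes. A self-contained sketch of that reuse policy, with made-up mode sizes:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static size_t mode_size(unsigned width, unsigned height, unsigned bpp)
{
	size_t pitch = ((bpp + 7) / 8) * (size_t)width;
	return pitch * height;
}

static bool need_new_bo(size_t cur_size, size_t new_size)
{
	return cur_size < new_size || cur_size > 2 * new_size;
}

int main(void)
{
	size_t cur = mode_size(1280, 720, 32);   /* what is currently allocated */

	/* Slightly smaller mode: keep the buffer. */
	printf("1024x768:  %s\n",
	       need_new_bo(cur, mode_size(1024, 768, 32)) ? "reallocate" : "reuse");
	/* Larger mode: buffer too small, reallocate. */
	printf("1920x1080: %s\n",
	       need_new_bo(cur, mode_size(1920, 1080, 32)) ? "reallocate" : "reuse");
	/* Much smaller mode: buffer more than twice too big, reallocate. */
	printf("640x480:   %s\n",
	       need_new_bo(cur, mode_size(640, 480, 32)) ? "reallocate" : "reuse");
	return 0;
}
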
 
static int vmw_fb_set_par(struct fb_info *info)
{
struct vmw_fb_par *par = info->par;
struct vmw_private *vmw_priv = par->vmw_priv;
struct drm_mode_set set;
struct fb_var_screeninfo *var = &info->var;
struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
DRM_MODE_TYPE_DRIVER,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
struct drm_display_mode *old_mode;
struct drm_display_mode *mode;
int ret;
 
old_mode = par->set_mode;
mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
if (!mode) {
DRM_ERROR("Could not create new fb mode.\n");
return -ENOMEM;
}
 
mode->hdisplay = var->xres;
mode->vdisplay = var->yres;
vmw_guess_mode_timing(mode);
 
if (old_mode && drm_mode_equal(old_mode, mode)) {
drm_mode_destroy(vmw_priv->dev, mode);
mode = old_mode;
old_mode = NULL;
} else if (!vmw_kms_validate_mode_vram(vmw_priv,
mode->hdisplay *
(var->bits_per_pixel + 7) / 8,
mode->vdisplay)) {
drm_mode_destroy(vmw_priv->dev, mode);
return -EINVAL;
}
 
mutex_lock(&par->bo_mutex);
drm_modeset_lock_all(vmw_priv->dev);
ret = vmw_fb_kms_framebuffer(info);
if (ret)
goto out_unlock;
 
par->fb_x = var->xoffset;
par->fb_y = var->yoffset;
 
set.crtc = par->crtc;
set.x = 0;
set.y = 0;
set.mode = mode;
set.fb = par->set_fb;
set.num_connectors = 1;
set.connectors = &par->con;
 
ret = drm_mode_set_config_internal(&set);
if (ret)
goto out_unlock;
 
vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
par->set_fb->width, par->set_fb->height);
 
/* If there already was stuff dirty we won't
* schedule new work, so let's do it now */
 
// schedule_delayed_work(&par->local_work, 0);
 
out_unlock:
if (old_mode)
drm_mode_destroy(vmw_priv->dev, old_mode);
par->set_mode = mode;
 
drm_modeset_unlock_all(vmw_priv->dev);
mutex_unlock(&par->bo_mutex);
 
return ret;
}
 
 
static struct fb_ops vmw_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = vmw_fb_check_var,
.fb_set_par = vmw_fb_set_par,
.fb_setcolreg = vmw_fb_setcolreg,
// .fb_fillrect = vmw_fb_fillrect,
// .fb_copyarea = vmw_fb_copyarea,
// .fb_imageblit = vmw_fb_imageblit,
// .fb_pan_display = vmw_fb_pan_display,
.fb_blank = vmw_fb_blank,
};
 
int vmw_fb_init(struct vmw_private *vmw_priv)
{
struct device *device = &vmw_priv->dev->pdev->dev;
struct vmw_fb_par *par;
struct fb_info *info;
unsigned fb_width, fb_height;
unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
struct drm_display_mode *init_mode;
int ret;
 
fb_bpp = 32;
fb_depth = 24;
 
/* XXX As shouldn't these be as well. */
fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
 
fb_pitch = fb_width * fb_bpp / 8;
fb_size = fb_pitch * fb_height;
fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
 
info = framebuffer_alloc(sizeof(*par), device);
if (!info)
return -ENOMEM;
 
/*
* Par
*/
vmw_priv->fb_info = info;
par = info->par;
memset(par, 0, sizeof(*par));
INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
par->vmw_priv = vmw_priv;
par->vmalloc = NULL;
par->max_width = fb_width;
par->max_height = fb_height;
 
drm_modeset_lock_all(vmw_priv->dev);
ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
par->max_height, &par->con,
&par->crtc, &init_mode);
if (ret) {
drm_modeset_unlock_all(vmw_priv->dev);
goto err_kms;
}
 
info->var.xres = init_mode->hdisplay;
info->var.yres = init_mode->vdisplay;
drm_modeset_unlock_all(vmw_priv->dev);
 
/*
* Create buffers and alloc memory
*/
par->vmalloc = vzalloc(fb_size);
if (unlikely(par->vmalloc == NULL)) {
ret = -ENOMEM;
goto err_free;
}
 
/*
* Fixed and var
*/
strcpy(info->fix.id, "svgadrmfb");
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
info->fix.type_aux = 0;
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
info->fix.ywrapstep = 0;
info->fix.accel = FB_ACCEL_NONE;
info->fix.line_length = fb_pitch;
 
info->fix.smem_start = 0;
info->fix.smem_len = fb_size;
 
info->pseudo_palette = par->pseudo_palette;
info->screen_base = (char __iomem *)par->vmalloc;
info->screen_size = fb_size;
 
info->flags = FBINFO_DEFAULT;
info->fbops = &vmw_fb_ops;
 
/* 24 depth per default */
info->var.red.offset = 16;
info->var.green.offset = 8;
info->var.blue.offset = 0;
info->var.red.length = 8;
info->var.green.length = 8;
info->var.blue.length = 8;
info->var.transp.offset = 0;
info->var.transp.length = 0;
 
info->var.xres_virtual = fb_width;
info->var.yres_virtual = fb_height;
info->var.bits_per_pixel = fb_bpp;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
info->var.height = -1;
info->var.width = -1;
 
/*
* Dirty & Deferred IO
*/
par->dirty.x1 = par->dirty.x2 = 0;
par->dirty.y1 = par->dirty.y2 = 0;
par->dirty.active = true;
spin_lock_init(&par->dirty.lock);
mutex_init(&par->bo_mutex);
// fb_deferred_io_init(info);
 
vmw_fb_set_par(info);
 
return 0;
 
err_defio:
fb_deferred_io_cleanup(info);
err_aper:
err_free:
vfree(par->vmalloc);
err_kms:
framebuffer_release(info);
vmw_priv->fb_info = NULL;
 
return ret;
}
 
int vmw_fb_close(struct vmw_private *vmw_priv)
{
struct fb_info *info;
struct vmw_fb_par *par;
 
if (!vmw_priv->fb_info)
return 0;
 
info = vmw_priv->fb_info;
par = info->par;
 
/* ??? order */
 
(void) vmw_fb_kms_detach(par, true, true);
 
vfree(par->vmalloc);
framebuffer_release(info);
 
return 0;
}
 
int vmw_fb_off(struct vmw_private *vmw_priv)
{
struct fb_info *info;
struct vmw_fb_par *par;
unsigned long flags;
 
if (!vmw_priv->fb_info)
return -EINVAL;
 
info = vmw_priv->fb_info;
par = info->par;
 
spin_lock_irqsave(&par->dirty.lock, flags);
par->dirty.active = false;
spin_unlock_irqrestore(&par->dirty.lock, flags);
 
mutex_lock(&par->bo_mutex);
(void) vmw_fb_kms_detach(par, true, false);
mutex_unlock(&par->bo_mutex);
 
return 0;
}
 
int vmw_fb_on(struct vmw_private *vmw_priv)
{
struct fb_info *info;
struct vmw_fb_par *par;
unsigned long flags;
 
if (!vmw_priv->fb_info)
return -EINVAL;
 
info = vmw_priv->fb_info;
par = info->par;
 
vmw_fb_set_par(info);
spin_lock_irqsave(&par->dirty.lock, flags);
par->dirty.active = true;
spin_unlock_irqrestore(&par->dirty.lock, flags);
return 0;
}
 
void vmw_fb_update(struct vmw_private *vmw_priv)
{
struct fb_info *info = vmw_priv->fb_info;
struct vmw_fb_par *par = info->par;
vmw_fb_dirty_mark(par, 0, 0,
par->set_fb->width, par->set_fb->height);
}
/drivers/video/drm/vmwgfx/vmwgfx_fence.c
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
46,6 → 46,7
bool goal_irq_on; /* Protected by @goal_irq_mutex */
bool seqno_valid; /* Protected by @lock, and may not be set to true
without the @goal_irq_mutex held. */
unsigned ctx;
};
 
struct vmw_user_fence {
80,6 → 81,12
uint32_t *tv_usec;
};
 
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}
 
/**
* Note on fencing subsystem usage of irqs:
* Typically the vmw_fences_update function is called
102,26 → 109,128
* objects with actions attached to them.
*/
 
static void vmw_fence_obj_destroy_locked(struct kref *kref)
static void vmw_fence_obj_destroy(struct fence *f)
{
struct vmw_fence_obj *fence =
container_of(kref, struct vmw_fence_obj, kref);
container_of(f, struct vmw_fence_obj, base);
 
struct vmw_fence_manager *fman = fence->fman;
unsigned int num_fences;
struct vmw_fence_manager *fman = fman_from_fence(fence);
unsigned long irq_flags;
 
spin_lock_irqsave(&fman->lock, irq_flags);
list_del_init(&fence->head);
num_fences = --fman->num_fence_objects;
spin_unlock_irq(&fman->lock);
if (fence->destroy)
--fman->num_fence_objects;
spin_unlock_irqrestore(&fman->lock, irq_flags);
fence->destroy(fence);
else
kfree(fence);
}
 
spin_lock_irq(&fman->lock);
static const char *vmw_fence_get_driver_name(struct fence *f)
{
return "vmwgfx";
}
 
static const char *vmw_fence_get_timeline_name(struct fence *f)
{
return "svga";
}
 
static bool vmw_fence_enable_signaling(struct fence *f)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
 
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;
 
u32 *fifo_mem = dev_priv->mmio_virt;
u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;
 
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 
return true;
}
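
The enable_signaling path above decides whether a fence has already been passed with the unsigned subtraction "seqno - fence->base.seqno < VMW_FENCE_WRAP", which stays correct even after the 32-bit hardware counter wraps, as long as the two values are less than VMW_FENCE_WRAP apart. A small stand-alone demonstration with arbitrary values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VMW_FENCE_WRAP (1u << 24)

static bool seqno_passed(uint32_t hw_seqno, uint32_t fence_seqno)
{
	/* Unsigned subtraction keeps the test valid across wrap-around. */
	return hw_seqno - fence_seqno < VMW_FENCE_WRAP;
}

int main(void)
{
	printf("%d\n", seqno_passed(1000, 900));         /* 1: fence already passed */
	printf("%d\n", seqno_passed(900, 1000));         /* 0: fence not reached yet */
	printf("%d\n", seqno_passed(0x10, 0xfffffff0));  /* 1: passed, counter wrapped */
	return 0;
}
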
 
struct vmwgfx_wait_cb {
struct fence_cb base;
struct task_struct *task;
};
 
static void
vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
{
struct vmwgfx_wait_cb *wait =
container_of(cb, struct vmwgfx_wait_cb, base);
 
// wake_up_process(wait->task);
}
 
static void __vmw_fences_update(struct vmw_fence_manager *fman);
 
static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
 
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;
struct vmwgfx_wait_cb cb;
long ret = timeout;
unsigned long irq_flags;
 
if (likely(vmw_fence_obj_signaled(fence)))
return timeout;
 
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
vmw_seqno_waiter_add(dev_priv);
 
spin_lock_irqsave(f->lock, irq_flags);
 
// if (intr && signal_pending(current)) {
// ret = -ERESTARTSYS;
// goto out;
// }
 
cb.base.func = vmwgfx_wait_cb;
cb.task = current;
list_add(&cb.base.node, &f->cb_list);
 
while (ret > 0) {
__vmw_fences_update(fman);
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
break;
 
spin_unlock_irqrestore(f->lock, irq_flags);
 
// ret = schedule_timeout(ret);
delay(1);
ret = 0;
spin_lock_irqsave(f->lock, irq_flags);
// if (ret > 0 && intr && signal_pending(current))
// ret = -ERESTARTSYS;
}
 
if (!list_empty(&cb.base.node))
list_del(&cb.base.node);
 
out:
spin_unlock_irqrestore(f->lock, irq_flags);
 
vmw_seqno_waiter_remove(dev_priv);
 
return ret;
}
 
static struct fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling,
.wait = vmw_fence_wait,
.release = vmw_fence_obj_destroy,
};
 
 
/**
* Execute signal actions on fences recently signaled.
* This is done from a workqueue so we don't have to execute
186,6 → 295,7
fman->event_fence_action_size =
ttm_round_pot(sizeof(struct vmw_event_fence_action));
mutex_init(&fman->goal_irq_mutex);
fman->ctx = fence_context_alloc(1);
 
return fman;
}
207,23 → 317,16
}
 
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
struct vmw_fence_obj *fence,
u32 seqno,
uint32_t mask,
struct vmw_fence_obj *fence, u32 seqno,
void (*destroy) (struct vmw_fence_obj *fence))
{
unsigned long irq_flags;
unsigned int num_fences;
int ret = 0;
 
fence->seqno = seqno;
fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
fman->ctx, seqno);
INIT_LIST_HEAD(&fence->seq_passed_actions);
fence->fman = fman;
fence->signaled = 0;
fence->signal_mask = mask;
kref_init(&fence->kref);
fence->destroy = destroy;
init_waitqueue_head(&fence->queue);
 
spin_lock_irqsave(&fman->lock, irq_flags);
if (unlikely(fman->fifo_down)) {
231,7 → 334,7
goto out_unlock;
}
list_add_tail(&fence->head, &fman->fence_list);
num_fences = ++fman->num_fence_objects;
++fman->num_fence_objects;
 
out_unlock:
spin_unlock_irqrestore(&fman->lock, irq_flags);
239,38 → 342,6
 
}
 
struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
{
if (unlikely(fence == NULL))
return NULL;
 
kref_get(&fence->kref);
return fence;
}
 
/**
* vmw_fence_obj_unreference
*
* Note that this function may not be entered with disabled irqs since
* it may re-enable them in the destroy function.
*
*/
void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
{
struct vmw_fence_obj *fence = *fence_p;
struct vmw_fence_manager *fman;
 
if (unlikely(fence == NULL))
return;
 
fman = fence->fman;
*fence_p = NULL;
spin_lock_irq(&fman->lock);
BUG_ON(atomic_read(&fence->kref.refcount) == 0);
kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
spin_unlock_irq(&fman->lock);
}
 
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
struct list_head *list)
{
311,7 → 382,7
u32 passed_seqno)
{
u32 goal_seqno;
__le32 __iomem *fifo_mem;
u32 *fifo_mem;
struct vmw_fence_obj *fence;
 
if (likely(!fman->seqno_valid))
318,7 → 389,7
return false;
 
fifo_mem = fman->dev_priv->mmio_virt;
goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
return false;
 
326,7 → 397,7
list_for_each_entry(fence, &fman->fence_list, head) {
if (!list_empty(&fence->seq_passed_actions)) {
fman->seqno_valid = true;
iowrite32(fence->seqno,
vmw_mmio_write(fence->base.seqno,
fifo_mem + SVGA_FIFO_FENCE_GOAL);
break;
}
353,55 → 424,47
*/
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
u32 goal_seqno;
__le32 __iomem *fifo_mem;
u32 *fifo_mem;
 
if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC)
if (fence_is_signaled_locked(&fence->base))
return false;
 
fifo_mem = fence->fman->dev_priv->mmio_virt;
goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
if (likely(fence->fman->seqno_valid &&
goal_seqno - fence->seqno < VMW_FENCE_WRAP))
fifo_mem = fman->dev_priv->mmio_virt;
goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
if (likely(fman->seqno_valid &&
goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
return false;
 
iowrite32(fence->seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
fence->fman->seqno_valid = true;
vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
fman->seqno_valid = true;
 
return true;
}
 
void vmw_fences_update(struct vmw_fence_manager *fman)
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
unsigned long flags;
struct vmw_fence_obj *fence, *next_fence;
struct list_head action_list;
bool needs_rerun;
uint32_t seqno, new_seqno;
__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
u32 *fifo_mem = fman->dev_priv->mmio_virt;
 
seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
spin_lock_irqsave(&fman->lock, flags);
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->seqno < VMW_FENCE_WRAP) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
list_del_init(&fence->head);
fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
fence_signal_locked(&fence->base);
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
vmw_fences_perform_actions(fman, &action_list);
wake_up_all(&fence->queue);
} else
break;
}
 
needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
 
// if (!list_empty(&fman->cleanup_list))
// (void) schedule_work(&fman->work);
spin_unlock_irqrestore(&fman->lock, flags);
 
/*
* Rerun if the fence goal seqno was updated, and the
* hardware might have raced with that update, so that
408,77 → 471,54
* we missed a fence_goal irq.
*/
 
needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
if (unlikely(needs_rerun)) {
new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
if (new_seqno != seqno) {
seqno = new_seqno;
goto rerun;
}
}
 
}
 
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
uint32_t flags)
void vmw_fences_update(struct vmw_fence_manager *fman)
{
struct vmw_fence_manager *fman = fence->fman;
unsigned long irq_flags;
uint32_t signaled;
 
spin_lock_irqsave(&fman->lock, irq_flags);
signaled = fence->signaled;
__vmw_fences_update(fman);
spin_unlock_irqrestore(&fman->lock, irq_flags);
}
 
flags &= fence->signal_mask;
if ((signaled & flags) == flags)
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
 
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
return 1;
 
if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0)
vmw_fences_update(fman);
 
spin_lock_irqsave(&fman->lock, irq_flags);
signaled = fence->signaled;
spin_unlock_irqrestore(&fman->lock, irq_flags);
 
return ((signaled & flags) == flags);
return fence_is_signaled(&fence->base);
}
 
int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
uint32_t flags, bool lazy,
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
bool interruptible, unsigned long timeout)
{
struct vmw_private *dev_priv = fence->fman->dev_priv;
long ret;
long ret = fence_wait_timeout(&fence->base, interruptible, timeout);
 
if (likely(vmw_fence_obj_signaled(fence, flags)))
if (likely(ret > 0))
return 0;
 
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
vmw_seqno_waiter_add(dev_priv);
 
if (interruptible)
ret = wait_event_interruptible_timeout
(fence->queue,
vmw_fence_obj_signaled(fence, flags),
timeout);
else if (ret == 0)
return -EBUSY;
else
ret = wait_event_timeout
(fence->queue,
vmw_fence_obj_signaled(fence, flags),
timeout);
 
vmw_seqno_waiter_remove(dev_priv);
 
if (unlikely(ret == 0))
ret = -EBUSY;
else if (likely(ret > 0))
ret = 0;
 
return ret;
}
 
void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
struct vmw_private *dev_priv = fence->fman->dev_priv;
struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;
 
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}
485,37 → 525,21
 
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fence->fman;
 
kfree(fence);
/*
* Free kernel space accounting.
*/
ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
fman->fence_size);
fence_free(&fence->base);
}
 
int vmw_fence_create(struct vmw_fence_manager *fman,
uint32_t seqno,
uint32_t mask,
struct vmw_fence_obj **p_fence)
{
struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
struct vmw_fence_obj *fence;
int ret;
 
ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
false, false);
if (unlikely(ret != 0))
return ret;
 
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(fence == NULL)) {
ret = -ENOMEM;
goto out_no_object;
}
if (unlikely(fence == NULL))
return -ENOMEM;
 
ret = vmw_fence_obj_init(fman, fence, seqno, mask,
ret = vmw_fence_obj_init(fman, fence, seqno,
vmw_fence_destroy);
if (unlikely(ret != 0))
goto out_err_init;
525,8 → 549,6
 
out_err_init:
kfree(fence);
out_no_object:
ttm_mem_global_free(mem_glob, fman->fence_size);
return ret;
}
 
535,9 → 557,9
{
struct vmw_user_fence *ufence =
container_of(fence, struct vmw_user_fence, fence);
struct vmw_fence_manager *fman = fence->fman;
struct vmw_fence_manager *fman = fman_from_fence(fence);
 
// ttm_base_object_kfree(ufence, base);
ttm_base_object_kfree(ufence, base);
/*
* Free kernel space accounting.
*/
559,7 → 581,6
int vmw_user_fence_create(struct drm_file *file_priv,
struct vmw_fence_manager *fman,
uint32_t seqno,
uint32_t mask,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle)
{
586,7 → 607,7
}
 
ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
mask, vmw_user_fence_destroy);
vmw_user_fence_destroy);
if (unlikely(ret != 0)) {
kfree(ufence);
goto out_no_object;
629,7 → 650,6
 
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
unsigned long irq_flags;
struct list_head action_list;
int ret;
 
638,35 → 658,32
* restart when we've released the fman->lock.
*/
 
spin_lock_irqsave(&fman->lock, irq_flags);
spin_lock_irq(&fman->lock);
fman->fifo_down = true;
while (!list_empty(&fman->fence_list)) {
struct vmw_fence_obj *fence =
list_entry(fman->fence_list.prev, struct vmw_fence_obj,
head);
kref_get(&fence->kref);
fence_get(&fence->base);
spin_unlock_irq(&fman->lock);
 
ret = vmw_fence_obj_wait(fence, fence->signal_mask,
false, false,
ret = vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
 
if (unlikely(ret != 0)) {
list_del_init(&fence->head);
fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
fence_signal(&fence->base);
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
vmw_fences_perform_actions(fman, &action_list);
wake_up_all(&fence->queue);
}
 
BUG_ON(!list_empty(&fence->head));
fence_put(&fence->base);
spin_lock_irq(&fman->lock);
 
BUG_ON(!list_empty(&fence->head));
kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
}
spin_unlock_irqrestore(&fman->lock, irq_flags);
spin_unlock_irq(&fman->lock);
}
 
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
716,7 → 733,7
 
timeout = jiffies;
if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
ret = ((vmw_fence_obj_signaled(fence)) ?
0 : -EBUSY);
goto out;
}
723,7 → 740,7
 
timeout = (unsigned long)arg->kernel_cookie - timeout;
 
ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout);
ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);
 
out:
ttm_base_object_unref(&base);
758,12 → 775,12
}
 
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
fman = fence->fman;
fman = fman_from_fence(fence);
 
arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
arg->signaled = vmw_fence_obj_signaled(fence);
 
arg->signaled_flags = arg->flags;
spin_lock_irq(&fman->lock);
 
arg->signaled_flags = fence->signaled;
arg->passed_seqno = dev_priv->last_read_seqno;
spin_unlock_irq(&fman->lock);
 
847,15 → 864,15
 
file_priv = event->file_priv;
spin_lock_irqsave(&dev->event_lock, irq_flags);
/*
 
if (likely(eaction->tv_sec != NULL)) {
struct timeval tv;
 
do_gettimeofday(&tv);
// do_gettimeofday(&tv);
*eaction->tv_sec = tv.tv_sec;
*eaction->tv_usec = tv.tv_usec;
}
*/
 
list_del_init(&eaction->fpriv_head);
list_add_tail(&eaction->event->link, &file_priv->event_list);
eaction->event = NULL;
876,7 → 893,7
{
struct vmw_event_fence_action *eaction =
container_of(action, struct vmw_event_fence_action, action);
struct vmw_fence_manager *fman = eaction->fence->fman;
struct vmw_fence_manager *fman = fman_from_fence(eaction->fence);
unsigned long irq_flags;
 
spin_lock_irqsave(&fman->lock, irq_flags);
900,7 → 917,7
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
struct vmw_fence_action *action)
{
struct vmw_fence_manager *fman = fence->fman;
struct vmw_fence_manager *fman = fman_from_fence(fence);
unsigned long irq_flags;
bool run_update = false;
 
908,7 → 925,7
spin_lock_irqsave(&fman->lock, irq_flags);
 
fman->pending_actions[action->type]++;
if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) {
if (fence_is_signaled_locked(&fence->base)) {
struct list_head action_list;
 
INIT_LIST_HEAD(&action_list);
960,7 → 977,7
bool interruptible)
{
struct vmw_event_fence_action *eaction;
struct vmw_fence_manager *fman = fence->fman;
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
unsigned long irq_flags;
 
1000,7 → 1017,8
bool interruptible)
{
struct vmw_event_fence_pending *event;
struct drm_device *dev = fence->fman->dev_priv->dev;
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct drm_device *dev = fman->dev_priv->dev;
unsigned long irq_flags;
int ret;
 
1049,6 → 1067,8
if (ret != 0)
goto out_no_queue;
 
return 0;
 
out_no_queue:
event->base.destroy(&event->base);
out_no_event:
1125,17 → 1145,10
 
BUG_ON(fence == NULL);
 
if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
ret = vmw_event_fence_action_create(file_priv, fence,
arg->flags,
arg->user_data,
true);
else
ret = vmw_event_fence_action_create(file_priv, fence,
arg->flags,
arg->user_data,
true);
 
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to attach event to fence.\n");
1154,5 → 1167,4
vmw_fence_obj_unreference(&fence);
return ret;
}
 
#endif
/drivers/video/drm/vmwgfx/vmwgfx_fence.h
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2011-2012 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
27,6 → 27,8
 
#ifndef _VMWGFX_FENCE_H_
 
#include <linux/fence.h>
 
#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
 
struct vmw_private;
50,16 → 52,11
};
 
struct vmw_fence_obj {
struct kref kref;
u32 seqno;
struct fence base;
 
struct vmw_fence_manager *fman;
struct list_head head;
uint32_t signaled;
uint32_t signal_mask;
struct list_head seq_passed_actions;
void (*destroy)(struct vmw_fence_obj *fence);
wait_queue_head_t queue;
};
 
extern struct vmw_fence_manager *
67,17 → 64,29
 
extern void vmw_fence_manager_takedown(struct vmw_fence_manager *fman);
 
extern void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p);
static inline void
vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
{
struct vmw_fence_obj *fence = *fence_p;
 
extern struct vmw_fence_obj *
vmw_fence_obj_reference(struct vmw_fence_obj *fence);
*fence_p = NULL;
if (fence)
fence_put(&fence->base);
}
 
static inline struct vmw_fence_obj *
vmw_fence_obj_reference(struct vmw_fence_obj *fence)
{
if (fence)
fence_get(&fence->base);
return fence;
}
 
extern void vmw_fences_update(struct vmw_fence_manager *fman);
 
extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
uint32_t flags);
extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence);
 
extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence, uint32_t flags,
extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
bool lazy,
bool interruptible, unsigned long timeout);
 
85,13 → 94,11
 
extern int vmw_fence_create(struct vmw_fence_manager *fman,
uint32_t seqno,
uint32_t mask,
struct vmw_fence_obj **p_fence);
 
extern int vmw_user_fence_create(struct drm_file *file_priv,
struct vmw_fence_manager *fman,
uint32_t sequence,
uint32_t mask,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle);
 
111,7 → 118,6
struct drm_file *file_priv);
extern void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
struct list_head *event_list);
/*
extern int vmw_event_fence_action_queue(struct drm_file *filee_priv,
struct vmw_fence_obj *fence,
struct drm_pending_event *event,
118,6 → 124,4
uint32_t *tv_sec,
uint32_t *tv_usec,
bool interruptible);
*/
 
#endif /* _VMWGFX_FENCE_H_ */
/drivers/video/drm/vmwgfx/vmwgfx_fifo.c
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
24,20 → 24,19
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#define mb() asm volatile("mfence" : : : "memory")
#define rmb() asm volatile("lfence" : : : "memory")
#define wmb() asm volatile("sfence" : : : "memory")
 
#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>
 
#define TASK_INTERRUPTIBLE 1
#define TASK_UNINTERRUPTIBLE 2
struct vmw_temp_set_context {
SVGA3dCmdHeader header;
SVGA3dCmdDXTempSetContext body;
};
 
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
50,10 → 49,10
if (!dev_priv->has_mob)
return false;
 
mutex_lock(&dev_priv->hw_mutex);
spin_lock(&dev_priv->cap_lock);
vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
mutex_unlock(&dev_priv->hw_mutex);
spin_unlock(&dev_priv->cap_lock);
 
return (result != 0);
}
61,11 → 60,11
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
 
fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
 
hwversion = ioread32(fifo_mem +
hwversion = vmw_mmio_read(fifo_mem +
((fifo->capabilities &
SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
SVGA_FIFO_3D_HWVERSION_REVISED :
77,8 → 76,8
if (hwversion < SVGA3D_HWVERSION_WS8_B1)
return false;
 
/* Non-Screen Object path does not support surfaces */
if (!dev_priv->sou_priv)
/* Legacy Display Unit does not support surfaces */
if (dev_priv->active_display_unit == vmw_du_legacy)
return false;
 
return true;
86,13 → 85,13
 
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t caps;
 
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
 
caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
if (caps & SVGA_FIFO_CAP_PITCHLOCK)
return true;
 
101,13 → 100,13
 
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
uint32_t dummy;
 
fifo->dx = false;
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = KernelAlloc(fifo->static_buffer_size);
fifo->static_buffer = vmalloc(fifo->static_buffer_size);
if (unlikely(fifo->static_buffer == NULL))
return -ENOMEM;
 
116,22 → 115,20
fifo->using_bounce_buffer = false;
 
mutex_init(&fifo->fifo_mutex);
// init_rwsem(&fifo->rwsem);
init_rwsem(&fifo->rwsem);
 
/*
* Allow mapping the first page read-only to user-space.
*/
 
DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
 
mutex_lock(&dev_priv->hw_mutex);
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
 
vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
SVGA_REG_ENABLE_HIDE);
vmw_write(dev_priv, SVGA_REG_TRACES, 0);
 
min = 4;
if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
140,20 → 137,19
if (min < PAGE_SIZE)
min = PAGE_SIZE;
 
iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
wmb();
iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP);
vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
mb();
 
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
mutex_unlock(&dev_priv->hw_mutex);
 
max = ioread32(fifo_mem + SVGA_FIFO_MAX);
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
 
DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
(unsigned int) max,
161,37 → 157,31
(unsigned int) fifo->capabilities);
 
atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
vmw_marker_queue_init(&fifo->marker_queue);
 
int ret = 0; //vmw_fifo_send_fence(dev_priv, &dummy);
return ret;
return 0;
}
 
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 *fifo_mem = dev_priv->mmio_virt;
 
mutex_lock(&dev_priv->hw_mutex);
 
if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
preempt_disable();
if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
preempt_enable();
}
 
mutex_unlock(&dev_priv->hw_mutex);
}
 
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 *fifo_mem = dev_priv->mmio_virt;
 
mutex_lock(&dev_priv->hw_mutex);
 
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
;
 
dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
 
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
dev_priv->config_done_state);
200,7 → 190,6
vmw_write(dev_priv, SVGA_REG_TRACES,
dev_priv->traces_state);
 
mutex_unlock(&dev_priv->hw_mutex);
vmw_marker_queue_takedown(&fifo->marker_queue);
 
if (likely(fifo->static_buffer != NULL)) {
216,11 → 205,11
 
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
 
return ((max - next_cmd) + (stop - min) <= bytes);
}
259,7 → 248,6
unsigned long timeout)
{
long ret = 1L;
unsigned long irq_flags;
 
if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
return 0;
269,16 → 257,8
return vmw_fifo_wait_noirq(dev_priv, bytes,
interruptible, timeout);
 
mutex_lock(&dev_priv->hw_mutex);
if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FIFO_PROGRESS,
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
&dev_priv->fifo_queue_waiters);
 
if (interruptible)
ret = wait_event_interruptible_timeout
294,14 → 274,8
else if (likely(ret > 0))
ret = 0;
 
mutex_lock(&dev_priv->hw_mutex);
if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
&dev_priv->fifo_queue_waiters);
 
return ret;
}
316,10 → 290,11
* Returns:
* Pointer to the fifo, or null on error (possible hardware hang).
*/
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
uint32_t next_cmd;
327,9 → 302,9
int ret;
 
mutex_lock(&fifo_state->fifo_mutex);
max = ioread32(fifo_mem + SVGA_FIFO_MAX);
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
 
if (unlikely(bytes >= (max - min)))
goto out_err;
340,7 → 315,7
fifo_state->reserved_size = bytes;
 
while (1) {
uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
bool need_bounce = false;
bool reserve_in_place = false;
 
374,9 → 349,10
fifo_state->using_bounce_buffer = false;
 
if (reserveable)
iowrite32(bytes, fifo_mem +
vmw_mmio_write(bytes, fifo_mem +
SVGA_FIFO_RESERVED);
return fifo_mem + (next_cmd >> 2);
return (void __force *) (fifo_mem +
(next_cmd >> 2));
} else {
need_bounce = true;
}
387,7 → 363,7
if (bytes < fifo_state->static_buffer_size)
return fifo_state->static_buffer;
else {
fifo_state->dynamic_buffer = kmalloc(bytes,0);
fifo_state->dynamic_buffer = vmalloc(bytes);
return fifo_state->dynamic_buffer;
}
}
395,11 → 371,36
out_err:
fifo_state->reserved_size = 0;
mutex_unlock(&fifo_state->fifo_mutex);
 
return NULL;
}
 
void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
int ctx_id)
{
void *ret;
 
if (dev_priv->cman)
ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
ctx_id, false, NULL);
else if (ctx_id == SVGA3D_INVALID_ID)
ret = vmw_local_fifo_reserve(dev_priv, bytes);
else {
WARN(1, "Command buffer has not been allocated.\n");
ret = NULL;
}
if (IS_ERR_OR_NULL(ret)) {
DRM_ERROR("Fifo reserve failure of %u bytes.\n",
(unsigned) bytes);
// dump_stack();
return NULL;
}
 
return ret;
}
 
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
__le32 __iomem *fifo_mem,
u32 *fifo_mem,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
411,17 → 412,16
if (bytes < chunk_size)
chunk_size = bytes;
 
iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
mb();
memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
rest = bytes - chunk_size;
if (rest)
memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
rest);
memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}
 
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
__le32 __iomem *fifo_mem,
u32 *fifo_mem,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
429,26 → 429,30
fifo_state->dynamic_buffer : fifo_state->static_buffer;
 
while (bytes > 0) {
iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
next_cmd += sizeof(uint32_t);
if (unlikely(next_cmd == max))
next_cmd = min;
mb();
iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
mb();
bytes -= sizeof(uint32_t);
}
}
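
Both bounce-buffer copy helpers above split a command batch at the end of the FIFO ring and continue the remainder at SVGA_FIFO_MIN. A byte-oriented, stand-alone model of that wrap-around copy follows; the driver itself works on 32-bit words over MMIO, so this is only an illustration of the split.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void ring_copy(uint8_t *ring, uint32_t min, uint32_t max,
		      uint32_t next_cmd, const uint8_t *src, uint32_t bytes)
{
	uint32_t chunk = max - next_cmd;    /* room before the ring wraps */

	if (bytes < chunk)
		chunk = bytes;

	memcpy(ring + next_cmd, src, chunk);
	if (bytes > chunk)                  /* wrap the remainder to min */
		memcpy(ring + min, src + chunk, bytes - chunk);
}

int main(void)
{
	uint8_t ring[64] = { 0 };
	uint8_t cmd[16];
	uint32_t min = 16, max = 64;

	memset(cmd, 0xab, sizeof(cmd));
	/* 16 bytes starting 8 bytes before the end: 8 land at offset 56,
	 * the remaining 8 wrap to offset 16 (== min). */
	ring_copy(ring, min, max, 56, cmd, sizeof(cmd));
	printf("ring[56]=%#x ring[16]=%#x\n",
	       (unsigned)ring[56], (unsigned)ring[16]); /* 0xab 0xab */
	return 0;
}
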
 
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 
if (fifo_state->dx)
bytes += sizeof(struct vmw_temp_set_context);
 
fifo_state->dx = false;
BUG_ON((bytes & 3) != 0);
BUG_ON(bytes > fifo_state->reserved_size);
 
469,30 → 473,70
 
}
 
// down_write(&fifo_state->rwsem);
down_write(&fifo_state->rwsem);
if (fifo_state->using_bounce_buffer || reserveable) {
next_cmd += bytes;
if (next_cmd >= max)
next_cmd -= max - min;
mb();
iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
}
 
if (reserveable)
iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
mb();
// up_write(&fifo_state->rwsem);
up_write(&fifo_state->rwsem);
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
mutex_unlock(&fifo_state->fifo_mutex);
}
 
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
if (dev_priv->cman)
vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
else
vmw_local_fifo_commit(dev_priv, bytes);
}
 
 
/**
* vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
*
* @dev_priv: Pointer to device private structure.
* @bytes: Number of bytes to commit.
*/
void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
if (dev_priv->cman)
vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
else
vmw_local_fifo_commit(dev_priv, bytes);
}
 
/**
* vmw_fifo_flush - Flush any buffered commands and make sure command processing
* starts.
*
* @dev_priv: Pointer to device private structure.
* @interruptible: Whether to wait interruptibly if the function needs to sleep.
*/
int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
{
might_sleep();
 
if (dev_priv->cman)
return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
else
return 0;
}
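 
/*
 * Illustrative sketch (not part of the vmwgfx sources): the usual caller
 * pattern for the FIFO helpers above is reserve -> fill -> commit, followed
 * by a flush when buffered commands must start processing. The example_*
 * name is an assumption; SVGA_CMD_UPDATE and SVGAFifoCmdUpdate come from the
 * SVGA headers already used elsewhere in this driver.
 */
static int example_fifo_update(struct vmw_private *dev_priv,
			       u32 x, u32 y, u32 w, u32 h)
{
	struct {
		u32 header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header = SVGA_CMD_UPDATE;
	cmd->body.x = x;
	cmd->body.y = y;
	cmd->body.width = w;
	cmd->body.height = h;

	/* Commit the reserved bytes, then kick any buffered commands. */
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return vmw_fifo_flush(dev_priv, false);
}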
 
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct svga_fifo_cmd_fence *cmd_fence;
void *fm;
u32 *fm;
int ret = 0;
uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
 
fm = vmw_fifo_reserve(dev_priv, bytes);
if (unlikely(fm == NULL)) {
518,12 → 562,10
return 0;
}
 
*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
cmd_fence = (struct svga_fifo_cmd_fence *)
((unsigned long)fm + sizeof(__le32));
 
iowrite32(*seqno, &cmd_fence->fence);
vmw_fifo_commit(dev_priv, bytes);
*fm++ = SVGA_CMD_FENCE;
cmd_fence = (struct svga_fifo_cmd_fence *) fm;
cmd_fence->fence = *seqno;
vmw_fifo_commit_flush(dev_priv, bytes);
(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
vmw_update_seqno(dev_priv, fifo_state);
 
549,7 → 591,7
* without writing to the query result structure.
*/
 
struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery body;
598,7 → 640,7
* without writing to the query result structure.
*/
 
struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForGBQuery body;
651,3 → 693,8
 
return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}
 
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
}
/drivers/video/drm/vmwgfx/vmwgfx_gmr.c
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
24,9 → 24,6
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#define mb() asm volatile("mfence" : : : "memory")
#define rmb() asm volatile("lfence" : : : "memory")
#define wmb() asm volatile("sfence" : : : "memory")
 
#include "vmwgfx_drv.h"
#include <drm/drmP.h>
/drivers/video/drm/vmwgfx/vmwgfx_gmrid_manager.c
46,8 → 46,7
 
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
uint32_t flags,
const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
struct vmwgfx_gmrid_man *gman =
/drivers/video/drm/vmwgfx/vmwgfx_irq.c
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
28,9 → 28,6
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
 
#define TASK_INTERRUPTIBLE 1
#define TASK_UNINTERRUPTIBLE 2
 
#define VMW_FENCE_WRAP (1 << 24)
 
irqreturn_t vmw_irq_handler(int irq, void *arg)
39,15 → 36,13
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status, masked_status;
 
spin_lock(&dev_priv->irq_lock);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
masked_status = status & dev_priv->irq_mask;
spin_unlock(&dev_priv->irq_lock);
masked_status = status & READ_ONCE(dev_priv->irq_mask);
 
if (likely(status))
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 
if (!masked_status)
if (!status)
return IRQ_NONE;
 
if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
65,20 → 60,15
 
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
uint32_t busy;
 
mutex_lock(&dev_priv->hw_mutex);
busy = vmw_read(dev_priv, SVGA_REG_BUSY);
mutex_unlock(&dev_priv->hw_mutex);
 
return (busy == 0);
return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
 
void vmw_update_seqno(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
 
if (dev_priv->last_read_seqno != seqno) {
dev_priv->last_read_seqno = seqno;
167,8 → 157,9
}
// finish_wait(&dev_priv->fence_queue, &__wait);
if (ret == 0 && fifo_idle) {
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
u32 *fifo_mem = dev_priv->mmio_virt;
 
vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
}
wake_up_all(&dev_priv->fence_queue);
// if (fifo_idle)
177,66 → 168,52
return ret;
}
 
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
void vmw_generic_waiter_add(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
mutex_lock(&dev_priv->hw_mutex);
if (dev_priv->fence_queue_waiters++ == 0) {
unsigned long irq_flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_ANY_FENCE,
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
spin_lock(&dev_priv->waiter_lock);
if ((*waiter_count)++ == 0) {
outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
dev_priv->irq_mask |= flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
spin_unlock(&dev_priv->waiter_lock);
}
 
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
mutex_lock(&dev_priv->hw_mutex);
if (--dev_priv->fence_queue_waiters == 0) {
unsigned long irq_flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
spin_lock(&dev_priv->waiter_lock);
if (--(*waiter_count) == 0) {
dev_priv->irq_mask &= ~flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
spin_unlock(&dev_priv->waiter_lock);
}
 
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
&dev_priv->fence_queue_waiters);
}
 
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
&dev_priv->fence_queue_waiters);
}
 
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
mutex_lock(&dev_priv->hw_mutex);
if (dev_priv->goal_queue_waiters++ == 0) {
unsigned long irq_flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FENCE_GOAL,
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
&dev_priv->goal_queue_waiters);
}
mutex_unlock(&dev_priv->hw_mutex);
}
 
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
mutex_lock(&dev_priv->hw_mutex);
if (--dev_priv->goal_queue_waiters == 0) {
unsigned long irq_flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
&dev_priv->goal_queue_waiters);
}
mutex_unlock(&dev_priv->hw_mutex);
}
 
int vmw_wait_seqno(struct vmw_private *dev_priv,
bool lazy, uint32_t seqno,
292,7 → 269,6
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return;
 
spin_lock_init(&dev_priv->irq_lock);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
310,17 → 286,8
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return;
 
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
mutex_unlock(&dev_priv->hw_mutex);
 
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
 
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
list_del_init(&wait->task_list);
return 1;
}
 
/drivers/video/drm/vmwgfx/vmwgfx_kms.c
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
31,50 → 31,13
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
 
struct vmw_clip_rect {
int x1, x2, y1, y2;
};
 
/**
* Clip @num_rects number of @rects against @clip storing the
* results in @out_rects and the number of passed rects in @out_num.
*/
static void vmw_clip_cliprects(struct drm_clip_rect *rects,
int num_rects,
struct vmw_clip_rect clip,
SVGASignedRect *out_rects,
int *out_num)
void vmw_du_cleanup(struct vmw_display_unit *du)
{
int i, k;
 
for (i = 0, k = 0; i < num_rects; i++) {
int x1 = max_t(int, clip.x1, rects[i].x1);
int y1 = max_t(int, clip.y1, rects[i].y1);
int x2 = min_t(int, clip.x2, rects[i].x2);
int y2 = min_t(int, clip.y2, rects[i].y2);
 
if (x1 >= x2)
continue;
if (y1 >= y2)
continue;
 
out_rects[k].left = x1;
out_rects[k].top = y1;
out_rects[k].right = x2;
out_rects[k].bottom = y2;
k++;
}
 
*out_num = k;
}
 
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
// if (du->cursor_surface)
// vmw_surface_unreference(&du->cursor_surface);
// if (du->cursor_dmabuf)
// vmw_dmabuf_unreference(&du->cursor_dmabuf);
drm_connector_unregister(&du->connector);
drm_crtc_cleanup(&du->crtc);
drm_encoder_cleanup(&du->encoder);
drm_connector_cleanup(&du->connector);
120,14 → 83,14
for(i = 0; i < 64*(64-32); i++)
*dst++ = 0;
 
cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
cmd->cursor.id = cpu_to_le32(0);
cmd->cursor.width = cpu_to_le32(width);
cmd->cursor.height = cpu_to_le32(height);
cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
cmd->cursor.id = 0;
cmd->cursor.width = width;
cmd->cursor.height = height;
cmd->cursor.hotspotX = hotspotX;
cmd->cursor.hotspotY = hotspotY;
 
vmw_fifo_commit(dev_priv, cmd_size);
vmw_fifo_commit_flush(dev_priv, cmd_size);
 
return 0;
}
173,24 → 136,29
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
u32 *fifo_mem = dev_priv->mmio_virt;
uint32_t count;
 
iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}
 
#if 0
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height)
/*
* vmw_du_crtc_cursor_set2 - Driver cursor_set2 callback.
*/
int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height,
int32_t hot_x, int32_t hot_y)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *dmabuf = NULL;
s32 hotspot_x, hotspot_y;
int ret;
 
/*
200,8 → 168,10
* can do this since the caller in the drm core doesn't check anything
* which is protected by any locks.
*/
drm_modeset_unlock(&crtc->mutex);
drm_modeset_unlock_crtc(crtc);
drm_modeset_lock_all(dev_priv->dev);
hotspot_x = hot_x + du->hotspot_x;
hotspot_y = hot_y + du->hotspot_y;
 
/* A lot of the code assumes this */
if (handle && (width != 64 || height != 64)) {
238,6 → 208,7
vmw_dmabuf_unreference(&du->cursor_dmabuf);
 
/* setup new image */
ret = 0;
if (surface) {
/* vmw_user_surface_lookup takes one reference */
du->cursor_surface = surface;
244,28 → 215,30
 
du->cursor_surface->snooper.crtc = crtc;
du->cursor_age = du->cursor_surface->snooper.age;
vmw_cursor_update_image(dev_priv, surface->snooper.image,
64, 64, du->hotspot_x, du->hotspot_y);
ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
64, 64, hotspot_x, hotspot_y);
} else if (dmabuf) {
/* vmw_user_surface_lookup takes one reference */
du->cursor_dmabuf = dmabuf;
 
ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
du->hotspot_x, du->hotspot_y);
hotspot_x, hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
ret = 0;
goto out;
}
 
if (!ret) {
vmw_cursor_update_position(dev_priv, true,
du->cursor_x + du->hotspot_x,
du->cursor_y + du->hotspot_y);
du->cursor_x + hotspot_x,
du->cursor_y + hotspot_y);
du->core_hotspot_x = hot_x;
du->core_hotspot_y = hot_y;
}
 
ret = 0;
out:
drm_modeset_unlock_all(dev_priv->dev);
drm_modeset_lock(&crtc->mutex, NULL);
drm_modeset_lock_crtc(crtc, crtc->cursor);
 
return ret;
}
286,15 → 259,17
* can do this since the caller in the drm core doesn't check anything
* which is protected by any looks.
*/
drm_modeset_unlock(&crtc->mutex);
drm_modeset_unlock_crtc(crtc);
drm_modeset_lock_all(dev_priv->dev);
 
vmw_cursor_update_position(dev_priv, shown,
du->cursor_x + du->hotspot_x,
du->cursor_y + du->hotspot_y);
du->cursor_x + du->hotspot_x +
du->core_hotspot_x,
du->cursor_y + du->hotspot_y +
du->core_hotspot_y);
 
drm_modeset_unlock_all(dev_priv->dev);
drm_modeset_lock(&crtc->mutex, NULL);
drm_modeset_lock_crtc(crtc, crtc->cursor);
 
return 0;
}
380,20 → 355,34
 
srf->snooper.age++;
 
/* we can't call this function from this function since execbuf has
* reserved fifo space.
*
* if (srf->snooper.crtc)
* vmw_ldu_crtc_cursor_update_image(dev_priv,
* srf->snooper.image, 64, 64,
* du->hotspot_x, du->hotspot_y);
*/
 
ttm_bo_kunmap(&map);
err_unreserve:
ttm_bo_unreserve(bo);
}
 
/**
* vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
*
* @dev_priv: Pointer to the device private struct.
*
* Clears all legacy hotspots.
*/
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_display_unit *du;
struct drm_crtc *crtc;
 
drm_modeset_lock_all(dev);
drm_for_each_crtc(crtc, dev) {
du = vmw_crtc_to_du(crtc);
 
du->hotspot_x = 0;
du->hotspot_y = 0;
}
drm_modeset_unlock_all(dev);
}
 
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
411,7 → 400,9
du->cursor_age = du->cursor_surface->snooper.age;
vmw_cursor_update_image(dev_priv,
du->cursor_surface->snooper.image,
64, 64, du->hotspot_x, du->hotspot_y);
64, 64,
du->hotspot_x + du->core_hotspot_x,
du->hotspot_y + du->core_hotspot_y);
}
 
mutex_unlock(&dev->mode_config.mutex);
426,179 → 417,19
* Surface framebuffer code
*/
 
#define vmw_framebuffer_to_vfbs(x) \
container_of(x, struct vmw_framebuffer_surface, base.base)
 
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
struct vmw_dma_buffer *buffer;
struct list_head head;
struct drm_master *master;
};
 
static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
struct vmw_master *vmaster = vmw_master(vfbs->master);
 
drm_framebuffer_cleanup(framebuffer);
vmw_surface_unreference(&vfbs->surface);
if (vfbs->base.user_obj)
ttm_base_object_unref(&vfbs->base.user_obj);
 
mutex_lock(&vmaster->fb_surf_mutex);
list_del(&vfbs->head);
mutex_unlock(&vmaster->fb_surf_mutex);
 
 
kfree(vfbs);
}
 
static int do_surface_dirty_sou(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *framebuffer,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips, int inc,
struct vmw_fence_obj **out_fence)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_clip_rect *clips_ptr;
struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, num_units;
int ret = 0; /* silence warning */
int left, right, top, bottom;
 
struct {
SVGA3dCmdHeader header;
SVGA3dCmdBlitSurfaceToScreen body;
} *cmd;
SVGASignedRect *blits;
 
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
head) {
if (crtc->primary->fb != &framebuffer->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
 
BUG_ON(!clips || !num_clips);
 
tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
if (unlikely(tmp == NULL)) {
DRM_ERROR("Temporary cliprect memory alloc failed.\n");
return -ENOMEM;
}
 
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kzalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Temporary fifo memory alloc failed.\n");
ret = -ENOMEM;
goto out_free_tmp;
}
 
/* setup blits pointer */
blits = (SVGASignedRect *)&cmd[1];
 
/* initial clip region */
left = clips->x1;
right = clips->x2;
top = clips->y1;
bottom = clips->y2;
 
/* skip the first clip rect */
for (i = 1, clips_ptr = clips + inc;
i < num_clips; i++, clips_ptr += inc) {
left = min_t(int, left, (int)clips_ptr->x1);
right = max_t(int, right, (int)clips_ptr->x2);
top = min_t(int, top, (int)clips_ptr->y1);
bottom = max_t(int, bottom, (int)clips_ptr->y2);
}
 
/* only need to do this once */
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
 
cmd->body.srcRect.left = left;
cmd->body.srcRect.right = right;
cmd->body.srcRect.top = top;
cmd->body.srcRect.bottom = bottom;
 
clips_ptr = clips;
for (i = 0; i < num_clips; i++, clips_ptr += inc) {
tmp[i].x1 = clips_ptr->x1 - left;
tmp[i].x2 = clips_ptr->x2 - left;
tmp[i].y1 = clips_ptr->y1 - top;
tmp[i].y2 = clips_ptr->y2 - top;
}
 
/* do per unit writing, reuse fifo for each */
for (i = 0; i < num_units; i++) {
struct vmw_display_unit *unit = units[i];
struct vmw_clip_rect clip;
int num;
 
clip.x1 = left - unit->crtc.x;
clip.y1 = top - unit->crtc.y;
clip.x2 = right - unit->crtc.x;
clip.y2 = bottom - unit->crtc.y;
 
/* skip any crtcs that misses the clip region */
if (clip.x1 >= unit->crtc.mode.hdisplay ||
clip.y1 >= unit->crtc.mode.vdisplay ||
clip.x2 <= 0 || clip.y2 <= 0)
continue;
 
/*
* In order for the clip rects to be correctly scaled
* the src and dest rects needs to be the same size.
*/
cmd->body.destRect.left = clip.x1;
cmd->body.destRect.right = clip.x2;
cmd->body.destRect.top = clip.y1;
cmd->body.destRect.bottom = clip.y2;
 
/* create a clip rect of the crtc in dest coords */
clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
clip.x1 = 0 - clip.x1;
clip.y1 = 0 - clip.y1;
 
/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
cmd->body.destScreenId = unit->unit;
 
/* clip and write blits to cmd stream */
vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
 
/* if no cliprects hit skip this */
if (num == 0)
continue;
 
/* only return the last fence */
if (out_fence && *out_fence)
vmw_fence_obj_unreference(out_fence);
 
/* recalculate package length */
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL, out_fence);
 
if (unlikely(ret != 0))
break;
}
 
 
kfree(cmd);
out_free_tmp:
kfree(tmp);
 
return ret;
}
 
static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
611,13 → 442,10
struct drm_clip_rect norect;
int ret, inc = 1;
 
if (unlikely(vfbs->master != file_priv->master))
/* Legacy Display Unit does not support 3D */
if (dev_priv->active_display_unit == vmw_du_legacy)
return -EINVAL;
 
/* Require ScreenObject support for 3D */
if (!dev_priv->sou_priv)
return -EINVAL;
 
drm_modeset_lock_all(dev_priv->dev);
 
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
637,10 → 465,16
inc = 2; /* skip source rects */
}
 
ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
flags, color,
clips, num_clips, inc, NULL);
if (dev_priv->active_display_unit == vmw_du_screen_object)
ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
clips, NULL, NULL, 0, 0,
num_clips, inc, NULL);
else
ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
clips, NULL, NULL, 0, 0,
num_clips, inc, NULL);
 
vmw_fifo_flush(dev_priv, false);
ttm_read_unlock(&dev_priv->reservation_sem);
 
drm_modeset_unlock_all(dev_priv->dev);
648,6 → 482,46
return 0;
}
 
/**
* vmw_kms_readback - Perform a readback from the screen system to
* a dma-buffer backed framebuffer.
*
* @dev_priv: Pointer to the device private structure.
* @file_priv: Pointer to a struct drm_file identifying the caller.
* Must be set to NULL if @user_fence_rep is NULL.
* @vfb: Pointer to the dma-buffer backed framebuffer.
* @user_fence_rep: User-space provided structure for fence information.
* Must be set to non-NULL if @file_priv is non-NULL.
* @vclips: Array of clip rects.
* @num_clips: Number of clip rects in @vclips.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
int vmw_kms_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct drm_vmw_rect *vclips,
uint32_t num_clips)
{
switch (dev_priv->active_display_unit) {
case vmw_du_screen_object:
return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
user_fence_rep, vclips, num_clips);
case vmw_du_screen_target:
return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
user_fence_rep, NULL, vclips, num_clips,
1, false, true);
default:
WARN_ONCE(true,
"Readback called with invalid display system.\n");
}
 
return -ENOSYS;
}
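 
/*
 * Illustrative sketch (not part of the vmwgfx sources): reading back a
 * single full-framebuffer rectangle through vmw_kms_readback(). The
 * example_* name and the calling context are assumptions.
 */
static int example_readback_full(struct vmw_private *dev_priv,
				 struct drm_file *file_priv,
				 struct vmw_framebuffer *vfb,
				 struct drm_vmw_fence_rep __user *fence_rep)
{
	struct drm_vmw_rect vclip = {
		.x = 0,
		.y = 0,
		.w = vfb->base.width,
		.h = vfb->base.height,
	};

	/* file_priv and fence_rep must be both NULL or both non-NULL. */
	return vmw_kms_readback(dev_priv, file_priv, vfb, fence_rep,
				&vclip, 1);
}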
 
 
static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.destroy = vmw_framebuffer_surface_destroy,
.dirty = vmw_framebuffer_surface_dirty,
654,21 → 528,20
};
 
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_surface *surface,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd
*mode_cmd)
*mode_cmd,
bool is_dmabuf_proxy)
 
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
 
/* 3D is only supported on HWv8 hosts which supports screen objects */
if (!dev_priv->sou_priv)
/* 3D is only supported on HWv8 and newer hosts */
if (dev_priv->active_display_unit == vmw_du_legacy)
return -ENOSYS;
 
/*
702,15 → 575,16
case 15:
format = SVGA3D_A1R5G5B5;
break;
case 8:
format = SVGA3D_LUMINANCE8;
break;
default:
DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
return -EINVAL;
}
 
if (unlikely(format != surface->format)) {
/*
* For DX, surface format validation is done when surface->scanout
* is set.
*/
if (!dev_priv->has_dx && format != surface->format) {
DRM_ERROR("Invalid surface format for requested mode.\n");
return -EINVAL;
}
721,12 → 595,6
goto out_err1;
}
 
if (!vmw_surface_reference(surface)) {
DRM_ERROR("failed to reference surface %p\n", surface);
ret = -EINVAL;
goto out_err2;
}
 
/* XXX get the first 3 from the surface info */
vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
vfbs->base.base.pitches[0] = mode_cmd->pitch;
733,26 → 601,21
vfbs->base.base.depth = mode_cmd->depth;
vfbs->base.base.width = mode_cmd->width;
vfbs->base.base.height = mode_cmd->height;
vfbs->surface = surface;
vfbs->surface = vmw_surface_reference(surface);
vfbs->base.user_handle = mode_cmd->handle;
// vfbs->master = drm_master_get(file_priv->master);
vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
 
mutex_lock(&vmaster->fb_surf_mutex);
list_add_tail(&vfbs->head, &vmaster->fb_surf);
mutex_unlock(&vmaster->fb_surf_mutex);
 
*out = &vfbs->base;
 
ret = drm_framebuffer_init(dev, &vfbs->base.base,
&vmw_framebuffer_surface_funcs);
if (ret)
goto out_err3;
goto out_err2;
 
return 0;
 
out_err3:
out_err2:
vmw_surface_unreference(&surface);
out_err2:
kfree(vfbs);
out_err1:
return ret;
762,200 → 625,19
* Dmabuf framebuffer code
*/
 
#define vmw_framebuffer_to_vfbd(x) \
container_of(x, struct vmw_framebuffer_dmabuf, base.base)
 
struct vmw_framebuffer_dmabuf {
struct vmw_framebuffer base;
struct vmw_dma_buffer *buffer;
};
 
static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
 
// drm_framebuffer_cleanup(framebuffer);
// vmw_dmabuf_unreference(&vfbd->buffer);
// ttm_base_object_unref(&vfbd->base.user_obj);
drm_framebuffer_cleanup(framebuffer);
vmw_dmabuf_unreference(&vfbd->buffer);
if (vfbd->base.user_obj)
ttm_base_object_unref(&vfbd->base.user_obj);
 
kfree(vfbd);
}
 
static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips, int increment)
{
size_t fifo_size;
int i;
 
struct {
uint32_t header;
SVGAFifoCmdUpdate body;
} *cmd;
 
fifo_size = sizeof(*cmd) * num_clips;
cmd = vmw_fifo_reserve(dev_priv, fifo_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
return -ENOMEM;
}
 
memset(cmd, 0, fifo_size);
for (i = 0; i < num_clips; i++, clips += increment) {
cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
cmd[i].body.x = cpu_to_le32(clips->x1);
cmd[i].body.y = cpu_to_le32(clips->y1);
cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
}
 
vmw_fifo_commit(dev_priv, fifo_size);
return 0;
}
 
static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer)
{
int depth = framebuffer->base.depth;
size_t fifo_size;
int ret;
 
struct {
uint32_t header;
SVGAFifoCmdDefineGMRFB body;
} *cmd;
 
/* Emulate RGBA support, contrary to svga_reg.h this is not
* supported by hosts. This is only a problem if we are reading
* this value later and expecting what we uploaded back.
*/
if (depth == 32)
depth = 24;
 
fifo_size = sizeof(*cmd);
cmd = kmalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
return -ENOMEM;
}
 
memset(cmd, 0, fifo_size);
cmd->header = SVGA_CMD_DEFINE_GMRFB;
cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
cmd->body.format.colorDepth = depth;
cmd->body.format.reserved = 0;
cmd->body.bytesPerLine = framebuffer->base.pitches[0];
cmd->body.ptr.gmrId = framebuffer->user_handle;
cmd->body.ptr.offset = 0;
 
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL, NULL);
 
kfree(cmd);
 
return ret;
}
 
static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips, int increment,
struct vmw_fence_obj **out_fence)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_clip_rect *clips_ptr;
int i, k, num_units, ret;
struct drm_crtc *crtc;
size_t fifo_size;
 
struct {
uint32_t header;
SVGAFifoCmdBlitGMRFBToScreen body;
} *blits;
 
ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
if (unlikely(ret != 0))
return ret; /* define_gmrfb prints warnings */
 
fifo_size = sizeof(*blits) * num_clips;
blits = kmalloc(fifo_size, GFP_KERNEL);
if (unlikely(blits == NULL)) {
DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
return -ENOMEM;
}
 
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
if (crtc->primary->fb != &framebuffer->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
 
for (k = 0; k < num_units; k++) {
struct vmw_display_unit *unit = units[k];
int hit_num = 0;
 
clips_ptr = clips;
for (i = 0; i < num_clips; i++, clips_ptr += increment) {
int clip_x1 = clips_ptr->x1 - unit->crtc.x;
int clip_y1 = clips_ptr->y1 - unit->crtc.y;
int clip_x2 = clips_ptr->x2 - unit->crtc.x;
int clip_y2 = clips_ptr->y2 - unit->crtc.y;
int move_x, move_y;
 
/* skip any crtcs that misses the clip region */
if (clip_x1 >= unit->crtc.mode.hdisplay ||
clip_y1 >= unit->crtc.mode.vdisplay ||
clip_x2 <= 0 || clip_y2 <= 0)
continue;
 
/* clip size to crtc size */
clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
 
/* translate both src and dest to bring clip into screen */
move_x = min_t(int, clip_x1, 0);
move_y = min_t(int, clip_y1, 0);
 
/* actual translate done here */
blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
blits[hit_num].body.destScreenId = unit->unit;
blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
blits[hit_num].body.destRect.left = clip_x1 - move_x;
blits[hit_num].body.destRect.top = clip_y1 - move_y;
blits[hit_num].body.destRect.right = clip_x2;
blits[hit_num].body.destRect.bottom = clip_y2;
hit_num++;
}
 
/* no clips hit the crtc */
if (hit_num == 0)
continue;
 
/* only return the last fence */
if (out_fence && *out_fence)
vmw_fence_obj_unreference(out_fence);
 
fifo_size = sizeof(*blits) * hit_num;
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
fifo_size, 0, NULL, out_fence);
 
if (unlikely(ret != 0))
break;
}
 
kfree(blits);
 
return ret;
}
 
static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
987,16 → 669,29
increment = 2;
}
 
if (dev_priv->ldu_priv) {
ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base,
flags, color,
switch (dev_priv->active_display_unit) {
case vmw_du_screen_target:
ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
clips, NULL, num_clips, increment,
true, true);
break;
case vmw_du_screen_object:
ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
clips, num_clips, increment,
true,
NULL);
break;
case vmw_du_legacy:
ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
clips, num_clips, increment);
} else {
ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
flags, color,
clips, num_clips, increment, NULL);
break;
default:
ret = -EINVAL;
WARN_ONCE(true, "Dirty called with invalid display system.\n");
break;
}
 
vmw_fifo_flush(dev_priv, false);
ttm_read_unlock(&dev_priv->reservation_sem);
 
drm_modeset_unlock_all(dev_priv->dev);
1012,42 → 707,133
/**
* Pin the dmabuffer to the start of vram.
*/
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(&vfb->base);
struct vmw_dma_buffer *buf;
int ret;
 
/* This code should not be used with screen objects */
BUG_ON(dev_priv->sou_priv);
buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
 
// vmw_overlay_pause_all(dev_priv);
if (!buf)
return 0;
 
ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);
switch (dev_priv->active_display_unit) {
case vmw_du_legacy:
vmw_overlay_pause_all(dev_priv);
ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
vmw_overlay_resume_all(dev_priv);
break;
case vmw_du_screen_object:
case vmw_du_screen_target:
if (vfb->dmabuf)
return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
false);
 
// vmw_overlay_resume_all(dev_priv);
return vmw_dmabuf_pin_in_placement(dev_priv, buf,
&vmw_mob_placement, false);
default:
return -EINVAL;
}
 
WARN_ON(ret != 0);
 
return 0;
return ret;
}
 
static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(&vfb->base);
struct vmw_dma_buffer *buf;
 
if (!vfbd->buffer) {
WARN_ON(!vfbd->buffer);
buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
 
if (WARN_ON(!buf))
return 0;
 
return vmw_dmabuf_unpin(dev_priv, buf, false);
}
 
return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
/**
* vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
*
* @dev: DRM device
* @mode_cmd: parameters for the new surface
* @dmabuf_mob: MOB backing the DMA buf
* @srf_out: newly created surface
*
* When the content FB is a DMA buf, we create a surface as a proxy to the
* same buffer. This way we can do a surface copy rather than a surface DMA.
* This is a more efficient approach.
*
* RETURNS:
* 0 on success, error code otherwise
*/
static int vmw_create_dmabuf_proxy(struct drm_device *dev,
const struct drm_mode_fb_cmd *mode_cmd,
struct vmw_dma_buffer *dmabuf_mob,
struct vmw_surface **srf_out)
{
uint32_t format;
struct drm_vmw_size content_base_size;
struct vmw_resource *res;
int ret;
 
switch (mode_cmd->depth) {
case 32:
case 24:
format = SVGA3D_X8R8G8B8;
break;
 
case 16:
case 15:
format = SVGA3D_R5G6B5;
break;
 
case 8:
format = SVGA3D_P8;
break;
 
default:
DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
return -EINVAL;
}
 
#if 0
content_base_size.width = mode_cmd->width;
content_base_size.height = mode_cmd->height;
content_base_size.depth = 1;
 
ret = vmw_surface_gb_priv_define(dev,
0, /* kernel visible only */
0, /* flags */
format,
true, /* can be a scanout buffer */
1, /* num of mip levels */
0,
0,
content_base_size,
srf_out);
if (ret) {
DRM_ERROR("Failed to allocate proxy content buffer\n");
return ret;
}
 
res = &(*srf_out)->res;
 
/* Reserve and switch the backing mob. */
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void) vmw_resource_reserve(res, false, true);
vmw_dmabuf_unreference(&res->backup);
res->backup = vmw_dmabuf_reference(dmabuf_mob);
res->backup_offset = 0;
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 
return 0;
}
 
 
 
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_framebuffer **out,
1068,7 → 854,7
}
 
/* Limited framebuffer color depth support for screen objects */
if (dev_priv->sou_priv) {
if (dev_priv->active_display_unit == vmw_du_screen_object) {
switch (mode_cmd->depth) {
case 32:
case 24:
1100,23 → 886,13
goto out_err1;
}
 
if (!vmw_dmabuf_reference(dmabuf)) {
DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
ret = -EINVAL;
goto out_err2;
}
 
vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
vfbd->base.base.pitches[0] = mode_cmd->pitch;
vfbd->base.base.depth = mode_cmd->depth;
vfbd->base.base.width = mode_cmd->width;
vfbd->base.base.height = mode_cmd->height;
if (!dev_priv->sou_priv) {
vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
}
vfbd->base.dmabuf = true;
vfbd->buffer = dmabuf;
vfbd->buffer = vmw_dmabuf_reference(dmabuf);
vfbd->base.user_handle = mode_cmd->handle;
*out = &vfbd->base;
 
1123,20 → 899,84
ret = drm_framebuffer_init(dev, &vfbd->base.base,
&vmw_framebuffer_dmabuf_funcs);
if (ret)
goto out_err3;
goto out_err2;
 
return 0;
 
out_err3:
out_err2:
vmw_dmabuf_unreference(&dmabuf);
out_err2:
kfree(vfbd);
out_err1:
return ret;
}
#endif
 
/**
* vmw_kms_new_framebuffer - Create a new framebuffer.
*
* @dev_priv: Pointer to device private struct.
* @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
* Either @dmabuf or @surface must be NULL.
* @surface: Pointer to a surface to wrap the kms framebuffer around.
* Either @dmabuf or @surface must be NULL.
* @only_2d: No presents will occur to this dma-buffer-based framebuffer. This
* helps the code to do some important optimizations.
* @mode_cmd: Frame-buffer metadata.
*/
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd *mode_cmd)
{
struct vmw_framebuffer *vfb = NULL;
bool is_dmabuf_proxy = false;
int ret;
 
/*
* We cannot use the SurfaceDMA command in a non-accelerated VM,
* therefore, wrap the DMA buf in a surface so we can use the
* SurfaceCopy command.
*/
if (dmabuf && only_2d &&
dev_priv->active_display_unit == vmw_du_screen_target) {
ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
dmabuf, &surface);
if (ret)
return ERR_PTR(ret);
 
is_dmabuf_proxy = true;
}
 
/* Create the new framebuffer depending on what we have */
if (surface) {
ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
mode_cmd,
is_dmabuf_proxy);
 
/*
* vmw_create_dmabuf_proxy() adds a reference that is no longer
* needed
*/
if (is_dmabuf_proxy)
vmw_surface_unreference(&surface);
} else if (dmabuf) {
ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
mode_cmd);
} else {
BUG();
}
 
if (ret)
return ERR_PTR(ret);
 
vfb->pin = vmw_framebuffer_pin;
vfb->unpin = vmw_framebuffer_unpin;
 
return vfb;
}
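 
/*
 * Illustrative sketch (not part of the vmwgfx sources): wrapping a dma
 * buffer in a kms framebuffer with vmw_kms_new_framebuffer(). Passing
 * only_2d == true lets the screen-target path substitute the proxy surface
 * described above. The example_* name is an assumption; the in-tree caller
 * is vmw_kms_fb_create() below.
 */
static struct vmw_framebuffer *
example_fb_from_dmabuf(struct vmw_private *dev_priv,
		       struct vmw_dma_buffer *bo,
		       const struct drm_mode_fb_cmd *mode_cmd)
{
	return vmw_kms_new_framebuffer(dev_priv, bo, NULL, true, mode_cmd);
}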
 
/*
* Generic Kernel modesetting functions
*/
 
1169,7 → 1009,7
if (!vmw_kms_validate_mode_vram(dev_priv,
mode_cmd.pitch,
mode_cmd.height)) {
DRM_ERROR("VRAM size is too small for requested mode.\n");
DRM_ERROR("Requested mode exceed bounding box limit.\n");
return ERR_PTR(-ENOMEM);
}
 
1193,32 → 1033,30
*/
 
/* returns either a dmabuf or surface */
// ret = vmw_user_lookup_handle(dev_priv, tfile,
// mode_cmd.handle,
// &surface, &bo);
// if (ret)
// goto err_out;
ret = vmw_user_lookup_handle(dev_priv, tfile,
mode_cmd.handle,
&surface, &bo);
if (ret)
goto err_out;
 
/* Create the new framebuffer depending on what we got back */
// if (bo)
// ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
// &mode_cmd);
// else if (surface)
ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
surface, &vfb, &mode_cmd);
// else
// BUG();
vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
!(dev_priv->capabilities & SVGA_CAP_3D),
&mode_cmd);
if (IS_ERR(vfb)) {
ret = PTR_ERR(vfb);
goto err_out;
}
 
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
// if (bo)
// vmw_dmabuf_unreference(&bo);
// if (surface)
// vmw_surface_unreference(&surface);
if (bo)
vmw_dmabuf_unreference(&bo);
if (surface)
vmw_surface_unreference(&surface);
 
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
// ttm_base_object_unref(&user_obj);
ttm_base_object_unref(&user_obj);
return ERR_PTR(ret);
} else
vfb->user_obj = user_obj;
1230,7 → 1068,7
.fb_create = vmw_kms_fb_create,
};
 
int vmw_kms_present(struct vmw_private *dev_priv,
static int vmw_kms_generic_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct vmw_surface *surface,
1239,240 → 1077,48
struct drm_vmw_rect *clips,
uint32_t num_clips)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, k, num_units;
int ret = 0; /* silence warning */
int left, right, top, bottom;
 
struct {
SVGA3dCmdHeader header;
SVGA3dCmdBlitSurfaceToScreen body;
} *cmd;
SVGASignedRect *blits;
 
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
if (crtc->primary->fb != &vfb->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
&surface->res, destX, destY,
num_clips, 1, NULL);
}
 
BUG_ON(surface == NULL);
BUG_ON(!clips || !num_clips);
 
tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
if (unlikely(tmp == NULL)) {
DRM_ERROR("Temporary cliprect memory alloc failed.\n");
return -ENOMEM;
}
 
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kmalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed to allocate temporary fifo memory.\n");
ret = -ENOMEM;
goto out_free_tmp;
}
 
left = clips->x;
right = clips->x + clips->w;
top = clips->y;
bottom = clips->y + clips->h;
 
for (i = 1; i < num_clips; i++) {
left = min_t(int, left, (int)clips[i].x);
right = max_t(int, right, (int)clips[i].x + clips[i].w);
top = min_t(int, top, (int)clips[i].y);
bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
}
 
/* only need to do this once */
memset(cmd, 0, fifo_size);
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
 
blits = (SVGASignedRect *)&cmd[1];
 
cmd->body.srcRect.left = left;
cmd->body.srcRect.right = right;
cmd->body.srcRect.top = top;
cmd->body.srcRect.bottom = bottom;
 
for (i = 0; i < num_clips; i++) {
tmp[i].x1 = clips[i].x - left;
tmp[i].x2 = clips[i].x + clips[i].w - left;
tmp[i].y1 = clips[i].y - top;
tmp[i].y2 = clips[i].y + clips[i].h - top;
}
 
for (k = 0; k < num_units; k++) {
struct vmw_display_unit *unit = units[k];
struct vmw_clip_rect clip;
int num;
 
clip.x1 = left + destX - unit->crtc.x;
clip.y1 = top + destY - unit->crtc.y;
clip.x2 = right + destX - unit->crtc.x;
clip.y2 = bottom + destY - unit->crtc.y;
 
/* skip any crtcs that misses the clip region */
if (clip.x1 >= unit->crtc.mode.hdisplay ||
clip.y1 >= unit->crtc.mode.vdisplay ||
clip.x2 <= 0 || clip.y2 <= 0)
continue;
 
/*
* In order for the clip rects to be correctly scaled
* the src and dest rects needs to be the same size.
*/
cmd->body.destRect.left = clip.x1;
cmd->body.destRect.right = clip.x2;
cmd->body.destRect.top = clip.y1;
cmd->body.destRect.bottom = clip.y2;
 
/* create a clip rect of the crtc in dest coords */
clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
clip.x1 = 0 - clip.x1;
clip.y1 = 0 - clip.y1;
 
/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = sid;
cmd->body.destScreenId = unit->unit;
 
/* clip and write blits to cmd stream */
vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
 
/* if no cliprects hit skip this */
if (num == 0)
continue;
 
/* recalculate package length */
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL, NULL);
 
if (unlikely(ret != 0))
break;
}
 
kfree(cmd);
out_free_tmp:
kfree(tmp);
 
return ret;
}
 
int vmw_kms_readback(struct vmw_private *dev_priv,
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_surface *surface,
uint32_t sid,
int32_t destX, int32_t destY,
struct drm_vmw_rect *clips,
uint32_t num_clips)
{
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(&vfb->base);
struct vmw_dma_buffer *dmabuf = vfbd->buffer;
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_crtc *crtc;
size_t fifo_size;
int i, k, ret, num_units, blits_pos;
int ret;
 
struct {
uint32_t header;
SVGAFifoCmdDefineGMRFB body;
} *cmd;
struct {
uint32_t header;
SVGAFifoCmdBlitScreenToGMRFB body;
} *blits;
 
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
if (crtc->primary->fb != &vfb->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
switch (dev_priv->active_display_unit) {
case vmw_du_screen_target:
ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
&surface->res, destX, destY,
num_clips, 1, NULL);
break;
case vmw_du_screen_object:
ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
sid, destX, destY, clips,
num_clips);
break;
default:
WARN_ONCE(true,
"Present called with invalid display system.\n");
ret = -ENOSYS;
break;
}
if (ret)
return ret;
 
BUG_ON(dmabuf == NULL);
BUG_ON(!clips || !num_clips);
vmw_fifo_flush(dev_priv, false);
 
/* take a safe guess at fifo size */
fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
cmd = kmalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed to allocate temporary fifo memory.\n");
return -ENOMEM;
return 0;
}
 
memset(cmd, 0, fifo_size);
cmd->header = SVGA_CMD_DEFINE_GMRFB;
cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
cmd->body.format.colorDepth = vfb->base.depth;
cmd->body.format.reserved = 0;
cmd->body.bytesPerLine = vfb->base.pitches[0];
cmd->body.ptr.gmrId = vfb->user_handle;
cmd->body.ptr.offset = 0;
 
blits = (void *)&cmd[1];
blits_pos = 0;
for (i = 0; i < num_units; i++) {
struct drm_vmw_rect *c = clips;
for (k = 0; k < num_clips; k++, c++) {
/* transform clip coords to crtc origin based coords */
int clip_x1 = c->x - units[i]->crtc.x;
int clip_x2 = c->x - units[i]->crtc.x + c->w;
int clip_y1 = c->y - units[i]->crtc.y;
int clip_y2 = c->y - units[i]->crtc.y + c->h;
int dest_x = c->x;
int dest_y = c->y;
 
/* compensate for clipping, we negate
* a negative number and add that.
*/
if (clip_x1 < 0)
dest_x += -clip_x1;
if (clip_y1 < 0)
dest_y += -clip_y1;
 
/* clip */
clip_x1 = max(clip_x1, 0);
clip_y1 = max(clip_y1, 0);
clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);
 
/* and cull any rects that misses the crtc */
if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
clip_y1 >= units[i]->crtc.mode.vdisplay ||
clip_x2 <= 0 || clip_y2 <= 0)
continue;
 
blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
blits[blits_pos].body.srcScreenId = units[i]->unit;
blits[blits_pos].body.destOrigin.x = dest_x;
blits[blits_pos].body.destOrigin.y = dest_y;
 
blits[blits_pos].body.srcRect.left = clip_x1;
blits[blits_pos].body.srcRect.top = clip_y1;
blits[blits_pos].body.srcRect.right = clip_x2;
blits[blits_pos].body.srcRect.bottom = clip_y2;
blits_pos++;
}
}
/* reset size here and use calculated exact size from loops */
fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
 
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
0, user_fence_rep, NULL);
 
kfree(cmd);
 
return ret;
}
 
int vmw_kms_init(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
1482,28 → 1128,37
dev->mode_config.funcs = &vmw_kms_funcs;
dev->mode_config.min_width = 1;
dev->mode_config.min_height = 1;
/* assumed largest fb size */
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
dev->mode_config.max_width = dev_priv->texture_max_width;
dev->mode_config.max_height = dev_priv->texture_max_height;
 
ret = vmw_kms_init_screen_object_display(dev_priv);
ret = vmw_kms_stdu_init_display(dev_priv);
if (ret) {
ret = vmw_kms_sou_init_display(dev_priv);
if (ret) /* Fallback */
ret = vmw_kms_ldu_init_display(dev_priv);
}
 
return 0;
return ret;
}
 
int vmw_kms_close(struct vmw_private *dev_priv)
{
int ret;
 
/*
* Docs say we should take the lock before calling this function
* but since it destroys encoders and our destructor calls
* drm_encoder_cleanup which takes the lock we deadlock.
*/
// drm_mode_config_cleanup(dev_priv->dev);
// if (dev_priv->sou_priv)
// vmw_kms_close_screen_object_display(dev_priv);
// else
// vmw_kms_close_legacy_display_system(dev_priv);
return 0;
drm_mode_config_cleanup(dev_priv->dev);
if (dev_priv->active_display_unit == vmw_du_screen_object)
ret = vmw_kms_sou_close_display(dev_priv);
else if (dev_priv->active_display_unit == vmw_du_screen_target)
ret = vmw_kms_stdu_close_display(dev_priv);
else
ret = vmw_kms_ldu_close_display(dev_priv);
 
return ret;
}
 
#if 0
1554,7 → 1209,8
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
else if (vmw_fifo_have_pitchlock(vmw_priv))
iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
vmw_mmio_write(pitch, vmw_priv->mmio_virt +
SVGA_FIFO_PITCHLOCK);
vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
1580,7 → 1236,7
vmw_priv->vga_pitchlock =
vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
else if (vmw_fifo_have_pitchlock(vmw_priv))
vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
SVGA_FIFO_PITCHLOCK);
 
if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1629,7 → 1285,7
vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
vmw_priv->vga_pitchlock);
else if (vmw_fifo_have_pitchlock(vmw_priv))
iowrite32(vmw_priv->vga_pitchlock,
vmw_mmio_write(vmw_priv->vga_pitchlock,
vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
 
if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1653,7 → 1309,9
uint32_t pitch,
uint32_t height)
{
return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem;
return ((u64) pitch * (u64) height) < (u64)
((dev_priv->active_display_unit == vmw_du_screen_target) ?
dev_priv->prim_bb_mem : dev_priv->vram_size);
}
 
 
1660,7 → 1318,7
/**
* Function called by DRM code called with vbl_lock held.
*/
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
return 0;
}
1668,7 → 1326,7
/**
* Function called by DRM code called with vbl_lock held.
*/
int vmw_enable_vblank(struct drm_device *dev, int crtc)
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
return -ENOSYS;
}
1676,7 → 1334,7
/**
* Function called by DRM code called with vbl_lock held.
*/
void vmw_disable_vblank(struct drm_device *dev, int crtc)
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
}
 
1727,76 → 1385,6
return 0;
}
 
#if 0
int vmw_du_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
struct drm_file *file_priv ;
struct vmw_fence_obj *fence = NULL;
struct drm_clip_rect clips;
int ret;
 
if (event == NULL)
return -EINVAL;
 
/* require ScreenObject support for page flipping */
if (!dev_priv->sou_priv)
return -ENOSYS;
 
file_priv = event->base.file_priv;
if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
return -EINVAL;
 
crtc->primary->fb = fb;
 
/* do a full screen dirty update */
clips.x1 = clips.y1 = 0;
clips.x2 = fb->width;
clips.y2 = fb->height;
 
if (vfb->dmabuf)
ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
0, 0, &clips, 1, 1, &fence);
else
ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
0, 0, &clips, 1, 1, &fence);
 
 
if (ret != 0)
goto out_no_fence;
if (!fence) {
ret = -EINVAL;
goto out_no_fence;
}
 
ret = vmw_event_fence_action_queue(file_priv, fence,
&event->base,
&event->event.tv_sec,
&event->event.tv_usec,
true);
 
/*
* No need to hold on to this now. The only cleanup
* we need to do if we fail is unref the fence.
*/
vmw_fence_obj_unreference(&fence);
 
if (vmw_crtc_to_du(crtc)->is_implicit)
vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);
 
return ret;
 
out_no_fence:
crtc->primary->fb = old_fb;
return ret;
}
#endif
 
void vmw_du_crtc_save(struct drm_crtc *crtc)
{
}
1821,8 → 1409,9
}
}
 
void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
return 0;
}
 
void vmw_du_connector_save(struct drm_connector *connector)
1841,9 → 1430,7
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_display_unit *du = vmw_connector_to_du(connector);
 
mutex_lock(&dev_priv->hw_mutex);
num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
mutex_unlock(&dev_priv->hw_mutex);
 
return ((vmw_connector_to_du(connector)->unit < num_displays &&
du->pref_active) ?
1938,7 → 1525,7
* @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
* members filled in.
*/
static void vmw_guess_mode_timing(struct drm_display_mode *mode)
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
mode->hsync_start = mode->hdisplay + 50;
mode->hsync_end = mode->hsync_start + 50;
1967,9 → 1554,21
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
int i;
u32 assumed_bpp = 2;
 
/*
* If using screen objects, then assume 32-bpp because that's what the
* SVGA device is assuming
*/
if (dev_priv->active_display_unit == vmw_du_screen_object)
assumed_bpp = 4;
 
if (dev_priv->active_display_unit == vmw_du_screen_target) {
max_width = min(max_width, dev_priv->stdu_max_width);
max_height = min(max_height, dev_priv->stdu_max_height);
}
 
/* Add preferred mode */
{
mode = drm_mode_duplicate(dev, &prefmode);
if (!mode)
return 0;
1977,7 → 1576,8
mode->vdisplay = du->pref_height;
vmw_guess_mode_timing(mode);
 
if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
if (vmw_kms_validate_mode_vram(dev_priv,
mode->hdisplay * assumed_bpp,
mode->vdisplay)) {
drm_mode_probed_add(connector, mode);
} else {
1992,7 → 1592,6
 
/* mode might be null here, this is intended */
du->pref_mode = mode;
}
 
for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
bmode = &vmw_kms_connector_builtin[i];
2000,7 → 1599,8
bmode->vdisplay > max_height)
continue;
 
if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
if (!vmw_kms_validate_mode_vram(dev_priv,
bmode->hdisplay * assumed_bpp,
bmode->vdisplay))
continue;
 
2012,12 → 1612,10
drm_mode_probed_add(connector, mode);
}
 
drm_mode_connector_list_update(connector, true);
/* Move the preferred mode first, help apps pick the right mode. */
if (du->pref_mode)
list_move(&du->pref_mode->head, &connector->probed_modes);
drm_mode_sort(&connector->modes);
 
drm_mode_connector_list_update(connector, true);
 
return 1;
}
 
2040,25 → 1638,21
unsigned rects_size;
int ret;
int i;
u64 total_pixels = 0;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_vmw_rect bounding_box = {0};
 
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
 
if (!arg->num_outputs) {
struct drm_vmw_rect def_rect = {0, 0, 800, 600};
vmw_du_update_layout(dev_priv, 1, &def_rect);
goto out_unlock;
return 0;
}
 
rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
GFP_KERNEL);
if (unlikely(!rects)) {
ret = -ENOMEM;
goto out_unlock;
}
if (unlikely(!rects))
return -ENOMEM;
 
user_rects = (void __user *)(unsigned long)arg->rects;
ret = copy_from_user(rects, user_rects, rects_size);
2077,14 → 1671,460
ret = -EINVAL;
goto out_free;
}
 
/*
* bounding_box.w and bunding_box.h are used as
* lower-right coordinates
*/
if (rects[i].x + rects[i].w > bounding_box.w)
bounding_box.w = rects[i].x + rects[i].w;
 
if (rects[i].y + rects[i].h > bounding_box.h)
bounding_box.h = rects[i].y + rects[i].h;
 
total_pixels += (u64) rects[i].w * (u64) rects[i].h;
}
 
if (dev_priv->active_display_unit == vmw_du_screen_target) {
/*
* For Screen Targets, the limits for a topology are:
* 1. Bounding box (assuming 32bpp) must be < prim_bb_mem
* 2. Total pixels (assuming 32bpp) must be < prim_bb_mem
*/
u64 bb_mem = bounding_box.w * bounding_box.h * 4;
u64 pixel_mem = total_pixels * 4;
 
if (bb_mem > dev_priv->prim_bb_mem) {
DRM_ERROR("Topology is beyond supported limits.\n");
ret = -EINVAL;
goto out_free;
}
 
if (pixel_mem > dev_priv->prim_bb_mem) {
DRM_ERROR("Combined output size too large\n");
ret = -EINVAL;
goto out_free;
}
}
 
vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
 
out_free:
kfree(rects);
return ret;
}
#endif
/**
* vmw_kms_helper_dirty - Helper to build commands and perform actions based
* on a set of cliprects and a set of display units.
*
* @dev_priv: Pointer to a device private structure.
* @framebuffer: Pointer to the framebuffer on which to perform the actions.
* @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
* Cliprects are given in framebuffer coordinates.
* @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
* be NULL. Cliprects are given in source coordinates.
* @dest_x: X coordinate offset for the crtc / destination clip rects.
* @dest_y: Y coordinate offset for the crtc / destination clip rects.
* @num_clips: Number of cliprects in the @clips or @vclips array.
* @increment: Integer with which to increment the clip counter when looping.
* Used to skip a predetermined number of clip rects.
* @dirty: Closure structure. See the description of struct vmw_kms_dirty.
*/
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
const struct drm_clip_rect *clips,
const struct drm_vmw_rect *vclips,
s32 dest_x, s32 dest_y,
int num_clips,
int increment,
struct vmw_kms_dirty *dirty)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_crtc *crtc;
u32 num_units = 0;
u32 i, k;
 
dirty->dev_priv = dev_priv;
 
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
if (crtc->primary->fb != &framebuffer->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
 
for (k = 0; k < num_units; k++) {
struct vmw_display_unit *unit = units[k];
s32 crtc_x = unit->crtc.x;
s32 crtc_y = unit->crtc.y;
s32 crtc_width = unit->crtc.mode.hdisplay;
s32 crtc_height = unit->crtc.mode.vdisplay;
const struct drm_clip_rect *clips_ptr = clips;
const struct drm_vmw_rect *vclips_ptr = vclips;
 
dirty->unit = unit;
if (dirty->fifo_reserve_size > 0) {
dirty->cmd = vmw_fifo_reserve(dev_priv,
dirty->fifo_reserve_size);
if (!dirty->cmd) {
DRM_ERROR("Couldn't reserve fifo space "
"for dirty blits.\n");
return -ENOMEM;
}
memset(dirty->cmd, 0, dirty->fifo_reserve_size);
}
dirty->num_hits = 0;
for (i = 0; i < num_clips; i++, clips_ptr += increment,
vclips_ptr += increment) {
s32 clip_left;
s32 clip_top;
 
/*
* Select clip array type. Note that integer type
* in @clips is unsigned short, whereas in @vclips
* it's 32-bit.
*/
if (clips) {
dirty->fb_x = (s32) clips_ptr->x1;
dirty->fb_y = (s32) clips_ptr->y1;
dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
crtc_x;
dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
crtc_y;
} else {
dirty->fb_x = vclips_ptr->x;
dirty->fb_y = vclips_ptr->y;
dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
dest_x - crtc_x;
dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
dest_y - crtc_y;
}
 
dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
 
/* Skip this clip if it's outside the crtc region */
if (dirty->unit_x1 >= crtc_width ||
dirty->unit_y1 >= crtc_height ||
dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
continue;
 
/* Clip right and bottom to crtc limits */
dirty->unit_x2 = min_t(s32, dirty->unit_x2,
crtc_width);
dirty->unit_y2 = min_t(s32, dirty->unit_y2,
crtc_height);
 
/* Clip left and top to crtc limits */
clip_left = min_t(s32, dirty->unit_x1, 0);
clip_top = min_t(s32, dirty->unit_y1, 0);
dirty->unit_x1 -= clip_left;
dirty->unit_y1 -= clip_top;
dirty->fb_x -= clip_left;
dirty->fb_y -= clip_top;
 
dirty->clip(dirty);
}
 
dirty->fifo_commit(dirty);
}
 
return 0;
}
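The helper above is driven entirely through the struct vmw_kms_dirty closure. The following is a minimal caller-side sketch, not taken from this driver: the blit layout and the example_* names are hypothetical, but the callback wiring mirrors how the dirty helpers in this file are meant to be used.
 
/* Hypothetical per-clip fifo command; header + body mirror the LDU update blit. */
struct example_blit {
	uint32_t header;		/* e.g. SVGA_CMD_UPDATE */
	SVGAFifoCmdUpdate body;
};
 
/* Called by the helper once per clip rect, already clipped to the current unit. */
static void example_clip(struct vmw_kms_dirty *dirty)
{
	struct example_blit *blit = (struct example_blit *)dirty->cmd + dirty->num_hits;
 
	blit->header = SVGA_CMD_UPDATE;
	blit->body.x = dirty->unit_x1;
	blit->body.y = dirty->unit_y1;
	blit->body.width = dirty->unit_x2 - dirty->unit_x1;
	blit->body.height = dirty->unit_y2 - dirty->unit_y1;
	dirty->num_hits++;
}
 
/* Called by the helper once per display unit; commits only what was emitted. */
static void example_commit(struct vmw_kms_dirty *dirty)
{
	vmw_fifo_commit(dirty->dev_priv,
			sizeof(struct example_blit) * dirty->num_hits);
}
 
/* Caller side: reserve room for one blit per clip and hand control to the helper. */
static int example_dmabuf_dirty(struct vmw_private *dev_priv,
				struct vmw_framebuffer *framebuffer,
				const struct drm_clip_rect *clips,
				int num_clips)
{
	struct vmw_kms_dirty dirty = {
		.clip = example_clip,
		.fifo_commit = example_commit,
		.fifo_reserve_size = sizeof(struct example_blit) * num_clips,
	};
 
	return vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
				    0, 0, num_clips, 1, &dirty);
}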
 
/**
* vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
* command submission.
*
* @dev_priv: Pointer to a device private structure.
* @buf: The buffer object
* @interruptible: Whether to perform waits as interruptible.
* @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
* the buffer will be validated as a GMR. Already pinned buffers will not be
* validated.
*
* Returns 0 on success, negative error code on failure, -ERESTARTSYS if
* interrupted by a signal.
*/
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool interruptible,
bool validate_as_mob)
{
struct ttm_buffer_object *bo = &buf->base;
int ret;
 
ttm_bo_reserve(bo, false, false, interruptible, NULL);
ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
validate_as_mob);
if (ret)
ttm_bo_unreserve(bo);
 
return ret;
}
 
/**
* vmw_kms_helper_buffer_revert - Undo the actions of
* vmw_kms_helper_buffer_prepare.
*
* @buf: Pointer to the buffer object.
*
* Helper to be used if an error forces the caller to undo the actions of
* vmw_kms_helper_buffer_prepare.
*/
void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
{
if (buf)
ttm_bo_unreserve(&buf->base);
}
 
/**
* vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
* kms command submission.
*
* @dev_priv: Pointer to a device private structure.
* @file_priv: Pointer to a struct drm_file representing the caller's
* connection. Must be NULL if @user_fence_rep is NULL; if non-NULL,
* @user_fence_rep must also be non-NULL.
* @buf: The buffer object.
* @out_fence: Optional pointer to a fence pointer. If non-NULL, a
* ref-counted fence pointer is returned here.
* @user_fence_rep: Optional pointer to a user-space provided struct
* drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
* function copies fence data to user-space in a fail-safe manner.
*/
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_dma_buffer *buf,
struct vmw_fence_obj **out_fence,
struct drm_vmw_fence_rep __user *
user_fence_rep)
{
struct vmw_fence_obj *fence;
uint32_t handle;
int ret;
 
ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
file_priv ? &handle : NULL);
if (buf)
vmw_fence_single_bo(&buf->base, fence);
if (file_priv)
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
ret, user_fence_rep, fence,
handle);
if (out_fence)
*out_fence = fence;
else
vmw_fence_obj_unreference(&fence);
 
vmw_kms_helper_buffer_revert(buf);
}
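A hedged sketch of how the three buffer helpers above are meant to pair up around a command submission; example_emit_commands() is only a placeholder for whatever fifo commands the caller actually builds.
 
/* Hypothetical command-building step, declared only for this sketch. */
static int example_emit_commands(struct vmw_private *dev_priv,
				 struct vmw_dma_buffer *buf);
 
static int example_submit_with_buffer(struct vmw_private *dev_priv,
				      struct drm_file *file_priv,
				      struct vmw_dma_buffer *buf,
				      struct drm_vmw_fence_rep __user *user_fence_rep)
{
	int ret;
 
	/* Reserve and validate the buffer before touching the fifo. */
	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
	if (ret)
		return ret;
 
	ret = example_emit_commands(dev_priv, buf);
	if (ret) {
		/* Error path: just drop the reservation again. */
		vmw_kms_helper_buffer_revert(buf);
		return ret;
	}
 
	/* Unreserves the buffer and fences it against the submitted commands. */
	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
				     user_fence_rep);
	return 0;
}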
 
 
/**
* vmw_kms_helper_resource_revert - Undo the actions of
* vmw_kms_helper_resource_prepare.
*
* @res: Pointer to the resource. Typically a surface.
*
* Helper to be used if an error forces the caller to undo the actions of
* vmw_kms_helper_resource_prepare.
*/
void vmw_kms_helper_resource_revert(struct vmw_resource *res)
{
vmw_kms_helper_buffer_revert(res->backup);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
 
/**
* vmw_kms_helper_resource_prepare - Reserve and validate a resource before
* command submission.
*
* @res: Pointer to the resource. Typically a surface.
* @interruptible: Whether to perform waits as interruptible.
*
* Also reserves and validates the backup buffer if the resource is guest-backed.
* Returns 0 on success, negative error code on failure, or -ERESTARTSYS if
* interrupted by a signal.
*/
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
bool interruptible)
{
int ret = 0;
 
if (interruptible)
ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
else
mutex_lock(&res->dev_priv->cmdbuf_mutex);
 
if (unlikely(ret != 0))
return -ERESTARTSYS;
 
ret = vmw_resource_reserve(res, interruptible, false);
if (ret)
goto out_unlock;
 
if (res->backup) {
ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
interruptible,
res->dev_priv->has_mob);
if (ret)
goto out_unreserve;
}
ret = vmw_resource_validate(res);
if (ret)
goto out_revert;
return 0;
 
out_revert:
vmw_kms_helper_buffer_revert(res->backup);
out_unreserve:
vmw_resource_unreserve(res, false, NULL, 0);
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
return ret;
}
#endif
 
/**
* vmw_kms_helper_resource_finish - Unreserve and fence a resource after
* kms command submission.
*
* @res: Pointer to the resource. Typically a surface.
* @out_fence: Optional pointer to a fence pointer. If non-NULL, a
* ref-counted fence pointer is returned here.
*/
void vmw_kms_helper_resource_finish(struct vmw_resource *res,
struct vmw_fence_obj **out_fence)
{
if (res->backup || out_fence)
vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
out_fence, NULL);
 
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
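The resource helpers follow the same prepare / revert-or-finish pattern; a minimal sketch, with example_emit_surface_commands() standing in for the caller's real command building.
 
/* Hypothetical command-building step, declared only for this sketch. */
static int example_emit_surface_commands(struct vmw_resource *res);
 
static int example_submit_with_surface(struct vmw_resource *res,
				       struct vmw_fence_obj **out_fence)
{
	int ret;
 
	/* Takes the cmdbuf mutex, reserves and validates @res and its backup. */
	ret = vmw_kms_helper_resource_prepare(res, true);
	if (ret)
		return ret;
 
	ret = example_emit_surface_commands(res);
	if (ret) {
		vmw_kms_helper_resource_revert(res);
		return ret;
	}
 
	/* Fences the backup buffer (if any) and drops reservation and mutex. */
	vmw_kms_helper_resource_finish(res, out_fence);
	return 0;
}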
 
/**
* vmw_kms_update_proxy - Helper function to update a proxy surface from
* its backing MOB.
*
* @res: Pointer to the surface resource
* @clips: Clip rects in framebuffer (surface) space.
* @num_clips: Number of clips in @clips.
* @increment: Integer with which to increment the clip counter when looping.
* Used to skip a predetermined number of clip rects.
*
* This function makes sure the proxy surface is updated from its backing MOB
* using the region given by @clips. The surface resource @res and its backing
* MOB need to be reserved and validated on call.
*/
int vmw_kms_update_proxy(struct vmw_resource *res,
const struct drm_clip_rect *clips,
unsigned num_clips,
int increment)
{
struct vmw_private *dev_priv = res->dev_priv;
struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdUpdateGBImage body;
} *cmd;
SVGA3dBox *box;
size_t copy_size = 0;
int i;
 
if (!clips)
return 0;
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
if (!cmd) {
DRM_ERROR("Couldn't reserve fifo space for proxy surface "
"update.\n");
return -ENOMEM;
}
 
for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
box = &cmd->body.box;
 
cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
cmd->header.size = sizeof(cmd->body);
cmd->body.image.sid = res->id;
cmd->body.image.face = 0;
cmd->body.image.mipmap = 0;
 
if (clips->x1 > size->width || clips->x2 > size->width ||
clips->y1 > size->height || clips->y2 > size->height) {
DRM_ERROR("Invalid clips outsize of framebuffer.\n");
return -EINVAL;
}
 
box->x = clips->x1;
box->y = clips->y1;
box->z = 0;
box->w = clips->x2 - clips->x1;
box->h = clips->y2 - clips->y1;
box->d = 1;
 
copy_size += sizeof(*cmd);
}
 
vmw_fifo_commit(dev_priv, copy_size);
 
return 0;
}
 
int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
unsigned unit,
u32 max_width,
u32 max_height,
struct drm_connector **p_con,
struct drm_crtc **p_crtc,
struct drm_display_mode **p_mode)
{
struct drm_connector *con;
struct vmw_display_unit *du;
struct drm_display_mode *mode;
int i = 0;
 
list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
head) {
if (i == unit)
break;
 
++i;
}
 
if (i != unit) {
DRM_ERROR("Could not find initial display unit.\n");
return -EINVAL;
}
 
if (list_empty(&con->modes))
(void) vmw_du_connector_fill_modes(con, max_width, max_height);
 
if (list_empty(&con->modes)) {
DRM_ERROR("Could not find initial display mode.\n");
return -EINVAL;
}
 
du = vmw_connector_to_du(con);
*p_con = con;
*p_crtc = &du->crtc;
 
list_for_each_entry(mode, &con->modes, head) {
if (mode->type & DRM_MODE_TYPE_PREFERRED)
break;
}
 
if (mode->type & DRM_MODE_TYPE_PREFERRED)
*p_mode = mode;
else {
WARN_ONCE(true, "Could not find initial preferred mode.\n");
*p_mode = list_first_entry(&con->modes,
struct drm_display_mode,
head);
}
 
return 0;
}
/drivers/video/drm/vmwgfx/vmwgfx_kms.h
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
32,11 → 32,60
#include <drm/drm_crtc_helper.h>
#include "vmwgfx_drv.h"
 
/**
* struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
* function.
*
* @fifo_commit: Callback that is called once for each display unit after
* all clip rects. This function must commit the fifo space reserved by the
* helper. Set up by the caller.
* @clip: Callback that is called for each cliprect on each display unit.
* Set up by the caller.
* @fifo_reserve_size: Fifo size that the helper should try to allocate for
* each display unit. Set up by the caller.
* @dev_priv: Pointer to the device private. Set up by the helper.
* @unit: The current display unit. Set up by the helper before a call to @clip.
* @cmd: The allocated fifo space. Set up by the helper before the first @clip
* call.
* @num_hits: Number of clip rect commands for this display unit.
* Cleared by the helper before the first @clip call. Updated by the @clip
* callback.
* @fb_x: Clip rect left side in framebuffer coordinates.
* @fb_y: Clip rect top side in framebuffer coordinates.
* @unit_x1: Clip rect left side in crtc coordinates.
* @unit_y1: Clip rect top side in crtc coordinates.
* @unit_x2: Clip rect right side in crtc coordinates.
* @unit_y2: Clip rect bottom side in crtc coordinates.
*
* The clip rect coordinates are updated by the helper for each @clip call.
* Note that callers may embed this structure in a larger one if more info
* needs to be passed between the helper caller and the helper callbacks
* (see the sketch below the struct definition).
*/
struct vmw_kms_dirty {
void (*fifo_commit)(struct vmw_kms_dirty *);
void (*clip)(struct vmw_kms_dirty *);
size_t fifo_reserve_size;
struct vmw_private *dev_priv;
struct vmw_display_unit *unit;
void *cmd;
u32 num_hits;
s32 fb_x;
s32 fb_y;
s32 unit_x1;
s32 unit_y1;
s32 unit_x2;
s32 unit_y2;
};
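As noted above, callers that need extra per-blit state can embed this struct in a larger closure and recover it with container_of() inside the callbacks; a purely hypothetical example follows.
 
/* Hypothetical derived closure; the extra fields are caller-defined. */
struct example_surface_dirty {
	struct vmw_kms_dirty base;
	u32 sid;		/* surface id used while building commands */
	u32 cpp;		/* bytes per pixel of the framebuffer */
};
 
/* Inside a clip callback:
 *	struct example_surface_dirty *sdirty =
 *		container_of(dirty, struct example_surface_dirty, base);
 */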
 
#define VMWGFX_NUM_DISPLAY_UNITS 1
 
 
#define vmw_framebuffer_to_vfb(x) \
container_of(x, struct vmw_framebuffer, base)
#define vmw_framebuffer_to_vfbs(x) \
container_of(x, struct vmw_framebuffer_surface, base.base)
#define vmw_framebuffer_to_vfbd(x) \
container_of(x, struct vmw_framebuffer_dmabuf, base.base)
 
/**
* Base class for framebuffers
53,10 → 102,28
uint32_t user_handle;
};
 
/*
* Clip rectangle
*/
struct vmw_clip_rect {
int x1, x2, y1, y2;
};
 
#define vmw_crtc_to_du(x) \
container_of(x, struct vmw_display_unit, crtc)
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
struct vmw_dma_buffer *buffer;
struct list_head head;
bool is_dmabuf_proxy; /* true if this is proxy surface for DMA buf */
};
 
 
struct vmw_framebuffer_dmabuf {
struct vmw_framebuffer base;
struct vmw_dma_buffer *buffer;
};
 
 
/*
* Basic cursor manipulation
*/
92,6 → 159,8
 
int hotspot_x;
int hotspot_y;
s32 core_hotspot_x;
s32 core_hotspot_y;
 
unsigned unit;
 
120,20 → 189,17
/*
* Shared display unit functions - vmwgfx_kms.c
*/
void vmw_display_unit_cleanup(struct vmw_display_unit *du);
int vmw_du_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags);
void vmw_du_cleanup(struct vmw_display_unit *du);
void vmw_du_crtc_save(struct drm_crtc *crtc);
void vmw_du_crtc_restore(struct drm_crtc *crtc);
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
uint32_t start, uint32_t size);
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height);
int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height,
int32_t hot_x, int32_t hot_y);
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
int vmw_du_connector_dpms(struct drm_connector *connector, int mode);
void vmw_du_connector_save(struct drm_connector *connector);
void vmw_du_connector_restore(struct drm_connector *connector);
enum drm_connector_status
143,25 → 209,118
int vmw_du_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val);
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
const struct drm_clip_rect *clips,
const struct drm_vmw_rect *vclips,
s32 dest_x, s32 dest_y,
int num_clips,
int increment,
struct vmw_kms_dirty *dirty);
 
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool interruptible,
bool validate_as_mob);
void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_dma_buffer *buf,
struct vmw_fence_obj **out_fence,
struct drm_vmw_fence_rep __user *
user_fence_rep);
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
bool interruptible);
void vmw_kms_helper_resource_revert(struct vmw_resource *res);
void vmw_kms_helper_resource_finish(struct vmw_resource *res,
struct vmw_fence_obj **out_fence);
int vmw_kms_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct drm_vmw_rect *vclips,
uint32_t num_clips);
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd *mode_cmd);
int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
unsigned unit,
u32 max_width,
u32 max_height,
struct drm_connector **p_con,
struct drm_crtc **p_crtc,
struct drm_display_mode **p_mode);
void vmw_guess_mode_timing(struct drm_display_mode *mode);
 
/*
* Legacy display unit functions - vmwgfx_ldu.c
*/
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips, int increment);
int vmw_kms_update_proxy(struct vmw_resource *res,
const struct drm_clip_rect *clips,
unsigned num_clips,
int increment);
 
/*
* Screen Objects display functions - vmwgfx_scrn.c
*/
int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
struct drm_vmw_rect *rects);
bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
struct drm_crtc *crtc);
void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
struct drm_crtc *crtc);
int vmw_kms_sou_init_display(struct vmw_private *dev_priv);
int vmw_kms_sou_close_display(struct vmw_private *dev_priv);
int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
struct drm_vmw_rect *vclips,
struct vmw_resource *srf,
s32 dest_x,
s32 dest_y,
unsigned num_clips, int inc,
struct vmw_fence_obj **out_fence);
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
unsigned num_clips, int increment,
bool interruptible,
struct vmw_fence_obj **out_fence);
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct drm_vmw_rect *vclips,
uint32_t num_clips);
 
/*
* Screen Target Display Unit functions - vmwgfx_stdu.c
*/
int vmw_kms_stdu_init_display(struct vmw_private *dev_priv);
int vmw_kms_stdu_close_display(struct vmw_private *dev_priv);
int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
struct drm_vmw_rect *vclips,
struct vmw_resource *srf,
s32 dest_x,
s32 dest_y,
unsigned num_clips, int inc,
struct vmw_fence_obj **out_fence);
int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct drm_clip_rect *clips,
struct drm_vmw_rect *vclips,
uint32_t num_clips,
int increment,
bool to_surface,
bool interruptible);
 
 
#endif
/drivers/video/drm/vmwgfx/vmwgfx_ldu.c
0,0 → 1,488
/**************************************************************************
*
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>
 
 
#define vmw_crtc_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.crtc)
#define vmw_encoder_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.encoder)
#define vmw_connector_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.connector)
 
struct vmw_legacy_display {
struct list_head active;
 
unsigned num_active;
unsigned last_num_active;
 
struct vmw_framebuffer *fb;
};
 
/**
* Display unit using the legacy register interface.
*/
struct vmw_legacy_display_unit {
struct vmw_display_unit base;
 
struct list_head active;
};
 
static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
{
list_del_init(&ldu->active);
vmw_du_cleanup(&ldu->base);
kfree(ldu);
}
 
 
/*
* Legacy Display Unit CRTC functions
*/
 
static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
{
vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
}
 
static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
struct vmw_legacy_display *lds = dev_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
struct vmw_display_unit *du = NULL;
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc = NULL;
int i = 0, ret;
 
/* If there is no display topology the host just assumes
* that the guest will set the same layout as the host.
*/
if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
int w = 0, h = 0;
list_for_each_entry(entry, &lds->active, active) {
crtc = &entry->base.crtc;
w = max(w, crtc->x + crtc->mode.hdisplay);
h = max(h, crtc->y + crtc->mode.vdisplay);
i++;
}
 
if (crtc == NULL)
return 0;
fb = entry->base.crtc.primary->fb;
 
return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
fb->bits_per_pixel, fb->depth);
}
 
if (!list_empty(&lds->active)) {
entry = list_entry(lds->active.next, typeof(*entry), active);
fb = entry->base.crtc.primary->fb;
 
vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
fb->bits_per_pixel, fb->depth);
}
 
/* Make sure we always show something. */
vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
lds->num_active ? lds->num_active : 1);
 
i = 0;
list_for_each_entry(entry, &lds->active, active) {
crtc = &entry->base.crtc;
 
vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x);
vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
 
i++;
}
 
BUG_ON(i != lds->num_active);
 
lds->last_num_active = lds->num_active;
 
 
/* Find the first du with a cursor. */
list_for_each_entry(entry, &lds->active, active) {
du = &entry->base;
 
if (!du->cursor_dmabuf)
continue;
 
// ret = vmw_cursor_update_dmabuf(dev_priv,
// du->cursor_dmabuf,
// 64, 64,
// du->hotspot_x,
// du->hotspot_y);
// if (ret == 0)
// break;
 
DRM_ERROR("Could not update cursor image\n");
}
 
return 0;
}
 
static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
struct vmw_legacy_display_unit *ldu)
{
struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
if (list_empty(&ldu->active))
return 0;
 
/* Must init otherwise list_empty(&ldu->active) will not work. */
list_del_init(&ldu->active);
if (--(ld->num_active) == 0) {
BUG_ON(!ld->fb);
if (ld->fb->unpin)
ld->fb->unpin(ld->fb);
ld->fb = NULL;
}
 
return 0;
}
 
static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
struct vmw_legacy_display_unit *ldu,
struct vmw_framebuffer *vfb)
{
struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
struct list_head *at;
 
BUG_ON(!ld->num_active && ld->fb);
if (vfb != ld->fb) {
if (ld->fb && ld->fb->unpin)
ld->fb->unpin(ld->fb);
if (vfb->pin)
vfb->pin(vfb);
ld->fb = vfb;
}
 
if (!list_empty(&ldu->active))
return 0;
 
at = &ld->active;
list_for_each_entry(entry, &ld->active, active) {
if (entry->base.unit > ldu->base.unit)
break;
 
at = &entry->active;
}
 
list_add(&ldu->active, at);
 
ld->num_active++;
 
return 0;
}
 
static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
{
struct vmw_private *dev_priv;
struct vmw_legacy_display_unit *ldu;
struct drm_connector *connector;
struct drm_display_mode *mode;
struct drm_encoder *encoder;
struct vmw_framebuffer *vfb;
struct drm_framebuffer *fb;
struct drm_crtc *crtc;
 
if (!set)
return -EINVAL;
 
if (!set->crtc)
return -EINVAL;
 
/* get the ldu */
crtc = set->crtc;
ldu = vmw_crtc_to_ldu(crtc);
vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
dev_priv = vmw_priv(crtc->dev);
 
if (set->num_connectors > 1) {
DRM_ERROR("to many connectors\n");
return -EINVAL;
}
 
if (set->num_connectors == 1 &&
set->connectors[0] != &ldu->base.connector) {
DRM_ERROR("connector doesn't match %p %p\n",
set->connectors[0], &ldu->base.connector);
return -EINVAL;
}
 
/* ldu only supports one fb active at a time */
if (dev_priv->ldu_priv->fb && vfb &&
!(dev_priv->ldu_priv->num_active == 1 &&
!list_empty(&ldu->active)) &&
dev_priv->ldu_priv->fb != vfb) {
DRM_ERROR("Multiple framebuffers not supported\n");
return -EINVAL;
}
 
/* since they always map one to one these are safe */
connector = &ldu->base.connector;
encoder = &ldu->base.encoder;
 
/* should we turn the crtc off? */
if (set->num_connectors == 0 || !set->mode || !set->fb) {
 
connector->encoder = NULL;
encoder->crtc = NULL;
crtc->primary->fb = NULL;
crtc->enabled = false;
 
vmw_ldu_del_active(dev_priv, ldu);
 
return vmw_ldu_commit_list(dev_priv);
}
 
 
/* we now know we want to set a mode */
mode = set->mode;
fb = set->fb;
 
if (set->x + mode->hdisplay > fb->width ||
set->y + mode->vdisplay > fb->height) {
DRM_ERROR("set outside of framebuffer\n");
return -EINVAL;
}
 
vmw_svga_enable(dev_priv);
 
crtc->primary->fb = fb;
encoder->crtc = crtc;
connector->encoder = encoder;
crtc->x = set->x;
crtc->y = set->y;
crtc->mode = *mode;
crtc->enabled = true;
 
vmw_ldu_add_active(dev_priv, ldu, vfb);
 
return vmw_ldu_commit_list(dev_priv);
}
 
static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.save = vmw_du_crtc_save,
.restore = vmw_du_crtc_restore,
// .cursor_set2 = vmw_du_crtc_cursor_set2,
// .cursor_move = vmw_du_crtc_cursor_move,
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_ldu_crtc_destroy,
.set_config = vmw_ldu_crtc_set_config,
};
 
 
/*
* Legacy Display Unit encoder functions
*/
 
static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
{
vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
}
 
static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
.destroy = vmw_ldu_encoder_destroy,
};
 
/*
* Legacy Display Unit connector functions
*/
 
static void vmw_ldu_connector_destroy(struct drm_connector *connector)
{
vmw_ldu_destroy(vmw_connector_to_ldu(connector));
}
 
static struct drm_connector_funcs vmw_legacy_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.save = vmw_du_connector_save,
.restore = vmw_du_connector_restore,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
.set_property = vmw_du_connector_set_property,
.destroy = vmw_ldu_connector_destroy,
};
 
static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
{
struct vmw_legacy_display_unit *ldu;
struct drm_device *dev = dev_priv->dev;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
 
ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
if (!ldu)
return -ENOMEM;
 
ldu->base.unit = unit;
crtc = &ldu->base.crtc;
encoder = &ldu->base.encoder;
connector = &ldu->base.connector;
 
INIT_LIST_HEAD(&ldu->active);
 
ldu->base.pref_active = (unit == 0);
ldu->base.pref_width = dev_priv->initial_width;
ldu->base.pref_height = dev_priv->initial_height;
ldu->base.pref_mode = NULL;
ldu->base.is_implicit = true;
 
drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
connector->status = vmw_du_connector_detect(connector, true);
 
drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL);
drm_mode_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
 
(void) drm_connector_register(connector);
 
drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
 
drm_mode_crtc_set_gamma_size(crtc, 256);
 
drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
 
return 0;
}
 
int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
int i, ret;
 
if (dev_priv->ldu_priv) {
DRM_INFO("ldu system already on\n");
return -EINVAL;
}
 
dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
if (!dev_priv->ldu_priv)
return -ENOMEM;
 
INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
dev_priv->ldu_priv->num_active = 0;
dev_priv->ldu_priv->last_num_active = 0;
dev_priv->ldu_priv->fb = NULL;
 
/* for old hardware without multimon only enable one display */
if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
else
ret = drm_vblank_init(dev, 1);
if (ret != 0)
goto err_free;
 
ret = drm_mode_create_dirty_info_property(dev);
if (ret != 0)
goto err_vblank_cleanup;
 
if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
vmw_ldu_init(dev_priv, i);
else
vmw_ldu_init(dev_priv, 0);
 
dev_priv->active_display_unit = vmw_du_legacy;
 
DRM_INFO("Legacy Display Unit initialized\n");
 
return 0;
 
err_vblank_cleanup:
drm_vblank_cleanup(dev);
err_free:
kfree(dev_priv->ldu_priv);
dev_priv->ldu_priv = NULL;
return ret;
}
 
int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
if (!dev_priv->ldu_priv)
return -ENOSYS;
 
drm_vblank_cleanup(dev);
 
BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
 
kfree(dev_priv->ldu_priv);
 
return 0;
}
 
 
int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips, int increment)
{
size_t fifo_size;
int i;
 
struct {
uint32_t header;
SVGAFifoCmdUpdate body;
} *cmd;
 
fifo_size = sizeof(*cmd) * num_clips;
cmd = vmw_fifo_reserve(dev_priv, fifo_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
return -ENOMEM;
}
 
memset(cmd, 0, fifo_size);
for (i = 0; i < num_clips; i++, clips += increment) {
cmd[i].header = SVGA_CMD_UPDATE;
cmd[i].body.x = clips->x1;
cmd[i].body.y = clips->y1;
cmd[i].body.width = clips->x2 - clips->x1;
cmd[i].body.height = clips->y2 - clips->y1;
}
 
vmw_fifo_commit(dev_priv, fifo_size);
return 0;
}
/drivers/video/drm/vmwgfx/vmwgfx_marker.c
153,39 → 153,3
}
return 0;
}
 
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
u64 quotient;
 
if (dividend < 0) {
quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
*remainder = -*remainder;
if (divisor > 0)
quotient = -quotient;
} else {
quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
if (divisor < 0)
quotient = -quotient;
}
return quotient;
}
 
struct timespec ns_to_timespec(const s64 nsec)
{
struct timespec ts;
s32 rem;
 
if (!nsec)
return (struct timespec) {0, 0};
 
ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
if (unlikely(rem < 0)) {
ts.tv_sec--;
rem += NSEC_PER_SEC;
}
ts.tv_nsec = rem;
 
return ts;
}
 
/drivers/video/drm/vmwgfx/vmwgfx_mob.c
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
31,7 → 31,7
* If we set up the screen target otable, screen objects stop working.
*/
 
#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1)
#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))
 
#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
67,11 → 67,25
* @size: Size of the table (page-aligned).
* @page_table: Pointer to a struct vmw_mob holding the page table.
*/
struct vmw_otable {
unsigned long size;
struct vmw_mob *page_table;
static const struct vmw_otable pre_dx_tables[] = {
{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
};
 
static const struct vmw_otable dx_tables[] = {
{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};
 
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
92,6 → 106,7
*/
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
SVGAOTableType type,
struct ttm_buffer_object *otable_bo,
unsigned long offset,
struct vmw_otable *otable)
{
106,7 → 121,7
 
BUG_ON(otable->page_table != NULL);
 
vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
vsgt = vmw_bo_sg_table(otable_bo);
vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
WARN_ON(!vmw_piter_next(&iter));
 
142,7 → 157,7
cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
cmd->header.size = sizeof(cmd->body);
cmd->body.type = type;
cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
cmd->body.sizeInBytes = otable->size;
cmd->body.validSizeInBytes = 0;
cmd->body.ptDepth = mob->pt_level;
191,7 → 206,9
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for OTable "
"takedown.\n");
} else {
return;
}
 
memset(cmd, 0, sizeof(*cmd));
cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
cmd->header.size = sizeof(cmd->body);
201,7 → 218,6
cmd->body.validSizeInBytes = 0;
cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
 
if (bo) {
int ret;
217,47 → 233,21
otable->page_table = NULL;
}
 
/*
* vmw_otables_setup - Set up guest backed memory object tables
*
* @dev_priv: Pointer to a device private structure
*
* Takes care of the device guest backed surface
* initialization, by setting up the guest backed memory object tables.
* Returns 0 on success and various error codes on failure. A successful return
* means the object tables can be taken down using the vmw_otables_takedown
* function.
*/
int vmw_otables_setup(struct vmw_private *dev_priv)
 
static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
struct vmw_otable_batch *batch)
{
unsigned long offset;
unsigned long bo_size;
struct vmw_otable *otables;
struct vmw_otable *otables = batch->otables;
SVGAOTableType i;
int ret;
 
otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
GFP_KERNEL);
if (unlikely(otables == NULL)) {
DRM_ERROR("Failed to allocate space for otable "
"metadata.\n");
return -ENOMEM;
}
bo_size = 0;
for (i = 0; i < batch->num_otables; ++i) {
if (!otables[i].enabled)
continue;
 
otables[SVGA_OTABLE_MOB].size =
VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
otables[SVGA_OTABLE_SURFACE].size =
VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
otables[SVGA_OTABLE_CONTEXT].size =
VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
otables[SVGA_OTABLE_SHADER].size =
VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
otables[SVGA_OTABLE_SCREEN_TARGET].size =
VMWGFX_NUM_GB_SCREEN_TARGET *
SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;
 
bo_size = 0;
for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
otables[i].size =
(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
bo_size += otables[i].size;
267,25 → 257,29
ttm_bo_type_device,
&vmw_sys_ne_placement,
0, false, NULL,
&dev_priv->otable_bo);
&batch->otable_bo);
 
if (unlikely(ret != 0))
goto out_no_bo;
 
ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
BUG_ON(ret != 0);
ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
if (unlikely(ret != 0))
goto out_unreserve;
ret = vmw_bo_map_dma(dev_priv->otable_bo);
ret = vmw_bo_map_dma(batch->otable_bo);
if (unlikely(ret != 0))
goto out_unreserve;
 
ttm_bo_unreserve(dev_priv->otable_bo);
ttm_bo_unreserve(batch->otable_bo);
 
offset = 0;
for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
ret = vmw_setup_otable_base(dev_priv, i, offset,
for (i = 0; i < batch->num_otables; ++i) {
if (!batch->otables[i].enabled)
continue;
 
ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
offset,
&otables[i]);
if (unlikely(ret != 0))
goto out_no_setup;
292,38 → 286,76
offset += otables[i].size;
}
 
dev_priv->otables = otables;
return 0;
 
out_unreserve:
ttm_bo_unreserve(dev_priv->otable_bo);
ttm_bo_unreserve(batch->otable_bo);
out_no_setup:
for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
vmw_takedown_otable_base(dev_priv, i, &otables[i]);
for (i = 0; i < batch->num_otables; ++i) {
if (batch->otables[i].enabled)
vmw_takedown_otable_base(dev_priv, i,
&batch->otables[i]);
}
 
ttm_bo_unref(&dev_priv->otable_bo);
ttm_bo_unref(&batch->otable_bo);
out_no_bo:
kfree(otables);
return ret;
}
 
 
/*
* vmw_otables_takedown - Take down guest backed memory object tables
* vmw_otables_setup - Set up guest backed memory object tables
*
* @dev_priv: Pointer to a device private structure
*
* Take down the Guest Memory Object tables.
* Takes care of the device guest backed surface
* initialization, by setting up the guest backed memory object tables.
* Returns 0 on success and various error codes on failure. A successful return
* means the object tables can be taken down using the vmw_otables_takedown
* function.
*/
void vmw_otables_takedown(struct vmw_private *dev_priv)
int vmw_otables_setup(struct vmw_private *dev_priv)
{
struct vmw_otable **otables = &dev_priv->otable_batch.otables;
int ret;
 
if (dev_priv->has_dx) {
*otables = kmalloc(sizeof(dx_tables), GFP_KERNEL);
if (*otables == NULL)
return -ENOMEM;
 
memcpy(*otables, dx_tables, sizeof(dx_tables));
dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
} else {
*otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL);
if (*otables == NULL)
return -ENOMEM;
 
memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables));
dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
}
 
ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
if (unlikely(ret != 0))
goto out_setup;
 
return 0;
 
out_setup:
kfree(*otables);
return ret;
}
 
static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
struct vmw_otable_batch *batch)
{
SVGAOTableType i;
struct ttm_buffer_object *bo = dev_priv->otable_bo;
struct ttm_buffer_object *bo = batch->otable_bo;
int ret;
 
for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
for (i = 0; i < batch->num_otables; ++i)
if (batch->otables[i].enabled)
vmw_takedown_otable_base(dev_priv, i,
&dev_priv->otables[i]);
&batch->otables[i]);
 
ret = ttm_bo_reserve(bo, false, true, false, NULL);
BUG_ON(ret != 0);
331,11 → 363,21
vmw_fence_single_bo(bo, NULL);
ttm_bo_unreserve(bo);
 
ttm_bo_unref(&dev_priv->otable_bo);
kfree(dev_priv->otables);
dev_priv->otables = NULL;
ttm_bo_unref(&batch->otable_bo);
}
 
/*
* vmw_otables_takedown - Take down guest backed memory object tables
*
* @dev_priv: Pointer to a device private structure
*
* Take down the Guest Memory Object tables.
*/
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
kfree(dev_priv->otable_batch.otables);
}
 
/*
* vmw_mob_calculate_pt_pages - Calculate the number of page table pages
429,15 → 471,15
* *@addr according to the page table entry size.
*/
#if (VMW_PPN_SIZE == 8)
static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
*((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
*((u64 *) *addr) = val >> PAGE_SHIFT;
*addr += 2;
}
#else
static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
*(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
*(*addr)++ = val >> PAGE_SHIFT;
}
#endif
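For example, with 4 KiB pages a page-table page at DMA address 0x12345000 is stored as PPN 0x12345 (the address shifted right by PAGE_SHIFT), written as a 32-bit entry when VMW_PPN_SIZE is 4 and as a 64-bit entry when it is 8.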
 
459,16 → 501,14
unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
unsigned long pt_page;
__le32 *addr, *save_addr;
u32 *addr, *save_addr;
unsigned long i;
struct page *page;
 
save_addr = addr = AllocKernelSpace(4096);
 
for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
page = vmw_piter_page(pt_iter);
 
MapPage(save_addr,(addr_t)page, 3);
save_addr = addr = kmap_atomic(page);
 
for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
vmw_mob_assign_ppn(&addr,
477,9 → 517,10
break;
WARN_ON(!vmw_piter_next(data_iter));
}
kunmap_atomic(save_addr);
vmw_piter_next(pt_iter);
}
FreeKernelSpace(save_addr);
 
return num_pt_pages;
}
 
575,7 → 616,7
vmw_fence_single_bo(bo, NULL);
ttm_bo_unreserve(bo);
}
vmw_3d_resource_dec(dev_priv, false);
vmw_fifo_resource_dec(dev_priv);
}
 
/*
628,7 → 669,7
mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
}
 
(void) vmw_3d_resource_inc(dev_priv, false);
vmw_fifo_resource_inc(dev_priv);
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
641,7 → 682,7
cmd->header.size = sizeof(cmd->body);
cmd->body.mobid = mob_id;
cmd->body.ptDepth = mob->pt_level;
cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
 
vmw_fifo_commit(dev_priv, sizeof(*cmd));
649,7 → 690,7
return 0;
 
out_no_cmd_space:
vmw_3d_resource_dec(dev_priv, false);
vmw_fifo_resource_dec(dev_priv);
if (pt_set_up)
ttm_bo_unref(&mob->pt_bo);
 
/drivers/video/drm/vmwgfx/vmwgfx_overlay.c
0,0 → 1,619
/**************************************************************************
*
* Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
 
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
 
#include <drm/ttm/ttm_placement.h>
 
#include "device_include/svga_overlay.h"
#include "device_include/svga_escape.h"
 
#define VMW_MAX_NUM_STREAMS 1
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
 
struct vmw_stream {
struct vmw_dma_buffer *buf;
bool claimed;
bool paused;
struct drm_vmw_control_stream_arg saved;
};
 
/**
* Overlay control
*/
struct vmw_overlay {
/*
* Each stream is a single overlay. In Xv these are called ports.
*/
struct mutex mutex;
struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};
 
static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
return dev_priv ? dev_priv->overlay_priv : NULL;
}
 
struct vmw_escape_header {
uint32_t cmd;
SVGAFifoCmdEscape body;
};
 
struct vmw_escape_video_flush {
struct vmw_escape_header escape;
SVGAEscapeVideoFlush flush;
};
 
static inline void fill_escape(struct vmw_escape_header *header,
uint32_t size)
{
header->cmd = SVGA_CMD_ESCAPE;
header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
header->body.size = size;
}
 
static inline void fill_flush(struct vmw_escape_video_flush *cmd,
uint32_t stream_id)
{
fill_escape(&cmd->escape, sizeof(cmd->flush));
cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
cmd->flush.streamId = stream_id;
}
 
/**
* Send put command to hw.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
struct vmw_escape_video_flush *flush;
size_t fifo_size;
bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
int i, num_items;
SVGAGuestPtr ptr;
 
struct {
struct vmw_escape_header escape;
struct {
uint32_t cmdType;
uint32_t streamId;
} header;
} *cmds;
struct {
uint32_t registerId;
uint32_t value;
} *items;
 
/* the register defines are indices, so the count is the highest index + 1 */
if (have_so)
num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
else
num_items = SVGA_VIDEO_PITCH_3 + 1;
 
fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;
 
cmds = vmw_fifo_reserve(dev_priv, fifo_size);
/* hardware has hung, can't do anything here */
if (!cmds)
return -ENOMEM;
 
items = (typeof(items))&cmds[1];
flush = (struct vmw_escape_video_flush *)&items[num_items];
 
/* the size is header + number of items */
fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));
 
cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
cmds->header.streamId = arg->stream_id;
 
/* the IDs are neatly numbered */
for (i = 0; i < num_items; i++)
items[i].registerId = i;
 
vmw_bo_get_guest_ptr(&buf->base, &ptr);
ptr.offset += arg->offset;
 
items[SVGA_VIDEO_ENABLED].value = true;
items[SVGA_VIDEO_FLAGS].value = arg->flags;
items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
items[SVGA_VIDEO_FORMAT].value = arg->format;
items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
items[SVGA_VIDEO_SIZE].value = arg->size;
items[SVGA_VIDEO_WIDTH].value = arg->width;
items[SVGA_VIDEO_HEIGHT].value = arg->height;
items[SVGA_VIDEO_SRC_X].value = arg->src.x;
items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
items[SVGA_VIDEO_DST_X].value = arg->dst.x;
items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
if (have_so) {
items[SVGA_VIDEO_DATA_GMRID].value = ptr.gmrId;
items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
}
 
fill_flush(flush, arg->stream_id);
 
vmw_fifo_commit(dev_priv, fifo_size);
 
return 0;
}
 
/**
* Send stop command to hw.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
uint32_t stream_id,
bool interruptible)
{
struct {
struct vmw_escape_header escape;
SVGAEscapeVideoSetRegs body;
struct vmw_escape_video_flush flush;
} *cmds;
int ret;
 
for (;;) {
cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
if (cmds)
break;
 
ret = vmw_fallback_wait(dev_priv, false, true, 0,
interruptible, 3*HZ);
if (interruptible && ret == -ERESTARTSYS)
return ret;
else
BUG_ON(ret != 0);
}
 
fill_escape(&cmds->escape, sizeof(cmds->body));
cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
cmds->body.header.streamId = stream_id;
cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
cmds->body.items[0].value = false;
fill_flush(&cmds->flush, stream_id);
 
vmw_fifo_commit(dev_priv, sizeof(*cmds));
 
return 0;
}
 
/**
* Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
*
* With the introduction of screen objects buffers could now be
* used with GMRs instead of being locked to vram.
*/
static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool pin, bool inter)
{
if (!pin)
return vmw_dmabuf_unpin(dev_priv, buf, inter);
 
if (dev_priv->active_display_unit == vmw_du_legacy)
return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
 
return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
}
 
/**
* Stop or pause a stream.
*
* If the stream is paused, the NO_EVICT flag is removed from the buffer,
* but the buffer is left in vram. This allows, for instance, mode_set to
* evict it should it need to.
*
* The caller must hold the overlay lock.
*
* @stream_id which stream to stop/pause.
* @pause true to pause, false to stop completely.
*/
static int vmw_overlay_stop(struct vmw_private *dev_priv,
uint32_t stream_id, bool pause,
bool interruptible)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
struct vmw_stream *stream = &overlay->stream[stream_id];
int ret;
 
/* if no buffer is attached, the stream is completely stopped */
if (!stream->buf)
return 0;
 
/* If the stream is paused this is already done */
if (!stream->paused) {
ret = vmw_overlay_send_stop(dev_priv, stream_id,
interruptible);
if (ret)
return ret;
 
/* We just remove the NO_EVICT flag so no -ENOMEM */
ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
interruptible);
if (interruptible && ret == -ERESTARTSYS)
return ret;
else
BUG_ON(ret != 0);
}
 
if (!pause) {
vmw_dmabuf_unreference(&stream->buf);
stream->paused = false;
} else {
stream->paused = true;
}
 
return 0;
}
 
/**
* Update a stream and send any put or stop fifo commands needed.
*
* The caller must hold the overlay lock.
*
* Returns
* -ENOMEM if buffer doesn't fit in vram.
* -ERESTARTSYS if interrupted.
*/
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
struct vmw_stream *stream = &overlay->stream[arg->stream_id];
int ret = 0;
 
if (!buf)
return -EINVAL;
 
DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
stream->buf, buf, stream->paused ? "" : "not ");
 
if (stream->buf != buf) {
ret = vmw_overlay_stop(dev_priv, arg->stream_id,
false, interruptible);
if (ret)
return ret;
} else if (!stream->paused) {
/* If the buffers match and not paused then just send
* the put command, no need to do anything else.
*/
ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
if (ret == 0)
stream->saved = *arg;
else
BUG_ON(!interruptible);
 
return ret;
}
 
/* We don't start the old stream if we are interrupted.
* Might return -ENOMEM if it can't fit the buffer in vram.
*/
ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
if (ret)
return ret;
 
ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
if (ret) {
/* This one needs to happen no matter what. We only remove
* the NO_EVICT flag so this is safe from -ENOMEM.
*/
BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
!= 0);
return ret;
}
 
if (stream->buf != buf)
stream->buf = vmw_dmabuf_reference(buf);
stream->saved = *arg;
/* stream is no longer stopped/paused */
stream->paused = false;
 
return 0;
}
 
/**
* Stop all streams.
*
* Used by the fb code when starting.
*
* Takes the overlay lock.
*/
int vmw_overlay_stop_all(struct vmw_private *dev_priv)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
int i, ret;
 
if (!overlay)
return 0;
 
mutex_lock(&overlay->mutex);
 
for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
struct vmw_stream *stream = &overlay->stream[i];
if (!stream->buf)
continue;
 
ret = vmw_overlay_stop(dev_priv, i, false, false);
WARN_ON(ret != 0);
}
 
mutex_unlock(&overlay->mutex);
 
return 0;
}
 
/**
* Try to resume all paused streams.
*
* Used by the kms code after moving a new scanout buffer to vram.
*
* Takes the overlay lock.
*/
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
int i, ret;
 
if (!overlay)
return 0;
 
mutex_lock(&overlay->mutex);
 
for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
struct vmw_stream *stream = &overlay->stream[i];
if (!stream->paused)
continue;
 
ret = vmw_overlay_update_stream(dev_priv, stream->buf,
&stream->saved, false);
if (ret != 0)
DRM_INFO("%s: *warning* failed to resume stream %i\n",
__func__, i);
}
 
mutex_unlock(&overlay->mutex);
 
return 0;
}
 
/**
* Pauses all active streams.
*
* Used by the kms code when moving a new scanout buffer to vram.
*
* Takes the overlay lock.
*/
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
int i, ret;
 
if (!overlay)
return 0;
 
mutex_lock(&overlay->mutex);
 
for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
if (overlay->stream[i].paused)
DRM_INFO("%s: *warning* stream %i already paused\n",
__func__, i);
ret = vmw_overlay_stop(dev_priv, i, true, false);
WARN_ON(ret != 0);
}
 
mutex_unlock(&overlay->mutex);
 
return 0;
}
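A sketch of the intended kms-side usage of the pause/resume helpers above when moving a new scanout buffer into vram; the pin step below is only a placeholder for the real buffer move.
 
static int example_set_scanout(struct vmw_private *dev_priv,
			       struct vmw_dma_buffer *new_scanout)
{
	int ret;
 
	/* Park all overlays so their buffers may be evicted if needed. */
	vmw_overlay_pause_all(dev_priv);
 
	ret = vmw_dmabuf_pin_in_vram(dev_priv, new_scanout, false);
 
	/* Restart whatever streams were paused, whether or not the move worked. */
	vmw_overlay_resume_all(dev_priv);
 
	return ret;
}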
 
 
static bool vmw_overlay_available(const struct vmw_private *dev_priv)
{
return (dev_priv->overlay_priv != NULL &&
((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
VMW_OVERLAY_CAP_MASK));
}
 
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_overlay *overlay = dev_priv->overlay_priv;
struct drm_vmw_control_stream_arg *arg =
(struct drm_vmw_control_stream_arg *)data;
struct vmw_dma_buffer *buf;
struct vmw_resource *res;
int ret;
 
if (!vmw_overlay_available(dev_priv))
return -ENOSYS;
 
ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
if (ret)
return ret;
 
mutex_lock(&overlay->mutex);
 
if (!arg->enabled) {
ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
goto out_unlock;
}
 
ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
if (ret)
goto out_unlock;
 
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
 
vmw_dmabuf_unreference(&buf);
 
out_unlock:
mutex_unlock(&overlay->mutex);
vmw_resource_unreference(&res);
 
return ret;
}
 
int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
if (!vmw_overlay_available(dev_priv))
return 0;
 
return VMW_MAX_NUM_STREAMS;
}
 
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
int i, k;
 
if (!vmw_overlay_available(dev_priv))
return 0;
 
mutex_lock(&overlay->mutex);
 
for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
if (!overlay->stream[i].claimed)
k++;
 
mutex_unlock(&overlay->mutex);
 
return k;
}
 
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
int i;
 
if (!overlay)
return -ENOSYS;
 
mutex_lock(&overlay->mutex);
 
for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
 
if (overlay->stream[i].claimed)
continue;
 
overlay->stream[i].claimed = true;
*out = i;
mutex_unlock(&overlay->mutex);
return 0;
}
 
mutex_unlock(&overlay->mutex);
return -ESRCH;
}
 
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
 
BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);
 
if (!overlay)
return -ENOSYS;
 
mutex_lock(&overlay->mutex);
 
WARN_ON(!overlay->stream[stream_id].claimed);
vmw_overlay_stop(dev_priv, stream_id, false, false);
overlay->stream[stream_id].claimed = false;
 
mutex_unlock(&overlay->mutex);
return 0;
}
 
int vmw_overlay_init(struct vmw_private *dev_priv)
{
struct vmw_overlay *overlay;
int i;
 
if (dev_priv->overlay_priv)
return -EINVAL;
 
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return -ENOMEM;
 
mutex_init(&overlay->mutex);
for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
overlay->stream[i].buf = NULL;
overlay->stream[i].paused = false;
overlay->stream[i].claimed = false;
}
 
dev_priv->overlay_priv = overlay;
 
return 0;
}
 
int vmw_overlay_close(struct vmw_private *dev_priv)
{
struct vmw_overlay *overlay = dev_priv->overlay_priv;
bool forgotten_buffer = false;
int i;
 
if (!overlay)
return -ENOSYS;
 
for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
if (overlay->stream[i].buf) {
forgotten_buffer = true;
vmw_overlay_stop(dev_priv, i, false, false);
}
}
 
WARN_ON(forgotten_buffer);
 
dev_priv->overlay_priv = NULL;
kfree(overlay);
 
return 0;
}
/drivers/video/drm/vmwgfx/vmwgfx_prime.c
0,0 → 1,137
/**************************************************************************
*
* Copyright © 2013 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors:
* Thomas Hellstrom <thellstrom@vmware.com>
*
*/
 
#include "vmwgfx_drv.h"
#include <linux/dma-buf.h>
#include <drm/ttm/ttm_object.h>
 
/*
* DMA-BUF attach- and mapping methods. No need to implement
* these until we have other virtual devices use them.
*/
 
static int vmw_prime_map_attach(struct dma_buf *dma_buf,
struct device *target_dev,
struct dma_buf_attachment *attach)
{
return -ENOSYS;
}
 
static void vmw_prime_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
}
 
static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
return ERR_PTR(-ENOSYS);
}
 
static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgb,
enum dma_data_direction dir)
{
}
 
static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf)
{
return NULL;
}
 
static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
}
 
static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
unsigned long page_num)
{
return NULL;
}
 
static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
unsigned long page_num, void *addr)
{
 
}
static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
unsigned long page_num)
{
return NULL;
}
 
static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf,
unsigned long page_num, void *addr)
{
 
}
 
static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf,
struct vm_area_struct *vma)
{
WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n");
return -ENOSYS;
}
 
const struct dma_buf_ops vmw_prime_dmabuf_ops = {
.attach = vmw_prime_map_attach,
.detach = vmw_prime_map_detach,
.map_dma_buf = vmw_prime_map_dma_buf,
.unmap_dma_buf = vmw_prime_unmap_dma_buf,
.release = NULL,
.kmap = vmw_prime_dmabuf_kmap,
.kmap_atomic = vmw_prime_dmabuf_kmap_atomic,
.kunmap = vmw_prime_dmabuf_kunmap,
.kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
.mmap = vmw_prime_dmabuf_mmap,
.vmap = vmw_prime_dmabuf_vmap,
.vunmap = vmw_prime_dmabuf_vunmap,
};
 
int vmw_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv,
int fd, u32 *handle)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 
return ttm_prime_fd_to_handle(tfile, fd, handle);
}
 
int vmw_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t handle, uint32_t flags,
int *prime_fd)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 
return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
}
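/*
 * A minimal sketch of how the two PRIME wrappers above are typically hooked
 * up: they are only reached through the prime_fd_to_handle and
 * prime_handle_to_fd fields of struct drm_driver. The initializer below is
 * abridged and its name is illustrative; the real, full initializer lives in
 * the driver registration code.
 */
static struct drm_driver vmw_prime_driver_sketch = {
        /* ... core fields, ioctl table and fops omitted ... */
        .prime_fd_to_handle = vmw_prime_fd_to_handle,
        .prime_handle_to_fd = vmw_prime_handle_to_fd,
};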
/drivers/video/drm/vmwgfx/vmwgfx_reg.h
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
39,19 → 39,17
#define VMWGFX_IRQSTATUS_PORT 0x8
 
struct svga_guest_mem_descriptor {
__le32 ppn;
__le32 num_pages;
u32 ppn;
u32 num_pages;
};
 
struct svga_fifo_cmd_fence {
__le32 fence;
u32 fence;
};
 
#define SVGA_SYNC_GENERIC 1
#define SVGA_SYNC_FIFOFULL 2
 
#include "svga_types.h"
#include "device_include/svga3d_reg.h"
 
#include "svga3d_reg.h"
 
#endif
/drivers/video/drm/vmwgfx/vmwgfx_resource.c
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
31,6 → 31,7
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
 
#define VMW_RES_EVICT_ERR_COUNT 10
 
88,6 → 89,11
return res;
}
 
struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
return kref_get_unless_zero(&res->kref) ? res : NULL;
}
 
/**
* vmw_resource_release_id - release a resource id to the id manager.
116,6 → 122,7
int id;
struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
write_lock(&dev_priv->resource_lock);
res->avail = false;
list_del_init(&res->lru_head);
write_unlock(&dev_priv->resource_lock);
128,6 → 135,7
struct ttm_validate_buffer val_buf;
 
val_buf.bo = bo;
val_buf.shared = false;
res->func->unbind(res, false, &val_buf);
}
res->backup_dirty = false;
137,10 → 145,10
}
 
if (likely(res->hw_destroy != NULL)) {
res->hw_destroy(res);
mutex_lock(&dev_priv->binding_mutex);
vmw_context_binding_res_list_kill(&res->binding_head);
vmw_binding_res_list_kill(&res->binding_head);
mutex_unlock(&dev_priv->binding_mutex);
res->hw_destroy(res);
}
 
id = res->id;
150,20 → 158,17
kfree(res);
 
write_lock(&dev_priv->resource_lock);
 
if (id != -1)
idr_remove(idr, id);
write_unlock(&dev_priv->resource_lock);
}
 
void vmw_resource_unreference(struct vmw_resource **p_res)
{
struct vmw_resource *res = *p_res;
struct vmw_private *dev_priv = res->dev_priv;
 
*p_res = NULL;
write_lock(&dev_priv->resource_lock);
kref_put(&res->kref, vmw_resource_release);
write_unlock(&dev_priv->resource_lock);
}
 
 
254,7 → 259,7
write_unlock(&dev_priv->resource_lock);
}
 
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
struct idr *idr, int id)
{
struct vmw_resource *res;
261,10 → 266,9
 
read_lock(&dev_priv->resource_lock);
res = idr_find(idr, id);
if (res && res->avail)
kref_get(&res->kref);
else
if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
res = NULL;
 
read_unlock(&dev_priv->resource_lock);
 
if (unlikely(res == NULL))
350,7 → 354,7
}
 
*out_surf = NULL;
ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
return ret;
}
 
400,7 → 404,7
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
 
// ttm_prime_object_kfree(vmw_user_bo, prime);
ttm_prime_object_kfree(vmw_user_bo, prime);
}
 
int vmw_dmabuf_init(struct vmw_private *dev_priv,
424,7 → 428,7
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
0, interruptible,
NULL, acc_size, NULL, bo_free);
NULL, acc_size, NULL, NULL, bo_free);
return ret;
}
 
477,7 → 481,8
uint32_t size,
bool shareable,
uint32_t *handle,
struct vmw_dma_buffer **p_dma_buf)
struct vmw_dma_buffer **p_dma_buf,
struct ttm_base_object **p_base)
{
struct vmw_user_dma_buffer *user_bo;
struct ttm_buffer_object *tmp;
498,7 → 503,6
return ret;
 
tmp = ttm_bo_reference(&user_bo->dma.base);
/*
ret = ttm_prime_object_init(tfile,
size,
&user_bo->prime,
510,9 → 514,12
ttm_bo_unref(&tmp);
goto out_no_base_object;
}
*/
 
*p_dma_buf = &user_bo->dma;
if (p_base) {
*p_base = &user_bo->prime.base;
kref_get(&(*p_base)->refcount);
}
*handle = user_bo->prime.base.hash.key;
 
out_no_base_object:
564,8 → 571,12
int ret;
 
if (flags & drm_vmw_synccpu_allow_cs) {
struct ttm_bo_device *bdev = bo->bdev;
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
long lret;
 
if (nonblock)
return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;
 
// spin_lock(&bdev->fence_lock);
// ret = ttm_bo_wait(bo, false, true,
// !!(flags & drm_vmw_synccpu_dontblock));
573,10 → 584,10
return ret;
}
 
// ret = ttm_bo_synccpu_write_grab
// (bo, !!(flags & drm_vmw_synccpu_dontblock));
// if (unlikely(ret != 0))
// return ret;
ret = ttm_bo_synccpu_write_grab
(bo, !!(flags & drm_vmw_synccpu_dontblock));
if (unlikely(ret != 0))
return ret;
 
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_SYNCCPU_WRITE, &existed);
624,6 → 635,7
struct vmw_dma_buffer *dma_buf;
struct vmw_user_dma_buffer *user_bo;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct ttm_base_object *buffer_base;
int ret;
 
if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
636,7 → 648,8
 
switch (arg->op) {
case drm_vmw_synccpu_grab:
ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
&buffer_base);
if (unlikely(ret != 0))
return ret;
 
644,6 → 657,7
dma);
ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
vmw_dmabuf_unreference(&dma_buf);
ttm_base_object_unref(&buffer_base);
if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
ret != -EBUSY)) {
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
686,7 → 700,8
return ret;
 
ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
req->size, false, &handle, &dma_buf);
req->size, false, &handle, &dma_buf,
NULL);
if (unlikely(ret != 0))
goto out_no_dmabuf;
 
716,7 → 731,8
#endif
 
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t handle, struct vmw_dma_buffer **out)
uint32_t handle, struct vmw_dma_buffer **out,
struct ttm_base_object **p_base)
{
struct vmw_user_dma_buffer *vmw_user_bo;
struct ttm_base_object *base;
738,6 → 754,9
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
prime.base);
(void)ttm_bo_reference(&vmw_user_bo->dma.base);
if (p_base)
*p_base = base;
else
ttm_base_object_unref(&base);
*out = &vmw_user_bo->dma;
 
894,20 → 913,21
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_stream_size,
false, true);
ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for stream"
" creation.\n");
goto out_unlock;
 
goto out_ret;
}
 
 
stream = kmalloc(sizeof(*stream), GFP_KERNEL);
if (unlikely(stream == NULL)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_stream_size);
ret = -ENOMEM;
goto out_unlock;
goto out_ret;
}
 
res = &stream->stream.res;
920,7 → 940,7
 
ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
if (unlikely(ret != 0))
goto out_unlock;
goto out_ret;
 
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
934,8 → 954,7
arg->stream_id = res->id;
out_err:
vmw_resource_unreference(&res);
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
out_ret:
return ret;
}
#endif
972,13 → 991,23
return ret;
}
 
#if 0
 
/**
* vmw_dumb_create - Create a dumb kms buffer
*
* @file_priv: Pointer to a struct drm_file identifying the caller.
* @dev: Pointer to the drm device.
* @args: Pointer to a struct drm_mode_create_dumb structure
*
* This is a driver callback for the core drm create_dumb functionality.
* Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
* that the arguments have a different format.
*/
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_dma_buffer *dma_buf;
int ret;
 
991,7 → 1020,7
 
ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
args->size, false, &args->handle,
&dma_buf);
&dma_buf, NULL);
if (unlikely(ret != 0))
goto out_no_dmabuf;
 
1000,7 → 1029,6
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
#endif
 
/**
* vmw_dumb_map_offset - Return the address space offset of a dumb buffer
1020,7 → 1048,7
struct vmw_dma_buffer *out_buf;
int ret;
 
ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
if (ret != 0)
return -EINVAL;
 
1138,14 → 1166,16
* command submission.
*
* @res: Pointer to the struct vmw_resource to unreserve.
* @switch_backup: Backup buffer has been switched.
* @new_backup: Pointer to new backup buffer if command submission
* switched.
* @new_backup_offset: New backup offset if @new_backup is !NULL.
* switched. May be NULL.
* @new_backup_offset: New backup offset if @switch_backup is true.
*
* Currently unreserving a resource means putting it back on the device's
* resource lru list, so that it can be evicted if necessary.
*/
void vmw_resource_unreserve(struct vmw_resource *res,
bool switch_backup,
struct vmw_dma_buffer *new_backup,
unsigned long new_backup_offset)
{
1154,8 → 1184,7
if (!list_empty(&res->lru_head))
return;
 
if (new_backup && new_backup != res->backup) {
 
if (switch_backup && new_backup != res->backup) {
if (res->backup) {
lockdep_assert_held(&res->backup->base.resv->lock.base);
list_del_init(&res->mob_head);
1162,14 → 1191,18
vmw_dmabuf_unreference(&res->backup);
}
 
if (new_backup) {
res->backup = vmw_dmabuf_reference(new_backup);
lockdep_assert_held(&new_backup->base.resv->lock.base);
list_add_tail(&res->mob_head, &new_backup->res_list);
} else {
res->backup = NULL;
}
if (new_backup)
}
if (switch_backup)
res->backup_offset = new_backup_offset;
 
if (!res->func->may_evict || res->id == -1)
if (!res->func->may_evict || res->id == -1 || res->pin_count)
return;
 
write_lock(&dev_priv->resource_lock);
1206,8 → 1239,9
 
INIT_LIST_HEAD(&val_list);
val_buf->bo = ttm_bo_reference(&res->backup->base);
val_buf->shared = false;
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(NULL, &val_list);
ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
 
1244,7 → 1278,8
* the buffer may not be bound to the resource at this point.
*
*/
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
bool no_backup)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
1255,10 → 1290,14
 
if (res->func->needs_backup && res->backup == NULL &&
!no_backup) {
ret = vmw_resource_buf_alloc(res, true);
if (unlikely(ret != 0))
ret = vmw_resource_buf_alloc(res, interruptible);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate a backup buffer "
"of size %lu. bytes\n",
(unsigned long) res->backup_size);
return ret;
}
}
 
return 0;
}
1290,7 → 1329,7
* @res: The resource to evict.
* @interruptible: Whether to wait interruptible.
*/
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
struct ttm_validate_buffer val_buf;
const struct vmw_res_func *func = res->func;
1299,6 → 1338,7
BUG_ON(!func->may_evict);
 
val_buf.bo = NULL;
val_buf.shared = false;
ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
if (unlikely(ret != 0))
return ret;
1340,10 → 1380,11
struct ttm_validate_buffer val_buf;
unsigned err_count = 0;
 
if (likely(!res->func->may_evict))
if (!res->func->create)
return 0;
 
val_buf.bo = NULL;
val_buf.shared = false;
if (res->backup)
val_buf.bo = &res->backup->base;
do {
1411,25 → 1452,16
struct vmw_fence_obj *fence)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
struct vmw_fence_obj *old_fence_obj;
 
struct vmw_private *dev_priv =
container_of(bdev, struct vmw_private, bdev);
 
if (fence == NULL)
if (fence == NULL) {
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
else
driver->sync_obj_ref(fence);
 
spin_lock(&bdev->fence_lock);
 
old_fence_obj = bo->sync_obj;
bo->sync_obj = fence;
 
spin_unlock(&bdev->fence_lock);
 
if (old_fence_obj)
vmw_fence_obj_unreference(&old_fence_obj);
reservation_object_add_excl_fence(bo->resv, &fence->base);
fence_put(&fence->base);
} else
reservation_object_add_excl_fence(bo->resv, &fence->base);
}
 
/**
1436,7 → 1468,7
* vmw_resource_move_notify - TTM move_notify_callback
*
* @bo: The TTM buffer object about to move.
* @mem: The truct ttm_mem_reg indicating to what memory
* @mem: The struct ttm_mem_reg indicating to what memory
* region the move is taking place.
*
* Evicts the Guest Backed hardware resource if the backup
1454,8 → 1486,51
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
/**
* vmw_query_readback_all - Read back cached query states
*
* @dx_query_mob: Buffer containing the DX query MOB
*
* Read back cached states from the device if they exist. This function
* assumes binding_mutex is held.
*/
int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
{
struct vmw_resource *dx_query_ctx;
struct vmw_private *dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXReadbackAllQuery body;
} *cmd;
 
 
/* No query bound, so do nothing */
if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
return 0;
 
dx_query_ctx = dx_query_mob->dx_query_ctx;
dev_priv = dx_query_ctx->dev_priv;
 
cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for "
"query MOB read back.\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = dx_query_ctx->id;
 
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
/* Triggers a rebind the next time affected context is bound */
dx_query_mob->dx_query_ctx = NULL;
 
return 0;
}
}
 
/**
* vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
*
1534,3 → 1609,107
 
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
 
/**
* vmw_resource_pin - Add a pin reference on a resource
*
* @res: The resource to add a pin reference on
*
* This function adds a pin reference, and if needed validates the resource.
* Having a pin reference means that the resource can never be evicted, and
* its id will never change as long as there is a pin reference.
* This function returns 0 on success and a negative error code on failure.
*/
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
 
ttm_write_lock(&dev_priv->reservation_sem, interruptible);
mutex_lock(&dev_priv->cmdbuf_mutex);
ret = vmw_resource_reserve(res, interruptible, false);
if (ret)
goto out_no_reserve;
 
if (res->pin_count == 0) {
struct vmw_dma_buffer *vbo = NULL;
 
if (res->backup) {
vbo = res->backup;
 
ttm_bo_reserve(&vbo->base, interruptible, false, false,
NULL);
if (!vbo->pin_count) {
ret = ttm_bo_validate
(&vbo->base,
res->func->backup_placement,
interruptible, false);
if (ret) {
ttm_bo_unreserve(&vbo->base);
goto out_no_validate;
}
}
 
/* Do we really need to pin the MOB as well? */
vmw_bo_pin_reserved(vbo, true);
}
ret = vmw_resource_validate(res);
if (vbo)
ttm_bo_unreserve(&vbo->base);
if (ret)
goto out_no_validate;
}
res->pin_count++;
 
out_no_validate:
vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
mutex_unlock(&dev_priv->cmdbuf_mutex);
ttm_write_unlock(&dev_priv->reservation_sem);
 
return ret;
}
 
/**
* vmw_resource_unpin - Remove a pin reference from a resource
*
* @res: The resource to remove a pin reference from
*
* Having a pin reference means that the resource can never be evicted, and
* its id will never change as long as there is a pin reference.
*/
void vmw_resource_unpin(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
 
ttm_read_lock(&dev_priv->reservation_sem, false);
mutex_lock(&dev_priv->cmdbuf_mutex);
 
ret = vmw_resource_reserve(res, false, true);
WARN_ON(ret);
 
WARN_ON(res->pin_count == 0);
if (--res->pin_count == 0 && res->backup) {
struct vmw_dma_buffer *vbo = res->backup;
 
ttm_bo_reserve(&vbo->base, false, false, false, NULL);
vmw_bo_pin_reserved(vbo, false);
ttm_bo_unreserve(&vbo->base);
}
 
vmw_resource_unreserve(res, false, NULL, 0UL);
 
mutex_unlock(&dev_priv->cmdbuf_mutex);
ttm_read_unlock(&dev_priv->reservation_sem);
}
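/*
 * A minimal sketch of the pin/unpin pattern documented above: take a pin
 * reference so the resource (and its id) cannot be evicted or change while
 * it is in use, then drop the reference when done. The helper name and the
 * operation in the middle are placeholders.
 */
static int vmw_resource_pin_sketch(struct vmw_resource *res)
{
        int ret;

        ret = vmw_resource_pin(res, true);
        if (ret)
                return ret;

        /* ... use res->id here; the resource cannot be evicted ... */

        vmw_resource_unpin(res);
        return 0;
}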
 
/**
* vmw_res_type - Return the resource type
*
* @res: Pointer to the resource
*/
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
return res->func->res_type;
}
/drivers/video/drm/vmwgfx/vmwgfx_resource_priv.h
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2012-2014 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
30,6 → 30,12
 
#include "vmwgfx_drv.h"
 
enum vmw_cmdbuf_res_state {
VMW_CMDBUF_RES_COMMITTED,
VMW_CMDBUF_RES_ADD,
VMW_CMDBUF_RES_DEL
};
 
/**
* struct vmw_user_resource_conv - Identify a derived user-exported resource
* type and provide a function to convert its ttm_base_object pointer to
55,8 → 61,10
* @bind: Bind a hardware resource to persistent buffer storage.
* @unbind: Unbind a hardware resource from persistent
* buffer storage.
* @commit_notify: If the resource is a command buffer managed resource,
* callback to notify that a define or remove command
* has been committed to the device.
*/
 
struct vmw_res_func {
enum vmw_res_type res_type;
bool needs_backup;
71,6 → 79,8
int (*unbind) (struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
void (*commit_notify)(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state);
};
 
int vmw_resource_alloc_id(struct vmw_resource *res);
/drivers/video/drm/vmwgfx/vmwgfx_scrn.c
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
26,6 → 26,7
**************************************************************************/
 
#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>
 
 
#define vmw_crtc_to_sou(x) \
35,10 → 36,55
#define vmw_connector_to_sou(x) \
container_of(x, struct vmw_screen_object_unit, base.connector)
 
/**
* struct vmw_kms_sou_surface_dirty - Closure structure for
* blit surface to screen command.
* @base: The base type we derive from. Used by vmw_kms_helper_dirty().
* @left: Left side of bounding box.
* @right: Right side of bounding box.
* @top: Top side of bounding box.
* @bottom: Bottom side of bounding box.
* @dst_x: Difference between source clip rects and framebuffer coordinates.
* @dst_y: Difference between source clip rects and framebuffer coordinates.
* @sid: Surface id of surface to copy from.
*/
struct vmw_kms_sou_surface_dirty {
struct vmw_kms_dirty base;
s32 left, right, top, bottom;
s32 dst_x, dst_y;
u32 sid;
};
 
/*
* SVGA commands that are used by this code. Please see the device headers
* for explanation.
*/
struct vmw_kms_sou_readback_blit {
uint32 header;
SVGAFifoCmdBlitScreenToGMRFB body;
};
 
struct vmw_kms_sou_dmabuf_blit {
uint32 header;
SVGAFifoCmdBlitGMRFBToScreen body;
};
 
struct vmw_kms_sou_dirty_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdBlitSurfaceToScreen body;
};
 
 
/*
* Other structs.
*/
 
struct vmw_screen_object_display {
unsigned num_implicit;
 
struct vmw_framebuffer *implicit_fb;
SVGAFifoCmdDefineGMRFB cur;
struct vmw_dma_buffer *pinned_gmrfb;
};
 
/**
56,7 → 102,7
 
static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
// vmw_display_unit_cleanup(&sou->base);
vmw_du_cleanup(&sou->base);
kfree(sou);
}
 
201,14 → 247,7
static void vmw_sou_backing_free(struct vmw_private *dev_priv,
struct vmw_screen_object_unit *sou)
{
struct ttm_buffer_object *bo;
 
if (unlikely(sou->buffer == NULL))
return;
 
bo = &sou->buffer->base;
ttm_bo_unref(&bo);
sou->buffer = NULL;
vmw_dmabuf_unreference(&sou->buffer);
sou->buffer_size = 0;
}
 
234,11 → 273,11
/* After we have alloced the backing store might not be able to
* resume the overlays, this is preferred to failing to alloc.
*/
// vmw_overlay_pause_all(dev_priv);
vmw_overlay_pause_all(dev_priv);
ret = vmw_dmabuf_init(dev_priv, sou->buffer, size,
&vmw_vram_ne_placement,
false, &vmw_dmabuf_bo_free);
// vmw_overlay_resume_all(dev_priv);
vmw_overlay_resume_all(dev_priv);
 
if (unlikely(ret != 0))
sou->buffer = NULL; /* vmw_dmabuf_init frees on error */
273,13 → 312,13
dev_priv = vmw_priv(crtc->dev);
 
if (set->num_connectors > 1) {
DRM_ERROR("to many connectors\n");
DRM_ERROR("Too many connectors\n");
return -EINVAL;
}
 
if (set->num_connectors == 1 &&
set->connectors[0] != &sou->base.connector) {
DRM_ERROR("connector doesn't match %p %p\n",
DRM_ERROR("Connector doesn't match %p %p\n",
set->connectors[0], &sou->base.connector);
return -EINVAL;
}
330,7 → 369,7
return -EINVAL;
}
 
// vmw_fb_off(dev_priv);
vmw_svga_enable(dev_priv);
 
if (mode->hdisplay != crtc->mode.hdisplay ||
mode->vdisplay != crtc->mode.vdisplay) {
389,6 → 428,38
return 0;
}
 
/**
* Returns whether this unit can be page flipped.
* Must be called with the mode_config mutex held.
*/
static bool vmw_sou_screen_object_flippable(struct vmw_private *dev_priv,
struct drm_crtc *crtc)
{
struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
 
if (!sou->base.is_implicit)
return true;
 
if (dev_priv->sou_priv->num_implicit != 1)
return false;
 
return true;
}
 
/**
* Update the implicit fb to the current fb of this crtc.
* Must be called with the mode_config mutex held.
*/
static void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
struct drm_crtc *crtc)
{
struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
 
BUG_ON(!sou->base.is_implicit);
 
dev_priv->sou_priv->implicit_fb =
vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
}
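/*
 * A minimal sketch of how a page-flip path might combine the two helpers
 * above: refuse the flip when the unit is not flippable, and update the
 * implicit framebuffer once the flip has been queued. The surrounding flip
 * code and the helper name are illustrative only.
 */
static int vmw_sou_flip_prepare_sketch(struct vmw_private *dev_priv,
                                       struct drm_crtc *crtc)
{
        if (!vmw_sou_screen_object_flippable(dev_priv, crtc))
                return -EINVAL;

        /* ... queue the flip command here ... */

        vmw_sou_update_implicit_fb(dev_priv, crtc);
        return 0;
}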
static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.save = vmw_du_crtc_save,
.restore = vmw_du_crtc_restore,
422,7 → 493,7
vmw_sou_destroy(vmw_connector_to_sou(connector));
}
 
static struct drm_connector_funcs vmw_legacy_connector_funcs = {
static struct drm_connector_funcs vmw_sou_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.save = vmw_du_connector_save,
.restore = vmw_du_connector_restore,
457,7 → 528,7
sou->base.pref_mode = NULL;
sou->base.is_implicit = true;
 
drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
connector->status = vmw_du_connector_detect(connector, true);
 
480,7 → 551,7
return 0;
}
 
int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
int i, ret;
504,6 → 575,9
dev_priv->sou_priv->num_implicit = 0;
dev_priv->sou_priv->implicit_fb = NULL;
 
ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
if (unlikely(ret != 0))
goto err_free;
 
ret = drm_mode_create_dirty_info_property(dev);
if (unlikely(ret != 0))
512,12 → 586,14
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
vmw_sou_init(dev_priv, i);
 
DRM_INFO("Screen objects system initialized\n");
dev_priv->active_display_unit = vmw_du_screen_object;
 
DRM_INFO("Screen Objects Display Unit initialized\n");
 
return 0;
 
err_vblank_cleanup:
// drm_vblank_cleanup(dev);
drm_vblank_cleanup(dev);
err_free:
kfree(dev_priv->sou_priv);
dev_priv->sou_priv = NULL;
525,7 → 601,7
return ret;
}
 
int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
532,7 → 608,7
if (!dev_priv->sou_priv)
return -ENOSYS;
 
// drm_vblank_cleanup(dev);
drm_vblank_cleanup(dev);
 
kfree(dev_priv->sou_priv);
 
539,86 → 615,377
return 0;
}
 
static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer)
{
struct vmw_dma_buffer *buf =
container_of(framebuffer, struct vmw_framebuffer_dmabuf,
base)->buffer;
int depth = framebuffer->base.depth;
struct {
uint32_t header;
SVGAFifoCmdDefineGMRFB body;
} *cmd;
 
/* Emulate RGBA support; contrary to svga_reg.h, this is not
* supported by hosts. This is only a problem if we are reading
* this value later and expecting what we uploaded back.
*/
if (depth == 32)
depth = 24;
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (!cmd) {
DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
return -ENOMEM;
}
 
cmd->header = SVGA_CMD_DEFINE_GMRFB;
cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
cmd->body.format.colorDepth = depth;
cmd->body.format.reserved = 0;
cmd->body.bytesPerLine = framebuffer->base.pitches[0];
/* Buffer is reserved in vram or GMR */
vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
}
 
/**
* Returns if this unit can be page flipped.
* Must be called with the mode_config mutex held.
* vmw_sou_surface_fifo_commit - Callback to fill in and submit a
* blit surface to screen command.
*
* @dirty: The closure structure.
*
* Fills in the missing fields in the command, and translates the cliprects
* to match the encoded destination bounding box.
*/
bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
struct drm_crtc *crtc)
static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
{
struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
struct vmw_kms_sou_surface_dirty *sdirty =
container_of(dirty, typeof(*sdirty), base);
struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
int i;
 
if (!sou->base.is_implicit)
return true;
cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
cmd->header.size = sizeof(cmd->body) + region_size;
 
if (dev_priv->sou_priv->num_implicit != 1)
return false;
/*
* Use the destination bounding box to specify destination - and
* source bounding regions.
*/
cmd->body.destRect.left = sdirty->left;
cmd->body.destRect.right = sdirty->right;
cmd->body.destRect.top = sdirty->top;
cmd->body.destRect.bottom = sdirty->bottom;
 
return true;
cmd->body.srcRect.left = sdirty->left + trans_x;
cmd->body.srcRect.right = sdirty->right + trans_x;
cmd->body.srcRect.top = sdirty->top + trans_y;
cmd->body.srcRect.bottom = sdirty->bottom + trans_y;
 
cmd->body.srcImage.sid = sdirty->sid;
cmd->body.destScreenId = dirty->unit->unit;
 
/* Blits are relative to the destination rect. Translate. */
for (i = 0; i < dirty->num_hits; ++i, ++blit) {
blit->left -= sdirty->left;
blit->right -= sdirty->left;
blit->top -= sdirty->top;
blit->bottom -= sdirty->top;
}
 
vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));
 
sdirty->left = sdirty->top = S32_MAX;
sdirty->right = sdirty->bottom = S32_MIN;
}
 
/**
* Update the implicit fb to the current fb of this crtc.
* Must be called with the mode_config mutex held.
* vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
*
* @dirty: The closure structure
*
* Encodes a SVGASignedRect cliprect and updates the bounding box of the
* BLIT_SURFACE_TO_SCREEN command.
*/
void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
struct drm_crtc *crtc)
static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
{
struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
struct vmw_kms_sou_surface_dirty *sdirty =
container_of(dirty, typeof(*sdirty), base);
struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
 
BUG_ON(!sou->base.is_implicit);
/* Destination rect. */
blit += dirty->num_hits;
blit->left = dirty->unit_x1;
blit->top = dirty->unit_y1;
blit->right = dirty->unit_x2;
blit->bottom = dirty->unit_y2;
 
dev_priv->sou_priv->implicit_fb =
vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
/* Destination bounding box */
sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
 
dirty->num_hits++;
}
 
#include "bitmap.h"
/**
* vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
*
* @dev_priv: Pointer to the device private structure.
* @framebuffer: Pointer to the surface-buffer backed framebuffer.
* @clips: Array of clip rects. Either @clips or @vclips must be NULL.
* @vclips: Alternate array of clip rects. Either @clips or @vclips must
* be NULL.
* @srf: Pointer to surface to blit from. If NULL, the surface attached
* to @framebuffer will be used.
* @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
* @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
* @num_clips: Number of clip rects in @clips.
* @inc: Increment to use when looping over @clips.
* @out_fence: If non-NULL, will return a ref-counted pointer to a
* struct vmw_fence_obj. The returned fence pointer may be NULL in which
* case the device has already synchronized.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
struct drm_vmw_rect *vclips,
struct vmw_resource *srf,
s32 dest_x,
s32 dest_y,
unsigned num_clips, int inc,
struct vmw_fence_obj **out_fence)
{
struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base);
struct vmw_kms_sou_surface_dirty sdirty;
int ret;
 
typedef struct
if (!srf)
srf = &vfbs->surface->res;
 
ret = vmw_kms_helper_resource_prepare(srf, true);
if (ret)
return ret;
 
sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
sdirty.base.clip = vmw_sou_surface_clip;
sdirty.base.dev_priv = dev_priv;
sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
sizeof(SVGASignedRect) * num_clips;
 
sdirty.sid = srf->id;
sdirty.left = sdirty.top = S32_MAX;
sdirty.right = sdirty.bottom = S32_MIN;
sdirty.dst_x = dest_x;
sdirty.dst_y = dest_y;
 
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
dest_x, dest_y, num_clips, inc,
&sdirty.base);
vmw_kms_helper_resource_finish(srf, out_fence);
 
return ret;
}
 
/**
* vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips.
*
* @dirty: The closure structure.
*
* Commits a previously built command buffer of readback clips.
*/
static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
{
kobj_t header;
vmw_fifo_commit(dirty->dev_priv,
sizeof(struct vmw_kms_sou_dmabuf_blit) *
dirty->num_hits);
}
 
uint32_t *data;
uint32_t hot_x;
uint32_t hot_y;
/**
* vmw_sou_dmabuf_clip - Callback to encode a readback cliprect.
*
* @dirty: The closure structure
*
* Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
*/
static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
{
struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
 
struct list_head list;
// struct drm_i915_gem_object *cobj;
}cursor_t;
blit += dirty->num_hits;
blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
blit->body.destScreenId = dirty->unit->unit;
blit->body.srcOrigin.x = dirty->fb_x;
blit->body.srcOrigin.y = dirty->fb_y;
blit->body.destRect.left = dirty->unit_x1;
blit->body.destRect.top = dirty->unit_y1;
blit->body.destRect.right = dirty->unit_x2;
blit->body.destRect.bottom = dirty->unit_y2;
dirty->num_hits++;
}
 
/**
* vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
*
* @dev_priv: Pointer to the device private structure.
* @framebuffer: Pointer to the dma-buffer backed framebuffer.
* @clips: Array of clip rects.
* @num_clips: Number of clip rects in @clips.
* @increment: Increment to use when looping over @clips.
* @interruptible: Whether to perform waits interruptible if possible.
* @out_fence: If non-NULL, will return a ref-counted pointer to a
* struct vmw_fence_obj. The returned fence pointer may be NULL in which
* case the device has already synchronized.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
unsigned num_clips, int increment,
bool interruptible,
struct vmw_fence_obj **out_fence)
{
struct vmw_dma_buffer *buf =
container_of(framebuffer, struct vmw_framebuffer_dmabuf,
base)->buffer;
struct vmw_kms_dirty dirty;
int ret;
 
struct tag_display
ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
false);
if (ret)
return ret;
 
ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
if (unlikely(ret != 0))
goto out_revert;
 
dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
dirty.clip = vmw_sou_dmabuf_clip;
dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
num_clips;
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
0, 0, num_clips, increment, &dirty);
vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
 
return ret;
 
out_revert:
vmw_kms_helper_buffer_revert(buf);
 
return ret;
}
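/*
 * A minimal sketch of dirtying a dma-buffer backed framebuffer, for example
 * from a drm_framebuffer dirty callback. An increment of 1 uses every rect
 * in the clip list, and no fence pointer is requested. The helper name and
 * calling context are illustrative only.
 */
static int vmw_sou_dmabuf_dirty_sketch(struct vmw_private *dev_priv,
                                       struct vmw_framebuffer *vfb,
                                       struct drm_clip_rect *clips,
                                       unsigned num_clips)
{
        return vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, clips,
                                           num_clips, 1, true, NULL);
}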
 
 
/**
* vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
*
* @dirty: The closure structure.
*
* Commits a previously built command buffer of readback clips.
*/
static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
{
int x;
int y;
int width;
int height;
int bpp;
int vrefresh;
int pitch;
int lfb;
vmw_fifo_commit(dirty->dev_priv,
sizeof(struct vmw_kms_sou_readback_blit) *
dirty->num_hits);
}
 
int supported_modes;
struct drm_device *ddev;
struct drm_connector *connector;
struct drm_crtc *crtc;
/**
* vmw_sou_readback_clip - Callback to encode a readback cliprect.
*
* @dirty: The closure structure
*
* Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
*/
static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
{
struct vmw_kms_sou_readback_blit *blit = dirty->cmd;
 
struct list_head cursors;
blit += dirty->num_hits;
blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
blit->body.srcScreenId = dirty->unit->unit;
blit->body.destOrigin.x = dirty->fb_x;
blit->body.destOrigin.y = dirty->fb_y;
blit->body.srcRect.left = dirty->unit_x1;
blit->body.srcRect.top = dirty->unit_y1;
blit->body.srcRect.right = dirty->unit_x2;
blit->body.srcRect.bottom = dirty->unit_y2;
dirty->num_hits++;
}
 
cursor_t *cursor;
int (*init_cursor)(cursor_t*);
cursor_t* (__stdcall *select_cursor)(cursor_t*);
void (*show_cursor)(int show);
void (__stdcall *move_cursor)(cursor_t *cursor, int x, int y);
void (__stdcall *restore_cursor)(int x, int y);
void (*disable_mouse)(void);
u32 mask_seqno;
u32 check_mouse;
u32 check_m_pixel;
u32 dirty;
void (*update)(void);
};
/**
* vmw_kms_sou_readback - Perform a readback from the screen object system to
* a dma-buffer backed framebuffer.
*
* @dev_priv: Pointer to the device private structure.
* @file_priv: Pointer to a struct drm_file identifying the caller.
* Must be set to NULL if @user_fence_rep is NULL.
* @vfb: Pointer to the dma-buffer backed framebuffer.
* @user_fence_rep: User-space provided structure for fence information.
* Must be set to non-NULL if @file_priv is non-NULL.
* @vclips: Array of clip rects.
* @num_clips: Number of clip rects in @vclips.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct drm_vmw_rect *vclips,
uint32_t num_clips)
{
struct vmw_dma_buffer *buf =
container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
struct vmw_kms_dirty dirty;
int ret;
 
ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
if (ret)
return ret;
 
ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
if (unlikely(ret != 0))
goto out_revert;
 
dirty.fifo_commit = vmw_sou_readback_fifo_commit;
dirty.clip = vmw_sou_readback_clip;
dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
num_clips;
ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
0, 0, num_clips, 1, &dirty);
vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
user_fence_rep);
 
return ret;
 
out_revert:
vmw_kms_helper_buffer_revert(buf);
 
return ret;
}
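/*
 * A minimal sketch of a full-framebuffer readback into a dma-buffer backed
 * framebuffer. Since no user-space fence representation is requested,
 * file_priv must be NULL, as documented above. The helper name, the clip
 * rectangle and the calling context are illustrative only.
 */
static int vmw_sou_readback_sketch(struct vmw_private *dev_priv,
                                   struct vmw_framebuffer *vfb,
                                   u32 width, u32 height)
{
        struct drm_vmw_rect vclip = {
                .x = 0,
                .y = 0,
                .w = width,
                .h = height,
        };

        return vmw_kms_sou_readback(dev_priv, NULL, vfb, NULL, &vclip, 1);
}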
 
#if 0
#include <display.h>
extern display_t os_display;
 
bool set_mode(struct drm_device *dev, struct drm_connector *connector,
videomode_t *reqmode, bool strict)
{
728,3 → 1095,4
 
return ret;
};
#endif
/drivers/video/drm/vmwgfx/vmwgfx_shader.c
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
27,6 → 27,7
 
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "ttm/ttm_placement.h"
 
#define VMW_COMPAT_SHADER_HT_ORDER 12
36,6 → 37,8
struct vmw_resource res;
SVGA3dShaderType type;
uint32_t size;
uint8_t num_input_sig;
uint8_t num_output_sig;
};
 
struct vmw_user_shader {
43,50 → 46,19
struct vmw_shader shader;
};
 
/**
* enum vmw_compat_shader_state - Staging state for compat shaders
*/
enum vmw_compat_shader_state {
VMW_COMPAT_COMMITED,
VMW_COMPAT_ADD,
VMW_COMPAT_DEL
struct vmw_dx_shader {
struct vmw_resource res;
struct vmw_resource *ctx;
struct vmw_resource *cotable;
u32 id;
bool committed;
struct list_head cotable_head;
};
 
/**
* struct vmw_compat_shader - Metadata for compat shaders.
*
* @handle: The TTM handle of the guest backed shader.
* @tfile: The struct ttm_object_file the guest backed shader is registered
* with.
* @hash: Hash item for lookup.
* @head: List head for staging lists or the compat shader manager list.
* @state: Staging state.
*
* The structure is protected by the cmdbuf lock.
*/
struct vmw_compat_shader {
u32 handle;
struct ttm_object_file *tfile;
struct drm_hash_item hash;
struct list_head head;
enum vmw_compat_shader_state state;
};
static uint64_t vmw_user_shader_size;
static uint64_t vmw_shader_size;
static size_t vmw_shader_dx_size;
 
/**
* struct vmw_compat_shader_manager - Compat shader manager.
*
* @shaders: Hash table containing staged and commited compat shaders
* @list: List of commited shaders.
* @dev_priv: Pointer to a device private structure.
*
* @shaders and @list are protected by the cmdbuf mutex for now.
*/
struct vmw_compat_shader_manager {
struct drm_open_hash shaders;
struct list_head list;
struct vmw_private *dev_priv;
};
 
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);
99,6 → 71,18
struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);
 
static int vmw_dx_shader_create(struct vmw_resource *res);
static int vmw_dx_shader_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_dx_shader_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state);
static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
static uint64_t vmw_user_shader_size;
 
static const struct vmw_user_resource_conv user_shader_conv = {
.object_type = VMW_RES_SHADER,
.base_obj_to_res = vmw_user_shader_base_to_res,
121,6 → 105,24
.unbind = vmw_gb_shader_unbind
};
 
static const struct vmw_res_func vmw_dx_shader_func = {
.res_type = vmw_res_shader,
.needs_backup = true,
.may_evict = false,
.type_name = "dx shaders",
.backup_placement = &vmw_mob_placement,
.create = vmw_dx_shader_create,
/*
* The destroy callback is only called with a committed resource on
* context destroy, in which case we destroy the cotable anyway,
* so there's no need to destroy DX shaders separately.
*/
.destroy = NULL,
.bind = vmw_dx_shader_bind,
.unbind = vmw_dx_shader_unbind,
.commit_notify = vmw_dx_shader_commit_notify,
};
 
/**
* Shader management:
*/
131,16 → 133,34
return container_of(res, struct vmw_shader, res);
}
 
/**
* vmw_res_to_dx_shader - typecast a struct vmw_resource to a
* struct vmw_dx_shader
*
* @res: Pointer to the struct vmw_resource.
*/
static inline struct vmw_dx_shader *
vmw_res_to_dx_shader(struct vmw_resource *res)
{
return container_of(res, struct vmw_dx_shader, res);
}
 
static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
(void) vmw_gb_shader_destroy(res);
if (likely(res->func->destroy))
(void) res->func->destroy(res);
else
res->id = -1;
}
 
 
static int vmw_gb_shader_init(struct vmw_private *dev_priv,
struct vmw_resource *res,
uint32_t size,
uint64_t offset,
SVGA3dShaderType type,
uint8_t num_input_sig,
uint8_t num_output_sig,
struct vmw_dma_buffer *byte_code,
void (*res_free) (struct vmw_resource *res))
{
147,10 → 167,9
struct vmw_shader *shader = vmw_res_to_shader(res);
int ret;
 
ret = vmw_resource_init(dev_priv, res, true,
res_free, &vmw_gb_shader_func);
ret = vmw_resource_init(dev_priv, res, true, res_free,
&vmw_gb_shader_func);
 
 
if (unlikely(ret != 0)) {
if (res_free)
res_free(res);
166,11 → 185,17
}
shader->size = size;
shader->type = type;
shader->num_input_sig = num_input_sig;
shader->num_output_sig = num_output_sig;
 
vmw_resource_activate(res, vmw_hw_shader_destroy);
return 0;
}
 
/*
* GB shader code:
*/
 
static int vmw_gb_shader_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
209,7 → 234,7
cmd->body.type = shader->type;
cmd->body.sizeInBytes = shader->size;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
(void) vmw_3d_resource_inc(dev_priv, false);
vmw_fifo_resource_inc(dev_priv);
 
return 0;
 
242,7 → 267,7
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
cmd->body.mobid = bo->mem.start;
cmd->body.offsetInBytes = 0;
cmd->body.offsetInBytes = res->backup_offset;
res->backup_dirty = false;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
303,7 → 328,7
return 0;
 
mutex_lock(&dev_priv->binding_mutex);
vmw_context_binding_res_list_scrub(&res->binding_head);
vmw_binding_res_list_scrub(&res->binding_head);
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
319,7 → 344,7
vmw_fifo_commit(dev_priv, sizeof(*cmd));
mutex_unlock(&dev_priv->binding_mutex);
vmw_resource_release_id(res);
vmw_3d_resource_dec(dev_priv, false);
vmw_fifo_resource_dec(dev_priv);
 
return 0;
}
375,6 → 400,8
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type,
uint8_t num_input_sig,
uint8_t num_output_sig,
struct ttm_object_file *tfile,
u32 *handle)
{
417,7 → 444,8
*/
 
ret = vmw_gb_shader_init(dev_priv, res, shader_size,
offset, shader_type, buffer,
offset, shader_type, num_input_sig,
num_output_sig, buffer,
vmw_user_shader_free);
if (unlikely(ret != 0))
goto out;
441,20 → 469,71
}
 
 
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buffer,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type)
{
struct vmw_shader *shader;
struct vmw_resource *res;
int ret;
 
/*
* Approximate idr memory usage with 128 bytes. It will be limited
* by the maximum number of shaders anyway.
*/
if (unlikely(vmw_shader_size == 0))
vmw_shader_size =
ttm_round_pot(sizeof(struct vmw_shader)) + 128;
 
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_shader_size,
false, true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for shader "
"creation.\n");
goto out_err;
}
 
shader = kzalloc(sizeof(*shader), GFP_KERNEL);
if (unlikely(shader == NULL)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_shader_size);
ret = -ENOMEM;
goto out_err;
}
 
res = &shader->res;
 
/*
* From here on, the destructor takes over resource freeing.
*/
ret = vmw_gb_shader_init(dev_priv, res, shader_size,
offset, shader_type, 0, 0, buffer,
vmw_shader_free);
 
out_err:
return ret ? ERR_PTR(ret) : res;
}
 
 
static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
enum drm_vmw_shader_type shader_type_drm,
u32 buffer_handle, size_t size, size_t offset,
uint8_t num_input_sig, uint8_t num_output_sig,
uint32_t *shader_handle)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_shader_create_arg *arg =
(struct drm_vmw_shader_create_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_dma_buffer *buffer = NULL;
SVGA3dShaderType shader_type;
int ret;
 
if (arg->buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
&buffer);
if (buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
&buffer, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find buffer for shader "
"creation.\n");
462,7 → 541,7
}
 
if ((u64)buffer->base.num_pages * PAGE_SIZE <
(u64)arg->size + (u64)arg->offset) {
(u64)size + (u64)offset) {
DRM_ERROR("Illegal buffer- or shader size.\n");
ret = -EINVAL;
goto out_bad_arg;
469,7 → 548,7
}
}
 
switch (arg->shader_type) {
switch (shader_type_drm) {
case drm_vmw_shader_type_vs:
shader_type = SVGA3D_SHADERTYPE_VS;
break;
476,9 → 555,6
case drm_vmw_shader_type_ps:
shader_type = SVGA3D_SHADERTYPE_PS;
break;
case drm_vmw_shader_type_gs:
shader_type = SVGA3D_SHADERTYPE_GS;
break;
default:
DRM_ERROR("Illegal shader type.\n");
ret = -EINVAL;
489,8 → 565,9
if (unlikely(ret != 0))
goto out_bad_arg;
 
ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
shader_type, tfile, &arg->shader_handle);
ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
shader_type, num_input_sig,
num_output_sig, tfile, shader_handle);
 
ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
499,7 → 576,7
}
 
/**
* vmw_compat_shader_id_ok - Check whether a compat shader user key and
* vmw_shader_id_ok - Check whether a compat shader user key and
* shader type are within valid bounds.
*
* @user_key: User space id of the shader.
507,13 → 584,13
*
* Returns true if valid false if not.
*/
static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
}
 
/**
* vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
* vmw_shader_key - Compute a hash key suitable for a compat shader.
*
* @user_key: User space id of the shader.
* @shader_type: Shader type.
521,13 → 598,13
* Returns a hash key suitable for a command buffer managed resource
* manager hash table.
*/
static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
return user_key | (shader_type << 20);
}
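/*
 * A minimal sketch of the inverse of the packing above: the low 20 bits hold
 * the user key and the bits above them hold the shader type, so for example
 * user_key 0x123 with shader_type 2 packs to 0x200123. The helper names are
 * illustrative only.
 */
static inline u32 vmw_shader_key_to_user_key(u32 key)
{
        return key & ((1 << 20) - 1);
}

static inline SVGA3dShaderType vmw_shader_key_to_type(u32 key)
{
        return (SVGA3dShaderType) (key >> 20);
}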
 
/**
* vmw_compat_shader_remove - Stage a compat shader for removal.
* vmw_shader_remove - Stage a compat shader for removal.
*
* @man: Pointer to the compat shader manager identifying the shader namespace.
* @user_key: The key that is used to identify the shader. The key is
535,17 → 612,18
* @shader_type: Shader type.
* @list: Caller's list of staged command buffer resource actions.
*/
int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
u32 user_key, SVGA3dShaderType shader_type,
struct list_head *list)
{
if (!vmw_compat_shader_id_ok(user_key, shader_type))
struct vmw_resource *dummy;
 
if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
 
return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
vmw_compat_shader_key(user_key,
shader_type),
list);
return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader,
vmw_shader_key(user_key, shader_type),
list, &dummy);
}
 
/**
575,7 → 653,7
int ret;
struct vmw_resource *res;
 
if (!vmw_compat_shader_id_ok(user_key, shader_type))
if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
 
/* Allocate and pin a DMA buffer */
612,8 → 690,8
if (unlikely(ret != 0))
goto no_reserve;
 
ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
vmw_compat_shader_key(user_key, shader_type),
ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
vmw_shader_key(user_key, shader_type),
res, list);
vmw_resource_unreference(&res);
no_reserve:
623,7 → 701,7
}
 
/**
* vmw_compat_shader_lookup - Look up a compat shader
* vmw_shader_lookup - Look up a compat shader
*
* @man: Pointer to the command buffer managed resource manager identifying
* the shader namespace.
634,15 → 712,27
* found. An error pointer otherwise.
*/
struct vmw_resource *
vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
u32 user_key,
SVGA3dShaderType shader_type)
{
if (!vmw_compat_shader_id_ok(user_key, shader_type))
if (!vmw_shader_id_ok(user_key, shader_type))
return ERR_PTR(-EINVAL);
 
return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
vmw_compat_shader_key(user_key,
shader_type));
return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader,
vmw_shader_key(user_key, shader_type));
}
 
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_shader_create_arg *arg =
(struct drm_vmw_shader_create_arg *)data;
 
return vmw_shader_define(dev, file_priv, arg->shader_type,
arg->buffer_handle,
arg->size, arg->offset,
0, 0,
&arg->shader_handle);
}
#endif
/drivers/video/drm/vmwgfx/vmwgfx_so.c
0,0 → 1,555
/**************************************************************************
* Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
 
/*
* Currently, the only reason we need to keep track of views is that if we
* destroy a hardware surface, all views pointing to it must also be destroyed,
* otherwise the device will error.
* So in particular, if a surface is evicted, we must destroy all views pointing
* to it, and all context bindings of that view. Similarly we must restore
* the view bindings, views and surfaces pointed to by the views when a
* context is referenced in the command stream.
*/
 
/**
* struct vmw_view - view metadata
*
* @res: The struct vmw_resource we derive from
* @ctx: Non-refcounted pointer to the context this view belongs to.
* @srf: Refcounted pointer to the surface pointed to by this view.
* @cotable: Refcounted pointer to the cotable holding this view.
* @srf_head: List head for the surface-to-view list.
* @cotable_head: List head for the cotable-to-view list.
* @view_type: View type.
* @view_id: User-space per context view id. Currently used also as per
* context device view id.
* @cmd_size: Size of the SVGA3D define view command that we've copied from the
* command stream.
* @committed: Whether the view is actually created or pending creation at the
* device level.
* @cmd: The SVGA3D define view command copied from the command stream.
*/
struct vmw_view {
struct rcu_head rcu;
struct vmw_resource res;
struct vmw_resource *ctx; /* Immutable */
struct vmw_resource *srf; /* Immutable */
struct vmw_resource *cotable; /* Immutable */
struct list_head srf_head; /* Protected by binding_mutex */
struct list_head cotable_head; /* Protected by binding_mutex */
unsigned view_type; /* Immutable */
unsigned view_id; /* Immutable */
u32 cmd_size; /* Immutable */
bool committed; /* Protected by binding_mutex */
u32 cmd[1]; /* Immutable */
};
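/*
 * A minimal sketch of how such a view is sized: the saved define command is
 * copied into the trailing cmd[] array, so an allocation must cover the
 * struct header plus the command size taken from the command stream. The
 * helper name is illustrative only.
 */
static inline size_t vmw_view_size_sketch(u32 cmd_size)
{
        return offsetof(struct vmw_view, cmd) + cmd_size;
}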
 
static int vmw_view_create(struct vmw_resource *res);
static int vmw_view_destroy(struct vmw_resource *res);
static void vmw_hw_view_destroy(struct vmw_resource *res);
static void vmw_view_commit_notify(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state);
 
static const struct vmw_res_func vmw_view_func = {
.res_type = vmw_res_view,
.needs_backup = false,
.may_evict = false,
.type_name = "DX view",
.backup_placement = NULL,
.create = vmw_view_create,
.commit_notify = vmw_view_commit_notify,
};
 
/**
* struct vmw_view_define - view define command body stub
*
* @view_id: The device id of the view being defined
* @sid: The surface id of the view being defined
*
* This generic struct is used by the code to change @view_id and @sid of a
* saved view define command.
*/
struct vmw_view_define {
uint32 view_id;
uint32 sid;
};
 
/**
* vmw_view - Convert a struct vmw_resource to a struct vmw_view
*
* @res: Pointer to the resource to convert.
*
* Returns a pointer to a struct vmw_view.
*/
static struct vmw_view *vmw_view(struct vmw_resource *res)
{
return container_of(res, struct vmw_view, res);
}
 
/**
* vmw_view_commit_notify - Notify that a view operation has been committed to
* hardware from a user-supplied command stream.
*
* @res: Pointer to the view resource.
* @state: Indicating whether a creation or removal has been committed.
*
*/
static void vmw_view_commit_notify(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state)
{
struct vmw_view *view = vmw_view(res);
struct vmw_private *dev_priv = res->dev_priv;
 
mutex_lock(&dev_priv->binding_mutex);
if (state == VMW_CMDBUF_RES_ADD) {
struct vmw_surface *srf = vmw_res_to_srf(view->srf);
 
list_add_tail(&view->srf_head, &srf->view_list);
vmw_cotable_add_resource(view->cotable, &view->cotable_head);
view->committed = true;
res->id = view->view_id;
 
} else {
list_del_init(&view->cotable_head);
list_del_init(&view->srf_head);
view->committed = false;
res->id = -1;
}
mutex_unlock(&dev_priv->binding_mutex);
}
 
/**
* vmw_view_create - Create a hardware view.
*
* @res: Pointer to the view resource.
*
* Create a hardware view. Typically used if that view has previously been
* destroyed by an eviction operation.
*/
static int vmw_view_create(struct vmw_resource *res)
{
struct vmw_view *view = vmw_view(res);
struct vmw_surface *srf = vmw_res_to_srf(view->srf);
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
struct vmw_view_define body;
} *cmd;
 
mutex_lock(&dev_priv->binding_mutex);
if (!view->committed) {
mutex_unlock(&dev_priv->binding_mutex);
return 0;
}
 
cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size,
view->ctx->id);
if (!cmd) {
DRM_ERROR("Failed reserving FIFO space for view creation.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
memcpy(cmd, &view->cmd, view->cmd_size);
WARN_ON(cmd->body.view_id != view->view_id);
/* Sid may have changed due to surface eviction. */
WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
cmd->body.sid = view->srf->id;
vmw_fifo_commit(res->dev_priv, view->cmd_size);
res->id = view->view_id;
list_add_tail(&view->srf_head, &srf->view_list);
vmw_cotable_add_resource(view->cotable, &view->cotable_head);
mutex_unlock(&dev_priv->binding_mutex);
 
return 0;
}
 
/**
* vmw_view_destroy - Destroy a hardware view.
*
* @res: Pointer to the view resource.
*
* Destroy a hardware view. Typically used on unexpected termination of the
* owning process or if the surface the view is pointing to is destroyed.
*/
static int vmw_view_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_view *view = vmw_view(res);
struct {
SVGA3dCmdHeader header;
union vmw_view_destroy body;
} *cmd;
 
WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
vmw_binding_res_list_scrub(&res->binding_head);
 
if (!view->committed || res->id == -1)
return 0;
 
cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id);
if (!cmd) {
DRM_ERROR("Failed reserving FIFO space for view "
"destruction.\n");
return -ENOMEM;
}
 
cmd->header.id = vmw_view_destroy_cmds[view->view_type];
cmd->header.size = sizeof(cmd->body);
cmd->body.view_id = view->view_id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
res->id = -1;
list_del_init(&view->cotable_head);
list_del_init(&view->srf_head);
 
return 0;
}
 
/**
* vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup.
*
* @res: Pointer to the view resource.
*
* Destroy a hardware view if it's still present.
*/
static void vmw_hw_view_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
 
mutex_lock(&dev_priv->binding_mutex);
WARN_ON(vmw_view_destroy(res));
res->id = -1;
mutex_unlock(&dev_priv->binding_mutex);
}
 
/**
* vmw_view_key - Compute a view key suitable for the cmdbuf resource manager
*
* @user_key: The user-space id used for the view.
* @view_type: The view type.
*
* Returns a key that is unique to the combination of @user_key and
* @view_type, suitable for lookup in the cmdbuf resource manager.
*/
static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type)
{
return user_key | (view_type << 20);
}
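 
/*
 * Illustrative sketch (not part of the driver): the key packs the
 * user-space view id into the low 20 bits and the view type above them,
 * so a single u32 identifies a view within a context. vmw_view_id_ok()
 * below guarantees user_key < SVGA_COTABLE_MAX_IDS, which the build
 * asserts verify fits in 20 bits.
 *
 *   u32 key  = vmw_view_key(7, vmw_view_rt);   // 7 | (1 << 20)
 *   u32 id   = key & ((1 << 20) - 1);          // back to 7
 *   u32 type = key >> 20;                      // back to vmw_view_rt
 */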
 
/**
* vmw_view_id_ok - Basic view id and type range checks.
*
* @user_key: The user-space id used for the view.
* @view_type: The view type.
*
* Checks that the view id and type (typically provided by user-space) is
* valid.
*/
static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
{
return (user_key < SVGA_COTABLE_MAX_IDS &&
view_type < vmw_view_max);
}
 
/**
* vmw_view_res_free - resource res_free callback for view resources
*
* @res: Pointer to a struct vmw_resource
*
* Frees memory and memory accounting held by a struct vmw_view.
*/
static void vmw_view_res_free(struct vmw_resource *res)
{
struct vmw_view *view = vmw_view(res);
size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size;
struct vmw_private *dev_priv = res->dev_priv;
 
vmw_resource_unreference(&view->cotable);
vmw_resource_unreference(&view->srf);
kfree_rcu(view, rcu);
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
 
/**
* vmw_view_add - Create a view resource and stage it for addition
* as a command buffer managed resource.
*
* @man: Pointer to the compat shader manager identifying the shader namespace.
* @ctx: Pointer to a struct vmw_resource identifying the active context.
* @srf: Pointer to a struct vmw_resource identifying the surface the view
* points to.
* @view_type: The view type deduced from the view create command.
* @user_key: The key that is used to identify the shader. The key is
* unique to the view type and to the context.
* @cmd: Pointer to the view create command in the command stream.
* @cmd_size: Size of the view create command in the command stream.
* @list: Caller's list of staged command buffer resource actions.
*/
int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_resource *ctx,
struct vmw_resource *srf,
enum vmw_view_type view_type,
u32 user_key,
const void *cmd,
size_t cmd_size,
struct list_head *list)
{
static const size_t vmw_view_define_sizes[] = {
[vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
[vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
[vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView)
};
 
struct vmw_private *dev_priv = ctx->dev_priv;
struct vmw_resource *res;
struct vmw_view *view;
size_t size;
int ret;
 
if (cmd_size != vmw_view_define_sizes[view_type] +
sizeof(SVGA3dCmdHeader)) {
DRM_ERROR("Illegal view create command size.\n");
return -EINVAL;
}
 
if (!vmw_view_id_ok(user_key, view_type)) {
DRM_ERROR("Illegal view add view id.\n");
return -EINVAL;
}
 
size = offsetof(struct vmw_view, cmd) + cmd_size;
 
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for view"
" creation.\n");
return ret;
}
 
view = kmalloc(size, GFP_KERNEL);
if (!view) {
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
return -ENOMEM;
}
 
res = &view->res;
view->ctx = ctx;
view->srf = vmw_resource_reference(srf);
view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]);
view->view_type = view_type;
view->view_id = user_key;
view->cmd_size = cmd_size;
view->committed = false;
INIT_LIST_HEAD(&view->srf_head);
INIT_LIST_HEAD(&view->cotable_head);
memcpy(&view->cmd, cmd, cmd_size);
ret = vmw_resource_init(dev_priv, res, true,
vmw_view_res_free, &vmw_view_func);
if (ret)
goto out_resource_init;
 
ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view,
vmw_view_key(user_key, view_type),
res, list);
if (ret)
goto out_resource_init;
 
res->id = view->view_id;
vmw_resource_activate(res, vmw_hw_view_destroy);
 
out_resource_init:
vmw_resource_unreference(&res);
 
return ret;
}
 
/**
* vmw_view_remove - Stage a view for removal.
*
* @man: Pointer to the view manager identifying the shader namespace.
* @user_key: The key that is used to identify the view. The key is
* unique to the view type.
* @view_type: View type
* @list: Caller's list of staged command buffer resource actions.
* @res_p: If the resource is in an already committed state, points to the
* struct vmw_resource on successful return. The pointer will be
* non ref-counted.
*/
int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
u32 user_key, enum vmw_view_type view_type,
struct list_head *list,
struct vmw_resource **res_p)
{
if (!vmw_view_id_ok(user_key, view_type)) {
DRM_ERROR("Illegal view remove view id.\n");
return -EINVAL;
}
 
return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view,
vmw_view_key(user_key, view_type),
list, res_p);
}
 
/**
* vmw_view_cotable_list_destroy - Evict all views belonging to a cotable.
*
* @dev_priv: Pointer to a device private struct.
* @list: List of views belonging to a cotable.
* @readback: Unused. Needed for function interface only.
*
* This function evicts all views belonging to a cotable.
* It must be called with the binding_mutex held, and the caller must hold
* a reference to the view resource. This is typically called before the
* cotable is paged out.
*/
void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
struct list_head *list,
bool readback)
{
struct vmw_view *entry, *next;
 
WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
 
list_for_each_entry_safe(entry, next, list, cotable_head)
WARN_ON(vmw_view_destroy(&entry->res));
}
 
/**
* vmw_view_surface_list_destroy - Evict all views pointing to a surface
*
* @dev_priv: Pointer to a device private struct.
* @list: List of views pointing to a surface.
*
* This function evicts all views pointing to a surface. This is typically
* called before the surface is evicted.
*/
void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
struct list_head *list)
{
struct vmw_view *entry, *next;
 
WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
 
list_for_each_entry_safe(entry, next, list, srf_head)
WARN_ON(vmw_view_destroy(&entry->res));
}
 
/**
* vmw_view_srf - Return a non-refcounted pointer to the surface a view is
* pointing to.
*
* @res: pointer to a view resource.
*
* Note that the view itself is holding a reference, so as long as
* the view resource is alive, the surface resource will be.
*/
struct vmw_resource *vmw_view_srf(struct vmw_resource *res)
{
return vmw_view(res)->srf;
}
 
/**
* vmw_view_lookup - Look up a view.
*
* @man: The context's cmdbuf ref manager.
* @view_type: The view type.
* @user_key: The view user id.
*
* returns a refcounted pointer to a view or an error pointer if not found.
*/
struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_view_type view_type,
u32 user_key)
{
return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view,
vmw_view_key(user_key, view_type));
}
 
const u32 vmw_view_destroy_cmds[] = {
[vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
[vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
[vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
};
 
const SVGACOTableType vmw_view_cotables[] = {
[vmw_view_sr] = SVGA_COTABLE_SRVIEW,
[vmw_view_rt] = SVGA_COTABLE_RTVIEW,
[vmw_view_ds] = SVGA_COTABLE_DSVIEW,
};
 
const SVGACOTableType vmw_so_cotables[] = {
[vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT,
[vmw_so_bs] = SVGA_COTABLE_BLENDSTATE,
[vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL,
[vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE,
[vmw_so_ss] = SVGA_COTABLE_SAMPLER,
[vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT
};
 
 
/* To remove unused function warning */
static void vmw_so_build_asserts(void) __attribute__((used));
 
 
/*
* This function is unused at run-time, and only used to dump various build
* asserts important for code optimization assumptions.
*/
static void vmw_so_build_asserts(void)
{
/* Assert that our vmw_view_cmd_to_type() function is correct. */
BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW !=
SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1);
BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW !=
SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2);
BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW !=
SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3);
BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW !=
SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4);
BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW !=
SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5);
 
/* Assert that our "one body fits all" assumption is valid */
BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32));
 
/* Assert that the view key space can hold all view ids. */
BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1));
 
/*
* Assert that the offset of sid in all view define commands
* is what we assume it to be.
*/
BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
offsetof(SVGA3dCmdDXDefineShaderResourceView, sid));
BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
offsetof(SVGA3dCmdDXDefineRenderTargetView, sid));
BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
offsetof(SVGA3dCmdDXDefineDepthStencilView, sid));
}
/drivers/video/drm/vmwgfx/vmwgfx_so.h
0,0 → 1,160
/**************************************************************************
* Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef VMW_SO_H
#define VMW_SO_H
 
enum vmw_view_type {
vmw_view_sr,
vmw_view_rt,
vmw_view_ds,
vmw_view_max,
};
 
enum vmw_so_type {
vmw_so_el,
vmw_so_bs,
vmw_so_ds,
vmw_so_rs,
vmw_so_ss,
vmw_so_so,
vmw_so_max,
};
 
/**
* union vmw_view_destroy - view destruction command body
*
* @rtv: RenderTarget view destruction command body
* @srv: ShaderResource view destruction command body
* @dsv: DepthStencil view destruction command body
* @view_id: A single u32 view id.
*
* The assumption here is that all union members are really represented by a
* single u32 in the command stream. If that's not the case,
* the size of this union will not equal the size of an u32, and the
* assumption is invalid, and we detect that at compile time in the
* vmw_so_build_asserts() function.
*/
union vmw_view_destroy {
struct SVGA3dCmdDXDestroyRenderTargetView rtv;
struct SVGA3dCmdDXDestroyShaderResourceView srv;
struct SVGA3dCmdDXDestroyDepthStencilView dsv;
u32 view_id;
};
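 
/*
 * Illustrative sketch (not part of the driver): because every member is a
 * single u32 view id (checked by vmw_so_build_asserts() in vmwgfx_so.c),
 * the destroy command body can be filled generically regardless of the
 * view type:
 *
 *   union vmw_view_destroy body;
 *   body.view_id = id;        // valid for rtv, srv and dsv alike
 */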
 
/* Map enum vmw_view_type to view destroy command ids*/
extern const u32 vmw_view_destroy_cmds[];
 
/* Map enum vmw_view_type to SVGACOTableType */
extern const SVGACOTableType vmw_view_cotables[];
 
/* Map enum vmw_so_type to SVGACOTableType */
extern const SVGACOTableType vmw_so_cotables[];
 
/*
* vmw_view_cmd_to_type - Return the view type for a create or destroy command
*
* @id: The SVGA3D command id.
*
* For a given view create or destroy command id, return the corresponding
* enum vmw_view_type. If the command is unknown, return vmw_view_max.
* The validity of the simplified calculation is verified in the
* vmw_so_build_asserts() function.
*/
static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
{
u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2;
 
if (tmp > (u32)vmw_view_max)
return vmw_view_max;
 
return (enum vmw_view_type) tmp;
}
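 
/*
 * Illustrative sketch (not part of the driver): the division by two relies
 * on the define/destroy command ids being laid out in consecutive pairs
 * starting at SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW, as verified by
 * vmw_so_build_asserts() in vmwgfx_so.c. Both commands of a pair therefore
 * map to the same view type:
 *
 *   vmw_view_cmd_to_type(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW)
 *       == vmw_view_cmd_to_type(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW)
 *       == vmw_view_rt
 */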
 
/*
* vmw_so_cmd_to_type - Return the state object type for a
* create or destroy command
*
* @id: The SVGA3D command id.
*
* For a given state object create or destroy command id,
* return the corresponding enum vmw_so_type. If the command is unknown,
* return vmw_so_max. We should perhaps optimize this function using
* a similar strategy as vmw_view_cmd_to_type().
*/
static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
{
switch (id) {
case SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT:
case SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT:
return vmw_so_el;
case SVGA_3D_CMD_DX_DEFINE_BLEND_STATE:
case SVGA_3D_CMD_DX_DESTROY_BLEND_STATE:
return vmw_so_bs;
case SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE:
case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE:
return vmw_so_ds;
case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE:
case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE:
return vmw_so_rs;
case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE:
case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE:
return vmw_so_ss;
case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT:
case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT:
return vmw_so_so;
default:
break;
}
return vmw_so_max;
}
 
/*
* View management - vmwgfx_so.c
*/
extern int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_resource *ctx,
struct vmw_resource *srf,
enum vmw_view_type view_type,
u32 user_key,
const void *cmd,
size_t cmd_size,
struct list_head *list);
 
extern int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
u32 user_key, enum vmw_view_type view_type,
struct list_head *list,
struct vmw_resource **res_p);
 
extern void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
struct list_head *view_list);
extern void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
struct list_head *list,
bool readback);
extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res);
extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_view_type view_type,
u32 user_key);
#endif
/drivers/video/drm/vmwgfx/vmwgfx_stdu.c
0,0 → 1,1268
/******************************************************************************
*
* COPYRIGHT © 2014-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
******************************************************************************/
 
#include "vmwgfx_kms.h"
#include "device_include/svga3d_surfacedefs.h"
#include <drm/drm_plane_helper.h>
 
#define vmw_crtc_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.crtc)
#define vmw_encoder_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.encoder)
#define vmw_connector_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.connector)
 
 
 
enum stdu_content_type {
SAME_AS_DISPLAY = 0,
SEPARATE_SURFACE,
SEPARATE_DMA
};
 
/**
* struct vmw_stdu_dirty - closure structure for the update functions
*
* @base: The base type we derive from. Used by vmw_kms_helper_dirty().
* @transfer: Transfer direction for DMA command.
* @left: Left side of bounding box.
* @right: Right side of bounding box.
* @top: Top side of bounding box.
* @bottom: Bottom side of bounding box.
* @buf: DMA buffer when DMA-ing between buffer and screen targets.
* @sid: Surface ID when copying between surface and screen targets.
*/
struct vmw_stdu_dirty {
struct vmw_kms_dirty base;
SVGA3dTransferType transfer;
s32 left, right, top, bottom;
u32 pitch;
union {
struct vmw_dma_buffer *buf;
u32 sid;
};
};
 
/*
* SVGA commands that are used by this code. Please see the device headers
* for explanation.
*/
struct vmw_stdu_update {
SVGA3dCmdHeader header;
SVGA3dCmdUpdateGBScreenTarget body;
};
 
struct vmw_stdu_dma {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA body;
};
 
struct vmw_stdu_surface_copy {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceCopy body;
};
 
 
/**
* struct vmw_screen_target_display_unit
*
* @base: VMW specific DU structure
* @display_srf: surface to be displayed. The dimension of this will always
* match the display mode. If the display mode matches
* content_vfbs dimensions, then this is a pointer into the
* corresponding field in content_vfbs. If not, then this
* is a separate buffer to which content_vfbs will blit.
* @content_fb: holds the rendered content, can be a surface or DMA buffer
* @content_fb_type: content_fb type
* @defined: true if the current display unit has been initialized
*/
struct vmw_screen_target_display_unit {
struct vmw_display_unit base;
 
struct vmw_surface *display_srf;
struct drm_framebuffer *content_fb;
 
enum stdu_content_type content_fb_type;
 
bool defined;
};
 
 
 
static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu);
 
 
 
/******************************************************************************
* Screen Target Display Unit helper Functions
*****************************************************************************/
 
/**
* vmw_stdu_pin_display - pins the resource associated with the display surface
*
* @stdu: contains the display surface
*
* Since the display surface can either be a private surface allocated by us,
* or it can point to the content surface, we use this function to not pin the
* same resource twice.
*/
static int vmw_stdu_pin_display(struct vmw_screen_target_display_unit *stdu)
{
return vmw_resource_pin(&stdu->display_srf->res, false);
}
 
 
 
/**
* vmw_stdu_unpin_display - unpins the resource associated with display surface
*
* @stdu: contains the display surface
*
* If the display surface was privately allocated by
* vmw_surface_gb_priv_define() and not registered as a framebuffer, then it
* won't be automatically cleaned up when all the framebuffers are freed. As
* such, we have to explicitly call vmw_resource_unreference() to get it freed.
*/
static void vmw_stdu_unpin_display(struct vmw_screen_target_display_unit *stdu)
{
if (stdu->display_srf) {
struct vmw_resource *res = &stdu->display_srf->res;
 
vmw_resource_unpin(res);
 
if (stdu->content_fb_type != SAME_AS_DISPLAY) {
vmw_resource_unreference(&res);
stdu->content_fb_type = SAME_AS_DISPLAY;
}
 
stdu->display_srf = NULL;
}
}
 
 
 
/******************************************************************************
* Screen Target Display Unit CRTC Functions
*****************************************************************************/
 
 
/**
* vmw_stdu_crtc_destroy - cleans up the STDU
*
* @crtc: used to get a reference to the containing STDU
*/
static void vmw_stdu_crtc_destroy(struct drm_crtc *crtc)
{
vmw_stdu_destroy(vmw_crtc_to_stdu(crtc));
}
 
/**
* vmw_stdu_define_st - Defines a Screen Target
*
* @dev_priv: VMW DRM device
* @stdu: display unit to create a Screen Target for
*
* Defines a Screen Target that we can use later. This function is called
* whenever the framebuffer size changes.
*
* RETURNS:
* 0 on success, error code on failure
*/
static int vmw_stdu_define_st(struct vmw_private *dev_priv,
struct vmw_screen_target_display_unit *stdu)
{
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBScreenTarget body;
} *cmd;
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 
if (unlikely(cmd == NULL)) {
DRM_ERROR("Out of FIFO space defining Screen Target\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SCREENTARGET;
cmd->header.size = sizeof(cmd->body);
 
cmd->body.stid = stdu->base.unit;
cmd->body.width = stdu->display_srf->base_size.width;
cmd->body.height = stdu->display_srf->base_size.height;
cmd->body.flags = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
cmd->body.dpi = 0;
cmd->body.xRoot = stdu->base.crtc.x;
cmd->body.yRoot = stdu->base.crtc.y;
 
if (!stdu->base.is_implicit) {
cmd->body.xRoot = stdu->base.gui_x;
cmd->body.yRoot = stdu->base.gui_y;
}
 
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
stdu->defined = true;
 
return 0;
}
 
 
 
/**
* vmw_stdu_bind_st - Binds a surface to a Screen Target
*
* @dev_priv: VMW DRM device
* @stdu: display unit affected
* @res: Buffer to bind to the screen target. Set to NULL to blank screen.
*
* Binding a surface to a Screen Target is the same as flipping
*/
static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
struct vmw_screen_target_display_unit *stdu,
struct vmw_resource *res)
{
SVGA3dSurfaceImageId image;
 
struct {
SVGA3dCmdHeader header;
SVGA3dCmdBindGBScreenTarget body;
} *cmd;
 
 
if (!stdu->defined) {
DRM_ERROR("No screen target defined\n");
return -EINVAL;
}
 
/* Set up image using information in vfb */
memset(&image, 0, sizeof(image));
image.sid = res ? res->id : SVGA3D_INVALID_ID;
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 
if (unlikely(cmd == NULL)) {
DRM_ERROR("Out of FIFO space binding a screen target\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_BIND_GB_SCREENTARGET;
cmd->header.size = sizeof(cmd->body);
 
cmd->body.stid = stdu->base.unit;
cmd->body.image = image;
 
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
}
 
/**
* vmw_stdu_populate_update - populate an UPDATE_GB_SCREENTARGET command with a
* bounding box.
*
* @cmd: Pointer to command stream.
* @unit: Screen target unit.
* @left: Left side of bounding box.
* @right: Right side of bounding box.
* @top: Top side of bounding box.
* @bottom: Bottom side of bounding box.
*/
static void vmw_stdu_populate_update(void *cmd, int unit,
s32 left, s32 right, s32 top, s32 bottom)
{
struct vmw_stdu_update *update = cmd;
 
update->header.id = SVGA_3D_CMD_UPDATE_GB_SCREENTARGET;
update->header.size = sizeof(update->body);
 
update->body.stid = unit;
update->body.rect.x = left;
update->body.rect.y = top;
update->body.rect.w = right - left;
update->body.rect.h = bottom - top;
}
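 
/*
 * Illustrative sketch (not part of the driver): the update command takes an
 * x/y/width/height rectangle, so the caller-supplied bounding box is
 * converted as follows. A box with left=16, top=32, right=656, bottom=512
 * becomes rect = { .x = 16, .y = 32, .w = 640, .h = 480 }.
 */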
 
/**
* vmw_stdu_update_st - Full update of a Screen Target
*
* @dev_priv: VMW DRM device
* @stdu: display unit affected
*
* This function needs to be called whenever the content of a screen
* target has changed completely. Typically as a result of a backing
* surface change.
*
* RETURNS:
* 0 on success, error code on failure
*/
static int vmw_stdu_update_st(struct vmw_private *dev_priv,
struct vmw_screen_target_display_unit *stdu)
{
struct vmw_stdu_update *cmd;
struct drm_crtc *crtc = &stdu->base.crtc;
 
if (!stdu->defined) {
DRM_ERROR("No screen target defined");
return -EINVAL;
}
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 
if (unlikely(cmd == NULL)) {
DRM_ERROR("Out of FIFO space updating a Screen Target\n");
return -ENOMEM;
}
 
vmw_stdu_populate_update(cmd, stdu->base.unit, 0, crtc->mode.hdisplay,
0, crtc->mode.vdisplay);
 
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
}
 
 
 
/**
* vmw_stdu_destroy_st - Destroy a Screen Target
*
* @dev_priv: VMW DRM device
* @stdu: display unit to destroy
*/
static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
struct vmw_screen_target_display_unit *stdu)
{
int ret;
 
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDestroyGBScreenTarget body;
} *cmd;
 
 
/* Nothing to do if not successfully defined */
if (unlikely(!stdu->defined))
return 0;
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 
if (unlikely(cmd == NULL)) {
DRM_ERROR("Out of FIFO space, screen target not destroyed\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SCREENTARGET;
cmd->header.size = sizeof(cmd->body);
 
cmd->body.stid = stdu->base.unit;
 
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
/* Force sync */
ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
if (unlikely(ret != 0))
DRM_ERROR("Failed to sync with HW");
 
stdu->defined = false;
 
return ret;
}
 
 
 
/**
* vmw_stdu_crtc_set_config - Sets a mode
*
* @set: mode parameters
*
* This function is the device-specific portion of the DRM CRTC mode set.
* For the SVGA device, we do this by defining a Screen Target, binding a
* GB Surface to that target, and finally update the screen target.
*
* RETURNS:
* 0 on success, error code otherwise
*/
static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
{
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
struct vmw_framebuffer *vfb;
struct vmw_framebuffer_surface *new_vfbs;
struct drm_display_mode *mode;
struct drm_framebuffer *new_fb;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
int ret;
 
 
if (!set || !set->crtc)
return -EINVAL;
 
crtc = set->crtc;
crtc->x = set->x;
crtc->y = set->y;
stdu = vmw_crtc_to_stdu(crtc);
mode = set->mode;
new_fb = set->fb;
dev_priv = vmw_priv(crtc->dev);
 
 
if (set->num_connectors > 1) {
DRM_ERROR("Too many connectors\n");
return -EINVAL;
}
 
if (set->num_connectors == 1 &&
set->connectors[0] != &stdu->base.connector) {
DRM_ERROR("Connectors don't match %p %p\n",
set->connectors[0], &stdu->base.connector);
return -EINVAL;
}
 
 
/* Since they always map one to one these are safe */
connector = &stdu->base.connector;
encoder = &stdu->base.encoder;
 
 
/*
* After this point the CRTC will be considered off unless a new fb
* is bound
*/
if (stdu->defined) {
/* Unbind current surface by binding an invalid one */
ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
if (unlikely(ret != 0))
return ret;
 
/* Update Screen Target, display will now be blank */
if (crtc->primary->fb) {
ret = vmw_stdu_update_st(dev_priv, stdu);
if (unlikely(ret != 0))
return ret;
}
 
crtc->primary->fb = NULL;
crtc->enabled = false;
encoder->crtc = NULL;
connector->encoder = NULL;
 
vmw_stdu_unpin_display(stdu);
stdu->content_fb = NULL;
stdu->content_fb_type = SAME_AS_DISPLAY;
 
ret = vmw_stdu_destroy_st(dev_priv, stdu);
/* The hardware is hung, give up */
if (unlikely(ret != 0))
return ret;
}
 
 
/* Any of these conditions means the caller wants CRTC off */
if (set->num_connectors == 0 || !mode || !new_fb)
return 0;
 
 
if (set->x + mode->hdisplay > new_fb->width ||
set->y + mode->vdisplay > new_fb->height) {
DRM_ERROR("Set outside of framebuffer\n");
return -EINVAL;
}
 
stdu->content_fb = new_fb;
vfb = vmw_framebuffer_to_vfb(stdu->content_fb);
 
if (vfb->dmabuf)
stdu->content_fb_type = SEPARATE_DMA;
 
/*
* If the requested mode is different than the width and height
* of the FB or if the content buffer is a DMA buf, then allocate
* a display FB that matches the dimension of the mode
*/
if (mode->hdisplay != new_fb->width ||
mode->vdisplay != new_fb->height ||
stdu->content_fb_type != SAME_AS_DISPLAY) {
struct vmw_surface content_srf;
struct drm_vmw_size display_base_size = {0};
struct vmw_surface *display_srf;
 
 
display_base_size.width = mode->hdisplay;
display_base_size.height = mode->vdisplay;
display_base_size.depth = 1;
 
/*
* If content buffer is a DMA buf, then we have to construct
* surface info
*/
if (stdu->content_fb_type == SEPARATE_DMA) {
 
switch (new_fb->bits_per_pixel) {
case 32:
content_srf.format = SVGA3D_X8R8G8B8;
break;
 
case 16:
content_srf.format = SVGA3D_R5G6B5;
break;
 
case 8:
content_srf.format = SVGA3D_P8;
break;
 
default:
DRM_ERROR("Invalid format\n");
ret = -EINVAL;
goto err_unref_content;
}
 
content_srf.flags = 0;
content_srf.mip_levels[0] = 1;
content_srf.multisample_count = 0;
} else {
 
stdu->content_fb_type = SEPARATE_SURFACE;
 
new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
content_srf = *new_vfbs->surface;
}
 
 
ret = vmw_surface_gb_priv_define(crtc->dev,
0, /* because kernel visible only */
content_srf.flags,
content_srf.format,
true, /* a scanout buffer */
content_srf.mip_levels[0],
content_srf.multisample_count,
0,
display_base_size,
&display_srf);
if (unlikely(ret != 0)) {
DRM_ERROR("Cannot allocate a display FB.\n");
goto err_unref_content;
}
 
stdu->display_srf = display_srf;
} else {
new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
stdu->display_srf = new_vfbs->surface;
}
 
 
ret = vmw_stdu_pin_display(stdu);
if (unlikely(ret != 0)) {
stdu->display_srf = NULL;
goto err_unref_content;
}
 
vmw_svga_enable(dev_priv);
 
/*
* Steps to displaying a surface, assume surface is already
* bound:
* 1. define a screen target
* 2. bind a fb to the screen target
* 3. update that screen target (this is done later by
* vmw_kms_stdu_do_surface_dirty_or_present)
*/
ret = vmw_stdu_define_st(dev_priv, stdu);
if (unlikely(ret != 0))
goto err_unpin_display_and_content;
 
ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
if (unlikely(ret != 0))
goto err_unpin_destroy_st;
 
 
connector->encoder = encoder;
encoder->crtc = crtc;
 
crtc->mode = *mode;
crtc->primary->fb = new_fb;
crtc->enabled = true;
 
return ret;
 
err_unpin_destroy_st:
vmw_stdu_destroy_st(dev_priv, stdu);
err_unpin_display_and_content:
vmw_stdu_unpin_display(stdu);
err_unref_content:
stdu->content_fb = NULL;
return ret;
}
 
 
 
/**
* vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
*
* @crtc: CRTC to attach FB to
* @new_fb: FB to attach
* @event: Event to be posted. This event should've been allocated
* using k[mz]alloc, and should've been completely initialized.
* @flags: Input flags.
*
* If the STDU uses the same display and content buffers, i.e. a true flip,
* this function will replace the existing display buffer with the new content
* buffer.
*
* If the STDU uses different display and content buffers, i.e. a blit, then
* only the content buffer will be updated.
*
* RETURNS:
* 0 on success, error code on failure
*/
static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *new_fb,
struct drm_pending_vblank_event *event,
uint32_t flags)
 
{
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
int ret;
 
if (crtc == NULL)
return -EINVAL;
 
dev_priv = vmw_priv(crtc->dev);
stdu = vmw_crtc_to_stdu(crtc);
crtc->primary->fb = new_fb;
stdu->content_fb = new_fb;
 
if (stdu->display_srf) {
/*
* If the display surface is the same as the content surface
* then remove the reference
*/
if (stdu->content_fb_type == SAME_AS_DISPLAY) {
if (stdu->defined) {
/* Unbind the current surface */
ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
if (unlikely(ret != 0))
goto err_out;
}
vmw_stdu_unpin_display(stdu);
stdu->display_srf = NULL;
}
}
 
 
if (!new_fb) {
/* Blanks the display */
(void) vmw_stdu_update_st(dev_priv, stdu);
 
return 0;
}
 
 
if (stdu->content_fb_type == SAME_AS_DISPLAY) {
stdu->display_srf = vmw_framebuffer_to_vfbs(new_fb)->surface;
ret = vmw_stdu_pin_display(stdu);
if (ret) {
stdu->display_srf = NULL;
goto err_out;
}
 
/* Bind display surface */
ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
if (unlikely(ret != 0))
goto err_unpin_display_and_content;
}
 
/* Update display surface: after this point everything is bound */
ret = vmw_stdu_update_st(dev_priv, stdu);
if (unlikely(ret != 0))
return ret;
 
if (event) {
struct vmw_fence_obj *fence = NULL;
struct drm_file *file_priv = event->base.file_priv;
 
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
if (!fence)
return -ENOMEM;
 
ret = vmw_event_fence_action_queue(file_priv, fence,
&event->base,
&event->event.tv_sec,
&event->event.tv_usec,
true);
vmw_fence_obj_unreference(&fence);
} else {
vmw_fifo_flush(dev_priv, false);
}
 
return ret;
 
err_unpin_display_and_content:
vmw_stdu_unpin_display(stdu);
err_out:
crtc->primary->fb = NULL;
stdu->content_fb = NULL;
return ret;
}
 
 
/**
* vmw_stdu_dmabuf_clip - Callback to encode a surface DMA command cliprect
*
* @dirty: The closure structure.
*
* Encodes a surface DMA command cliprect and updates the bounding box
* for the DMA.
*/
static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
{
struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base);
struct vmw_stdu_dma *cmd = dirty->cmd;
struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
 
blit += dirty->num_hits;
blit->srcx = dirty->fb_x;
blit->srcy = dirty->fb_y;
blit->x = dirty->unit_x1;
blit->y = dirty->unit_y1;
blit->d = 1;
blit->w = dirty->unit_x2 - dirty->unit_x1;
blit->h = dirty->unit_y2 - dirty->unit_y1;
dirty->num_hits++;
 
if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM)
return;
 
/* Destination bounding box */
ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
}
 
/**
* vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command.
*
* @dirty: The closure structure.
*
* Fills in the missing fields in a DMA command, and optionally encodes
* a screen target update command, depending on transfer direction.
*/
static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
{
struct vmw_stdu_dirty *ddirty =
container_of(dirty, struct vmw_stdu_dirty, base);
struct vmw_screen_target_display_unit *stdu =
container_of(dirty->unit, typeof(*stdu), base);
struct vmw_stdu_dma *cmd = dirty->cmd;
struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
SVGA3dCmdSurfaceDMASuffix *suffix =
(SVGA3dCmdSurfaceDMASuffix *) &blit[dirty->num_hits];
size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
 
if (!dirty->num_hits) {
vmw_fifo_commit(dirty->dev_priv, 0);
return;
}
 
cmd->header.id = SVGA_3D_CMD_SURFACE_DMA;
cmd->header.size = sizeof(cmd->body) + blit_size;
vmw_bo_get_guest_ptr(&ddirty->buf->base, &cmd->body.guest.ptr);
cmd->body.guest.pitch = ddirty->pitch;
cmd->body.host.sid = stdu->display_srf->res.id;
cmd->body.host.face = 0;
cmd->body.host.mipmap = 0;
cmd->body.transfer = ddirty->transfer;
suffix->suffixSize = sizeof(*suffix);
suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
 
if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
blit_size += sizeof(struct vmw_stdu_update);
 
vmw_stdu_populate_update(&suffix[1], stdu->base.unit,
ddirty->left, ddirty->right,
ddirty->top, ddirty->bottom);
}
 
vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
 
ddirty->left = ddirty->top = S32_MAX;
ddirty->right = ddirty->bottom = S32_MIN;
}
 
/**
* vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed
* framebuffer and the screen target system.
*
* @dev_priv: Pointer to the device private structure.
* @file_priv: Pointer to a struct drm-file identifying the caller. May be
* set to NULL, but then @user_fence_rep must also be set to NULL.
* @vfb: Pointer to the dma-buffer backed framebuffer.
* @clips: Array of clip rects. Either @clips or @vclips must be NULL.
* @vclips: Alternate array of clip rects. Either @clips or @vclips must
* be NULL.
* @num_clips: Number of clip rects in @clips or @vclips.
* @increment: Increment to use when looping over @clips or @vclips.
* @to_surface: Whether to DMA to the screen target system as opposed to
* from the screen target system.
* @interruptible: Whether to perform waits interruptible if possible.
*
* If DMA-ing to the screen target system, the function will also notify
* the screen target system that a bounding box of the cliprects has been
* updated.
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct drm_clip_rect *clips,
struct drm_vmw_rect *vclips,
uint32_t num_clips,
int increment,
bool to_surface,
bool interruptible)
{
struct vmw_dma_buffer *buf =
container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
struct vmw_stdu_dirty ddirty;
int ret;
 
ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
false);
if (ret)
return ret;
 
ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
SVGA3D_READ_HOST_VRAM;
ddirty.left = ddirty.top = S32_MAX;
ddirty.right = ddirty.bottom = S32_MIN;
ddirty.pitch = vfb->base.pitches[0];
ddirty.buf = buf;
ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
ddirty.base.clip = vmw_stdu_dmabuf_clip;
ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
num_clips * sizeof(SVGA3dCopyBox) +
sizeof(SVGA3dCmdSurfaceDMASuffix);
if (to_surface)
ddirty.base.fifo_reserve_size += sizeof(struct vmw_stdu_update);
 
ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
0, 0, num_clips, increment, &ddirty.base);
vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
user_fence_rep);
 
return ret;
}
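 
/*
 * Usage sketch (illustrative only, not part of the driver): DMA a single
 * rectangle of a dma-buffer backed framebuffer to the screen target system.
 * The helper name and its callers are hypothetical; no fence report is
 * requested, so both file_priv and user_fence_rep are NULL.
 */
static inline int vmw_kms_stdu_dma_one_rect(struct vmw_private *dev_priv,
struct vmw_framebuffer *vfb,
u16 x, u16 y, u16 w, u16 h)
{
struct drm_clip_rect clip = {
.x1 = x, .y1 = y,
.x2 = x + w, .y2 = y + h,
};
 
/* One clip rect, increment of one, DMA towards the screen target. */
return vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, &clip, NULL,
1, 1, true, false);
}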
 
/**
* vmw_kms_stdu_surface_clip - Callback to encode a surface copy command cliprect
*
* @dirty: The closure structure.
*
* Encodes a surface copy command cliprect and updates the bounding box
* for the copy.
*/
static void vmw_kms_stdu_surface_clip(struct vmw_kms_dirty *dirty)
{
struct vmw_stdu_dirty *sdirty =
container_of(dirty, struct vmw_stdu_dirty, base);
struct vmw_stdu_surface_copy *cmd = dirty->cmd;
struct vmw_screen_target_display_unit *stdu =
container_of(dirty->unit, typeof(*stdu), base);
 
if (sdirty->sid != stdu->display_srf->res.id) {
struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
 
blit += dirty->num_hits;
blit->srcx = dirty->fb_x;
blit->srcy = dirty->fb_y;
blit->x = dirty->unit_x1;
blit->y = dirty->unit_y1;
blit->d = 1;
blit->w = dirty->unit_x2 - dirty->unit_x1;
blit->h = dirty->unit_y2 - dirty->unit_y1;
}
 
dirty->num_hits++;
 
/* Destination bounding box */
sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
}
 
/**
* vmw_kms_stdu_surface_fifo_commit - Callback to fill in and submit a surface
* copy command.
*
* @dirty: The closure structure.
*
* Fills in the missing fields in a surface copy command, and encodes a screen
* target update command.
*/
static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
{
struct vmw_stdu_dirty *sdirty =
container_of(dirty, struct vmw_stdu_dirty, base);
struct vmw_screen_target_display_unit *stdu =
container_of(dirty->unit, typeof(*stdu), base);
struct vmw_stdu_surface_copy *cmd = dirty->cmd;
struct vmw_stdu_update *update;
size_t blit_size = sizeof(SVGA3dCopyBox) * dirty->num_hits;
size_t commit_size;
 
if (!dirty->num_hits) {
vmw_fifo_commit(dirty->dev_priv, 0);
return;
}
 
if (sdirty->sid != stdu->display_srf->res.id) {
struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
 
cmd->header.id = SVGA_3D_CMD_SURFACE_COPY;
cmd->header.size = sizeof(cmd->body) + blit_size;
cmd->body.src.sid = sdirty->sid;
cmd->body.dest.sid = stdu->display_srf->res.id;
update = (struct vmw_stdu_update *) &blit[dirty->num_hits];
commit_size = sizeof(*cmd) + blit_size + sizeof(*update);
} else {
update = dirty->cmd;
commit_size = sizeof(*update);
}
 
vmw_stdu_populate_update(update, stdu->base.unit, sdirty->left,
sdirty->right, sdirty->top, sdirty->bottom);
 
vmw_fifo_commit(dirty->dev_priv, commit_size);
 
sdirty->left = sdirty->top = S32_MAX;
sdirty->right = sdirty->bottom = S32_MIN;
}
 
/**
* vmw_kms_stdu_surface_dirty - Dirty part of a surface backed framebuffer
*
* @dev_priv: Pointer to the device private structure.
* @framebuffer: Pointer to the surface-buffer backed framebuffer.
* @clips: Array of clip rects. Either @clips or @vclips must be NULL.
* @vclips: Alternate array of clip rects. Either @clips or @vclips must
* be NULL.
* @srf: Pointer to surface to blit from. If NULL, the surface attached
* to @framebuffer will be used.
* @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
* @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
* @num_clips: Number of clip rects in @clips.
* @inc: Increment to use when looping over @clips.
* @out_fence: If non-NULL, will return a ref-counted pointer to a
* struct vmw_fence_obj. The returned fence pointer may be NULL in which
* case the device has already synchronized.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
struct drm_vmw_rect *vclips,
struct vmw_resource *srf,
s32 dest_x,
s32 dest_y,
unsigned num_clips, int inc,
struct vmw_fence_obj **out_fence)
{
struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base);
struct vmw_stdu_dirty sdirty;
int ret;
 
if (!srf)
srf = &vfbs->surface->res;
 
ret = vmw_kms_helper_resource_prepare(srf, true);
if (ret)
return ret;
 
if (vfbs->is_dmabuf_proxy) {
ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
if (ret)
goto out_finish;
}
 
sdirty.base.fifo_commit = vmw_kms_stdu_surface_fifo_commit;
sdirty.base.clip = vmw_kms_stdu_surface_clip;
sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) +
sizeof(SVGA3dCopyBox) * num_clips +
sizeof(struct vmw_stdu_update);
sdirty.sid = srf->id;
sdirty.left = sdirty.top = S32_MAX;
sdirty.right = sdirty.bottom = S32_MIN;
 
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
dest_x, dest_y, num_clips, inc,
&sdirty.base);
out_finish:
vmw_kms_helper_resource_finish(srf, out_fence);
 
return ret;
}
 
 
/*
* Screen Target CRTC dispatch table
*/
static struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
.save = vmw_du_crtc_save,
.restore = vmw_du_crtc_restore,
// .cursor_set2 = vmw_du_crtc_cursor_set2,
// .cursor_move = vmw_du_crtc_cursor_move,
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_stdu_crtc_destroy,
.set_config = vmw_stdu_crtc_set_config,
.page_flip = vmw_stdu_crtc_page_flip,
};
 
 
 
/******************************************************************************
* Screen Target Display Unit Encoder Functions
*****************************************************************************/
 
/**
* vmw_stdu_encoder_destroy - cleans up the STDU
*
* @encoder: used the get the containing STDU
*
* vmwgfx cleans up crtc/encoder/connector all at the same time so technically
* this can be a no-op. Nevertheless, it doesn't hurt to have this in case
* the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
* get called.
*/
static void vmw_stdu_encoder_destroy(struct drm_encoder *encoder)
{
vmw_stdu_destroy(vmw_encoder_to_stdu(encoder));
}
 
static struct drm_encoder_funcs vmw_stdu_encoder_funcs = {
.destroy = vmw_stdu_encoder_destroy,
};
 
 
 
/******************************************************************************
* Screen Target Display Unit Connector Functions
*****************************************************************************/
 
/**
* vmw_stdu_connector_destroy - cleans up the STDU
*
* @connector: used to get the containing STDU
*
* vmwgfx cleans up crtc/encoder/connector all at the same time so technically
* this can be a no-op. Nevertheless, it doesn't hurt to have this in case
* the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
* get called.
*/
static void vmw_stdu_connector_destroy(struct drm_connector *connector)
{
vmw_stdu_destroy(vmw_connector_to_stdu(connector));
}
 
 
 
static struct drm_connector_funcs vmw_stdu_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.save = vmw_du_connector_save,
.restore = vmw_du_connector_restore,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
.set_property = vmw_du_connector_set_property,
.destroy = vmw_stdu_connector_destroy,
};
 
 
 
/**
* vmw_stdu_init - Sets up a Screen Target Display Unit
*
* @dev_priv: VMW DRM device
* @unit: unit number range from 0 to VMWGFX_NUM_DISPLAY_UNITS
*
* This function is called once per CRTC, and allocates one Screen Target
* display unit to represent that CRTC. Since the SVGA device does not separate
* out encoder and connector, they are represented as part of the STDU as well.
*/
static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
{
struct vmw_screen_target_display_unit *stdu;
struct drm_device *dev = dev_priv->dev;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
 
 
stdu = kzalloc(sizeof(*stdu), GFP_KERNEL);
if (!stdu)
return -ENOMEM;
 
stdu->base.unit = unit;
crtc = &stdu->base.crtc;
encoder = &stdu->base.encoder;
connector = &stdu->base.connector;
 
stdu->base.pref_active = (unit == 0);
stdu->base.pref_width = dev_priv->initial_width;
stdu->base.pref_height = dev_priv->initial_height;
stdu->base.is_implicit = true;
 
drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
connector->status = vmw_du_connector_detect(connector, false);
 
drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL);
drm_mode_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
 
(void) drm_connector_register(connector);
 
drm_crtc_init(dev, crtc, &vmw_stdu_crtc_funcs);
 
drm_mode_crtc_set_gamma_size(crtc, 256);
 
drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
 
return 0;
}
 
 
 
/**
* vmw_stdu_destroy - Cleans up a vmw_screen_target_display_unit
*
* @stdu: Screen Target Display Unit to be destroyed
*
* Clean up after vmw_stdu_init
*/
static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu)
{
vmw_stdu_unpin_display(stdu);
 
vmw_du_cleanup(&stdu->base);
kfree(stdu);
}
 
 
 
/******************************************************************************
* Screen Target Display KMS Functions
*
* These functions are called by the common KMS code in vmwgfx_kms.c
*****************************************************************************/
 
/**
* vmw_kms_stdu_init_display - Initializes a Screen Target based display
*
* @dev_priv: VMW DRM device
*
* This function initializes a Screen Target based display device. It checks
* the capability bits to make sure the underlying hardware can support
* screen targets, and then creates the maximum number of CRTCs, a.k.a. Display
* Units, as supported by the display hardware.
*
* RETURNS:
* 0 on success, error code otherwise
*/
int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
int i, ret;
 
 
/* Do nothing if Screen Target support is turned off */
if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE)
return -ENOSYS;
 
if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
return -ENOSYS;
 
ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
if (unlikely(ret != 0))
return ret;
 
ret = drm_mode_create_dirty_info_property(dev);
if (unlikely(ret != 0))
goto err_vblank_cleanup;
 
dev_priv->active_display_unit = vmw_du_screen_target;
 
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
ret = vmw_stdu_init(dev_priv, i);
 
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to initialize STDU %d", i);
goto err_vblank_cleanup;
}
}
 
DRM_INFO("Screen Target Display device initialized\n");
 
return 0;
 
err_vblank_cleanup:
drm_vblank_cleanup(dev);
return ret;
}
 
 
 
/**
* vmw_kms_stdu_close_display - Cleans up after vmw_kms_stdu_init_display
*
* @dev_priv: VMW DRM device
*
* Frees up any resources allocated by vmw_kms_stdu_init_display
*
* RETURNS:
* 0 on success
*/
int vmw_kms_stdu_close_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
drm_vblank_cleanup(dev);
 
return 0;
}
/drivers/video/drm/vmwgfx/vmwgfx_surface.c
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
27,9 → 27,12
 
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include <ttm/ttm_placement.h>
#include "svga3d_surfacedefs.h"
#include "device_include/svga3d_surfacedefs.h"
 
 
/**
* struct vmw_user_surface - User-space visible surface resource
*
42,6 → 45,8
struct ttm_prime_object prime;
struct vmw_surface srf;
uint32_t size;
struct drm_master *master;
struct ttm_base_object *backup_base;
};
 
/**
219,7 → 224,7
cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id;
cmd->body.surfaceFlags = srf->flags;
cmd->body.format = cpu_to_le32(srf->format);
cmd->body.format = srf->format;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
cmd->body.face[i].numMipLevels = srf->mip_levels[i];
 
339,7 → 344,7
dev_priv->used_memory_size -= res->backup_size;
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
vmw_3d_resource_dec(dev_priv, false);
vmw_fifo_resource_dec(dev_priv);
}
 
/**
575,7 → 580,7
 
BUG_ON(res_free == NULL);
if (!dev_priv->has_mob)
(void) vmw_3d_resource_inc(dev_priv, false);
vmw_fifo_resource_inc(dev_priv);
ret = vmw_resource_init(dev_priv, res, true, res_free,
(dev_priv->has_mob) ? &vmw_gb_surface_func :
&vmw_legacy_surface_func);
582,7 → 587,7
 
if (unlikely(ret != 0)) {
if (!dev_priv->has_mob)
vmw_3d_resource_dec(dev_priv, false);
vmw_fifo_resource_dec(dev_priv);
res_free(res);
return ret;
}
592,6 → 597,7
* surface validate.
*/
 
INIT_LIST_HEAD(&srf->view_list);
vmw_resource_activate(res, vmw_hw_surface_destroy);
return ret;
}
625,10 → 631,12
struct vmw_private *dev_priv = srf->res.dev_priv;
uint32_t size = user_srf->size;
 
if (user_srf->master)
drm_master_put(&user_srf->master);
kfree(srf->offsets);
kfree(srf->sizes);
kfree(srf->snooper.image);
// ttm_base_object_kfree(user_srf, base);
ttm_prime_object_kfree(user_srf, prime);
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
 
649,10 → 657,28
struct vmw_resource *res = &user_srf->srf.res;
 
*p_base = NULL;
if (user_srf->backup_base)
ttm_base_object_unref(&user_srf->backup_base);
vmw_resource_unreference(&res);
}
 
#if 0
/**
* vmw_user_surface_destroy_ioctl - Ioctl function implementing
* the user surface destroy functionality.
*
* @dev: Pointer to a struct drm_device.
* @data: Pointer to data copied from / to user-space.
* @file_priv: Pointer to a drm file private structure.
*/
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 
return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}
 
/**
* vmw_user_surface_define_ioctl - Ioctl function implementing
* the user surface define functionality.
704,6 → 730,7
desc = svga3dsurface_get_desc(req->format);
if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
DRM_ERROR("Invalid surface format for surface creation.\n");
DRM_ERROR("Format requested is: %d\n", req->format);
return -EINVAL;
}
 
814,6 → 841,25
if (unlikely(ret != 0))
goto out_unlock;
 
/*
* A gb-aware client referencing a shared surface will
* expect a backup buffer to be present.
*/
if (dev_priv->has_mob && req->shareable) {
uint32_t backup_handle;
 
ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
res->backup_size,
true,
&backup_handle,
&res->backup,
&user_srf->backup_base);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
}
 
tmp = vmw_resource_reference(&srf->res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->shareable, VMW_RES_SURFACE,
840,10 → 886,89
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
 
 
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
struct drm_file *file_priv,
uint32_t u_handle,
enum drm_vmw_handle_type handle_type,
struct ttm_base_object **base_p)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_user_surface *user_srf;
uint32_t handle;
struct ttm_base_object *base;
int ret;
 
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
if (unlikely(ret != 0))
return ret;
} else {
if (unlikely(drm_is_render_client(file_priv))) {
DRM_ERROR("Render client refused legacy "
"surface reference.\n");
return -EACCES;
}
if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
DRM_ERROR("Locked master refused legacy "
"surface reference.\n");
return -EACCES;
}
 
handle = u_handle;
}
 
ret = -EINVAL;
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
if (unlikely(base == NULL)) {
DRM_ERROR("Could not find surface to reference.\n");
goto out_no_lookup;
}
 
if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
DRM_ERROR("Referenced object is not a surface.\n");
goto out_bad_resource;
}
 
if (handle_type != DRM_VMW_HANDLE_PRIME) {
user_srf = container_of(base, struct vmw_user_surface,
prime.base);
 
/*
* Make sure the surface creator has the same
* authenticating master.
*/
if (drm_is_primary_client(file_priv) &&
user_srf->master != file_priv->master) {
DRM_ERROR("Trying to reference surface outside of"
" master domain.\n");
ret = -EACCES;
goto out_bad_resource;
}
 
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_bad_resource;
}
}
 
*base_p = base;
return 0;
 
out_bad_resource:
ttm_base_object_unref(&base);
out_no_lookup:
if (handle_type == DRM_VMW_HANDLE_PRIME)
(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
 
return ret;
}
 
/**
* vmw_user_surface_reference_ioctl - Ioctl function implementing
* the user surface reference functionality.
890,8 → 1015,7
ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
ret = -EFAULT;
}
out_bad_resource:
out_no_reference:
 
ttm_base_object_unref(&base);
 
return ret;
898,3 → 1022,377
}
 
#endif
/**
* vmw_gb_surface_create - Encode and submit a guest-backed surface define
* command for a surface resource.
*
* @res: Pointer to the struct vmw_resource embedded in a struct vmw_surface.
*/
static int vmw_gb_surface_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_surface *srf = vmw_res_to_srf(res);
uint32_t cmd_len, cmd_id, submit_len;
int ret;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBSurface body;
} *cmd;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBSurface_v2 body;
} *cmd2;
 
if (likely(res->id != -1))
return 0;
 
vmw_fifo_resource_inc(dev_priv);
ret = vmw_resource_alloc_id(res);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate a surface id.\n");
goto out_no_id;
}
 
if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
ret = -EBUSY;
goto out_no_fifo;
}
 
if (srf->array_size > 0) {
/* has_dx was checked at creation time. */
cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
cmd_len = sizeof(cmd2->body);
submit_len = sizeof(*cmd2);
} else {
cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
cmd_len = sizeof(cmd->body);
submit_len = sizeof(*cmd);
}
 
cmd = vmw_fifo_reserve(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
 
if (srf->array_size > 0) {
cmd2->header.id = cmd_id;
cmd2->header.size = cmd_len;
cmd2->body.sid = srf->res.id;
cmd2->body.surfaceFlags = srf->flags;
cmd2->body.format = cpu_to_le32(srf->format);
cmd2->body.numMipLevels = srf->mip_levels[0];
cmd2->body.multisampleCount = srf->multisample_count;
cmd2->body.autogenFilter = srf->autogen_filter;
cmd2->body.size.width = srf->base_size.width;
cmd2->body.size.height = srf->base_size.height;
cmd2->body.size.depth = srf->base_size.depth;
cmd2->body.arraySize = srf->array_size;
} else {
cmd->header.id = cmd_id;
cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id;
cmd->body.surfaceFlags = srf->flags;
cmd->body.format = cpu_to_le32(srf->format);
cmd->body.numMipLevels = srf->mip_levels[0];
cmd->body.multisampleCount = srf->multisample_count;
cmd->body.autogenFilter = srf->autogen_filter;
cmd->body.size.width = srf->base_size.width;
cmd->body.size.height = srf->base_size.height;
cmd->body.size.depth = srf->base_size.depth;
}
 
vmw_fifo_commit(dev_priv, submit_len);
 
return 0;
 
out_no_fifo:
vmw_resource_release_id(res);
out_no_id:
vmw_fifo_resource_dec(dev_priv);
return ret;
}
 
 
static int vmw_gb_surface_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf)
{
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdBindGBSurface body;
} *cmd1;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdUpdateGBSurface body;
} *cmd2;
uint32_t submit_size;
struct ttm_buffer_object *bo = val_buf->bo;
 
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
 
cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
if (unlikely(cmd1 == NULL)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"binding.\n");
return -ENOMEM;
}
 
cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.sid = res->id;
cmd1->body.mobid = bo->mem.start;
if (res->backup_dirty) {
cmd2 = (void *) &cmd1[1];
cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
cmd2->header.size = sizeof(cmd2->body);
cmd2->body.sid = res->id;
res->backup_dirty = false;
}
vmw_fifo_commit(dev_priv, submit_size);
 
return 0;
}
 
static int vmw_gb_surface_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf)
{
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = val_buf->bo;
struct vmw_fence_obj *fence;
 
struct {
SVGA3dCmdHeader header;
SVGA3dCmdReadbackGBSurface body;
} *cmd1;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdInvalidateGBSurface body;
} *cmd2;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdBindGBSurface body;
} *cmd3;
uint32_t submit_size;
uint8_t *cmd;
 
 
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
cmd = vmw_fifo_reserve(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"unbinding.\n");
return -ENOMEM;
}
 
if (readback) {
cmd1 = (void *) cmd;
cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.sid = res->id;
cmd3 = (void *) &cmd1[1];
} else {
cmd2 = (void *) cmd;
cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
cmd2->header.size = sizeof(cmd2->body);
cmd2->body.sid = res->id;
cmd3 = (void *) &cmd2[1];
}
 
cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
cmd3->header.size = sizeof(cmd3->body);
cmd3->body.sid = res->id;
cmd3->body.mobid = SVGA3D_INVALID_ID;
 
vmw_fifo_commit(dev_priv, submit_size);
 
/*
* Create a fence object and fence the backup buffer.
*/
 
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
 
vmw_fence_single_bo(val_buf->bo, fence);
 
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
 
return 0;
}
 
static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_surface *srf = vmw_res_to_srf(res);
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDestroyGBSurface body;
} *cmd;
 
if (likely(res->id == -1))
return 0;
 
mutex_lock(&dev_priv->binding_mutex);
vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
vmw_binding_res_list_scrub(&res->binding_head);
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
cmd->header.size = sizeof(cmd->body);
cmd->body.sid = res->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
mutex_unlock(&dev_priv->binding_mutex);
vmw_resource_release_id(res);
vmw_fifo_resource_dec(dev_priv);
 
return 0;
}
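For orientation, the create/bind/unbind/destroy helpers above are presumably the backend that vmw_surface_init() selects as vmw_gb_surface_func when dev_priv->has_mob is set (see the 575,7 → 580,7 hunk earlier). A minimal sketch of that wiring follows; the table itself is not part of the hunks shown here, and the member names are assumptions based on the driver's vmw_res_func interface.
 
/*
 * Sketch only: not part of this revision. Field names are assumed from
 * the vmw_res_func callback interface used elsewhere in the driver.
 */
static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,            /* GB surfaces live in a MOB-backed buffer */
	.may_evict = true,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind
};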
/**
* vmw_surface_gb_priv_define - Define a private GB surface
*
* @dev: Pointer to a struct drm_device
* @user_accounting_size: Used to track user-space memory usage, set
* to 0 for kernel mode only memory
* @svga3d_flags: SVGA3d surface flags for the device
* @format: requested surface format
* @for_scanout: true if intended to be used for a scanout buffer
* @num_mip_levels: number of MIP levels
* @multisample_count: Surface multisample count
* @array_size: Surface array size.
* @size: width, height, depth of the surface requested
* @srf_out: allocated surface. Set to NULL on failure.
*
* GB surfaces allocated by this function will not have a user mode handle, and
* thus will only be visible to vmwgfx. For optimization reasons the
* surface may later be given a user mode handle by another function to make
* it available to user mode drivers.
*/
int vmw_surface_gb_priv_define(struct drm_device *dev,
uint32_t user_accounting_size,
uint32_t svga3d_flags,
SVGA3dSurfaceFormat format,
bool for_scanout,
uint32_t num_mip_levels,
uint32_t multisample_count,
uint32_t array_size,
struct drm_vmw_size size,
struct vmw_surface **srf_out)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_surface *user_srf;
struct vmw_surface *srf;
int ret;
u32 num_layers;
 
*srf_out = NULL;
 
if (for_scanout) {
if (!svga3dsurface_is_screen_target_format(format)) {
DRM_ERROR("Invalid Screen Target surface format.");
return -EINVAL;
}
} else {
const struct svga3d_surface_desc *desc;
 
desc = svga3dsurface_get_desc(format);
if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
DRM_ERROR("Invalid surface format.\n");
return -EINVAL;
}
}
 
/* array_size must be zero for a non-DX host. */
if (array_size > 0 && !dev_priv->has_dx) {
DRM_ERROR("Tried to create DX surface on non-DX host.\n");
return -EINVAL;
}
 
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
 
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
user_accounting_size, false, true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for surface"
" creation.\n");
goto out_unlock;
}
 
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
if (unlikely(user_srf == NULL)) {
ret = -ENOMEM;
goto out_no_user_srf;
}
 
*srf_out = &user_srf->srf;
user_srf->size = user_accounting_size;
user_srf->prime.base.shareable = false;
user_srf->prime.base.tfile = NULL;
 
srf = &user_srf->srf;
srf->flags = svga3d_flags;
srf->format = format;
srf->scanout = for_scanout;
srf->mip_levels[0] = num_mip_levels;
srf->num_sizes = 1;
srf->sizes = NULL;
srf->offsets = NULL;
srf->base_size = size;
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
srf->array_size = array_size;
srf->multisample_count = multisample_count;
 
if (array_size)
num_layers = array_size;
else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
num_layers = SVGA3D_MAX_SURFACE_FACES;
else
num_layers = 1;
 
srf->res.backup_size =
svga3dsurface_get_serialized_size(srf->format,
srf->base_size,
srf->mip_levels[0],
num_layers);
 
if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
srf->res.backup_size += sizeof(SVGA3dDXSOState);
 
if (dev_priv->active_display_unit == vmw_du_screen_target &&
for_scanout)
srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
 
/*
* From this point, the generic resource management functions
* destroy the object on failure.
*/
ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
 
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
 
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
 
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
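A hedged usage sketch of vmw_surface_gb_priv_define(): the hypothetical helper below shows how a kernel-mode caller (for example a screen-target display path) might define a private scanout surface. Only the function signature comes from the code above; the helper name, resolution, format choice and zero svga3d_flags are illustrative assumptions.
 
/* Hypothetical helper, for illustration only; not part of this revision. */
static int vmw_example_define_scanout_surface(struct drm_device *dev,
					      struct vmw_surface **srf_out)
{
	/* Assumed mode; SVGA3D_X8R8G8B8 is a screen-target-capable format. */
	struct drm_vmw_size size = { .width = 1024, .height = 768, .depth = 1 };

	/*
	 * svga3d_flags left at 0: the SCREENTARGET flag is added internally
	 * when for_scanout is set and screen targets are active (see above).
	 * user_accounting_size is 0 for kernel-mode-only memory.
	 */
	return vmw_surface_gb_priv_define(dev,
					  0,               /* user_accounting_size */
					  0,               /* svga3d_flags */
					  SVGA3D_X8R8G8B8, /* format */
					  true,            /* for_scanout */
					  1,               /* num_mip_levels */
					  0,               /* multisample_count */
					  0,               /* array_size */
					  size,
					  srf_out);
}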
/drivers/video/drm/vmwgfx/vmwgfx_ttm_glue.c
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
57,8 → 57,6
 
int vmw_ttm_global_init(struct vmw_private *dev_priv)
{
ENTER();
 
struct drm_global_reference *global_ref;
int ret;
 
88,7 → 86,6
goto out_no_bo;
}
 
LEAVE();
return 0;
out_no_bo:
drm_global_item_unref(&dev_priv->mem_global_ref);