Subversion Repositories Kolibri OS

Compare Revisions

Rev 4074 → Rev 4075

/drivers/video/drm/drm_crtc.c
29,6 → 29,7
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
87,7 → 88,7
 
/* Avoid boilerplate. I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list) \
char *fnname(int val) \
const char *fnname(int val) \
{ \
int i; \
for (i = 0; i < ARRAY_SIZE(list); i++) { \
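With the const-ified signature, the macro generates lookup helpers that return string literals. A sketch of the expansion for the DPMS list below, assuming the usual tail of the macro (which this hunk truncates):

const char *drm_get_dpms_name(int val)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++) {
		if (drm_dpms_enum_list[i].type == val)
			return drm_dpms_enum_list[i].name;
	}
	return "(unknown)";
}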
100,7 → 101,7
/*
* Global properties
*/
static struct drm_prop_enum_list drm_dpms_enum_list[] =
static const struct drm_prop_enum_list drm_dpms_enum_list[] =
{ { DRM_MODE_DPMS_ON, "On" },
{ DRM_MODE_DPMS_STANDBY, "Standby" },
{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
112,7 → 113,7
/*
* Optional properties
*/
static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
{
{ DRM_MODE_SCALE_NONE, "None" },
{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
120,7 → 121,7
{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
};
 
static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
{
{ DRM_MODE_DITHERING_OFF, "Off" },
{ DRM_MODE_DITHERING_ON, "On" },
130,7 → 131,7
/*
* Non-global properties, but "required" for certain connectors.
*/
static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
{
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
139,7 → 140,7
 
DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
 
static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
{
{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
149,7 → 150,7
DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
drm_dvi_i_subconnector_enum_list)
 
static struct drm_prop_enum_list drm_tv_select_enum_list[] =
static const struct drm_prop_enum_list drm_tv_select_enum_list[] =
{
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
160,7 → 161,7
 
DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
 
static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
{
{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
172,18 → 173,15
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
drm_tv_subconnector_enum_list)
 
static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
{ DRM_MODE_DIRTY_OFF, "Off" },
{ DRM_MODE_DIRTY_ON, "On" },
{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
};
 
DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
drm_dirty_info_enum_list)
 
struct drm_conn_prop_enum_list {
int type;
char *name;
const char *name;
int count;
};
 
209,7 → 207,7
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
};
 
static struct drm_prop_enum_list drm_encoder_enum_list[] =
static const struct drm_prop_enum_list drm_encoder_enum_list[] =
{ { DRM_MODE_ENCODER_NONE, "None" },
{ DRM_MODE_ENCODER_DAC, "DAC" },
{ DRM_MODE_ENCODER_TMDS, "TMDS" },
218,7 → 216,7
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
};
 
char *drm_get_encoder_name(struct drm_encoder *encoder)
const char *drm_get_encoder_name(const struct drm_encoder *encoder)
{
static char buf[32];
 
229,7 → 227,7
}
EXPORT_SYMBOL(drm_get_encoder_name);
 
char *drm_get_connector_name(struct drm_connector *connector)
const char *drm_get_connector_name(const struct drm_connector *connector)
{
static char buf[32];
 
240,7 → 238,7
}
EXPORT_SYMBOL(drm_get_connector_name);
 
char *drm_get_connector_status_name(enum drm_connector_status status)
const char *drm_get_connector_status_name(enum drm_connector_status status)
{
if (status == connector_status_connected)
return "connected";
251,6 → 249,28
}
EXPORT_SYMBOL(drm_get_connector_status_name);
 
static char printable_char(int c)
{
return isascii(c) && isprint(c) ? c : '?';
}
 
const char *drm_get_format_name(uint32_t format)
{
static char buf[32];
 
snprintf(buf, sizeof(buf),
"%c%c%c%c %s-endian (0x%08x)",
printable_char(format & 0xff),
printable_char((format >> 8) & 0xff),
printable_char((format >> 16) & 0xff),
printable_char((format >> 24) & 0x7f),
format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
format);
 
return buf;
}
EXPORT_SYMBOL(drm_get_format_name);
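A worked example of the decoding above: DRM_FORMAT_XRGB8888 is fourcc_code('X', 'R', '2', '4'), i.e. 0x34325258, with the DRM_FORMAT_BIG_ENDIAN bit (bit 31) clear, so

/* prints: format XR24 little-endian (0x34325258) */
DRM_DEBUG_KMS("format %s\n", drm_get_format_name(DRM_FORMAT_XRGB8888));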
 
/**
* drm_mode_object_get - allocate a new modeset identifier
* @dev: DRM device
413,7 → 433,7
mutex_lock(&dev->mode_config.fb_lock);
fb = __drm_framebuffer_lookup(dev, id);
if (fb)
kref_get(&fb->refcount);
drm_framebuffer_reference(fb);
mutex_unlock(&dev->mode_config.fb_lock);
 
return fb;
568,17 → 588,9
}
 
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
if (plane->fb == fb) {
/* should turn off the crtc */
ret = plane->funcs->disable_plane(plane);
if (ret)
DRM_ERROR("failed to disable plane with busy fb\n");
/* disconnect the plane from the fb and crtc: */
__drm_framebuffer_unreference(plane->fb);
plane->fb = NULL;
plane->crtc = NULL;
if (plane->fb == fb)
drm_plane_force_disable(plane);
}
}
drm_modeset_unlock_all(dev);
}
 
592,7 → 604,7
* @crtc: CRTC object to init
* @funcs: callbacks for the new CRTC
*
* Inits a new object created as base part of an driver crtc object.
* Inits a new object created as base part of a driver crtc object.
*
* RETURNS:
* Zero on success, error code on failure.
627,11 → 639,12
EXPORT_SYMBOL(drm_crtc_init);
 
/**
* drm_crtc_cleanup - Cleans up the core crtc usage.
* drm_crtc_cleanup - Clean up the core crtc usage
* @crtc: CRTC to cleanup
*
* Cleanup @crtc. Removes from drm modesetting space
* does NOT free object, caller does that.
* This function cleans up @crtc and removes it from the DRM mode setting
* core. Note that the function does *not* free the crtc structure itself,
* this is the responsibility of the caller.
*/
void drm_crtc_cleanup(struct drm_crtc *crtc)
{
656,7 → 669,7
void drm_mode_probed_add(struct drm_connector *connector,
struct drm_display_mode *mode)
{
list_add(&mode->head, &connector->probed_modes);
list_add_tail(&mode->head, &connector->probed_modes);
}
EXPORT_SYMBOL(drm_mode_probed_add);
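The switch from list_add() to list_add_tail() preserves the order in which the driver reports modes instead of reversing it:

/* Probing modes A, B, C in that order:
 *   list_add()      -> connector->probed_modes = C, B, A
 *   list_add_tail() -> connector->probed_modes = A, B, C
 */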
 
802,6 → 815,21
}
EXPORT_SYMBOL(drm_encoder_cleanup);
 
/**
* drm_plane_init - Initialise a new plane object
* @dev: DRM device
* @plane: plane object to init
* @possible_crtcs: bitmask of possible CRTCs
* @funcs: callbacks for the new plane
* @formats: array of supported formats (%DRM_FORMAT_*)
* @format_count: number of elements in @formats
* @priv: plane is private (hidden from userspace)?
*
* Inits a new object created as base part of a driver plane object.
*
* RETURNS:
* Zero on success, error code on failure.
*/
int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
850,6 → 878,14
}
EXPORT_SYMBOL(drm_plane_init);
 
/**
* drm_plane_cleanup - Clean up the core plane usage
* @plane: plane to cleanup
*
* This function cleans up @plane and removes it from the DRM mode setting
* core. Note that the function does *not* free the plane structure itself,
* this is the responsibility of the caller.
*/
void drm_plane_cleanup(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
867,6 → 903,32
EXPORT_SYMBOL(drm_plane_cleanup);
 
/**
* drm_plane_force_disable - Forcibly disable a plane
* @plane: plane to disable
*
* Forces the plane to be disabled.
*
* Used when the plane's current framebuffer is destroyed,
* and when restoring fbdev mode.
*/
void drm_plane_force_disable(struct drm_plane *plane)
{
int ret;
 
if (!plane->fb)
return;
 
ret = plane->funcs->disable_plane(plane);
if (ret)
DRM_ERROR("failed to disable plane with busy fb\n");
/* disconnect the plane from the fb and crtc: */
__drm_framebuffer_unreference(plane->fb);
plane->fb = NULL;
plane->crtc = NULL;
}
EXPORT_SYMBOL(drm_plane_force_disable);
 
/**
* drm_mode_create - create a new display mode
* @dev: DRM device
*
1741,7 → 1803,7
 
plane_resp->plane_id = plane->base.id;
plane_resp->possible_crtcs = plane->possible_crtcs;
plane_resp->gamma_size = plane->gamma_size;
plane_resp->gamma_size = 0;
 
/*
* This ioctl is called twice, once to determine how much space is
1835,7 → 1897,8
if (fb->pixel_format == plane->format_types[i])
break;
if (i == plane->format_count) {
DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
DRM_DEBUG_KMS("Invalid pixel format %s\n",
drm_get_format_name(fb->pixel_format));
ret = -EINVAL;
goto out;
}
1908,20 → 1971,33
int drm_mode_set_config_internal(struct drm_mode_set *set)
{
struct drm_crtc *crtc = set->crtc;
struct drm_framebuffer *fb, *old_fb;
struct drm_framebuffer *fb;
struct drm_crtc *tmp;
int ret;
 
old_fb = crtc->fb;
/*
* NOTE: ->set_config can also disable other crtcs (if we steal all
* connectors from it), hence we need to refcount the fbs across all
* crtcs. Atomic modeset will have saner semantics ...
*/
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
tmp->old_fb = tmp->fb;
 
fb = set->fb;
 
ret = crtc->funcs->set_config(set);
if (ret == 0) {
if (old_fb)
drm_framebuffer_unreference(old_fb);
if (fb)
drm_framebuffer_reference(fb);
/* crtc->fb must be updated by ->set_config, enforces this. */
WARN_ON(fb != crtc->fb);
}
 
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
if (tmp->fb)
drm_framebuffer_reference(tmp->fb);
// if (tmp->old_fb)
// drm_framebuffer_unreference(tmp->old_fb);
}
 
return ret;
}
EXPORT_SYMBOL(drm_mode_set_config_internal);
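For comparison, the balanced refcounting that the NOTE above describes pairs each new reference with an unreference of the crtc's previous fb; this port keeps that unreference commented out. A sketch of the intended upstream shape, using the same tmp->old_fb bookkeeping:

list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
	if (tmp->fb)
		drm_framebuffer_reference(tmp->fb);
	if (tmp->old_fb)
		drm_framebuffer_unreference(tmp->old_fb);
}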
2102,10 → 2178,10
return ret;
}
 
int drm_mode_cursor_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
static int drm_mode_cursor_common(struct drm_device *dev,
struct drm_mode_cursor2 *req,
struct drm_file *file_priv)
{
struct drm_mode_cursor *req = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
int ret = 0;
2125,11 → 2201,15
 
mutex_lock(&crtc->mutex);
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set) {
if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
ret = -ENXIO;
goto out;
}
/* Turns off the cursor if handle is 0 */
if (crtc->funcs->cursor_set2)
ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
req->width, req->height, req->hot_x, req->hot_y);
else
ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
req->width, req->height);
}
2146,7 → 2226,26
mutex_unlock(&crtc->mutex);
 
return ret;
 
}
int drm_mode_cursor_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_cursor *req = data;
struct drm_mode_cursor2 new_req;
 
memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
new_req.hot_x = new_req.hot_y = 0;
 
return drm_mode_cursor_common(dev, &new_req, file_priv);
}
 
int drm_mode_cursor2_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_cursor2 *req = data;
return drm_mode_cursor_common(dev, req, file_priv);
}
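A driver opts in to the hotspot-aware path by providing cursor_set2 in its crtc funcs; drm_mode_cursor_common() then prefers it over cursor_set. A hypothetical sketch (the foo_* names are placeholders, not part of this revision):

static const struct drm_crtc_funcs foo_crtc_funcs = {
	.cursor_set2 = foo_crtc_cursor_set2,	/* receives hot_x/hot_y */
	.cursor_move = foo_crtc_cursor_move,
	/* ... */
};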
#endif
/* Original addfb only supported RGB formats, so figure out which one */
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
2315,7 → 2414,8
 
ret = format_check(r);
if (ret) {
DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format);
DRM_DEBUG_KMS("bad framebuffer format %s\n",
drm_get_format_name(r->pixel_format));
return ret;
}
 
/drivers/video/drm/drm_crtc_helper.c
189,13 → 189,14
if (list_empty(&connector->modes))
return 0;
 
list_for_each_entry(mode, &connector->modes, head)
mode->vrefresh = drm_mode_vrefresh(mode);
 
drm_mode_sort(&connector->modes);
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
drm_get_connector_name(connector));
list_for_each_entry(mode, &connector->modes, head) {
mode->vrefresh = drm_mode_vrefresh(mode);
 
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
drm_mode_debug_printmodeline(mode);
}
564,15 → 565,14
 
DRM_DEBUG_KMS("\n");
 
if (!set)
return -EINVAL;
BUG_ON(!set);
BUG_ON(!set->crtc);
BUG_ON(!set->crtc->helper_private);
 
if (!set->crtc)
return -EINVAL;
/* Enforce sane interface api - has been abused by the fb helper. */
BUG_ON(!set->mode && set->fb);
BUG_ON(set->fb && set->num_connectors == 0);
 
if (!set->crtc->helper_private)
return -EINVAL;
 
crtc_funcs = set->crtc->helper_private;
 
if (!set->mode)
645,11 → 645,6
mode_changed = true;
} else if (set->fb == NULL) {
mode_changed = true;
} else if (set->fb->depth != set->crtc->fb->depth) {
mode_changed = true;
} else if (set->fb->bits_per_pixel !=
set->crtc->fb->bits_per_pixel) {
mode_changed = true;
} else if (set->fb->pixel_format !=
set->crtc->fb->pixel_format) {
mode_changed = true;
682,8 → 677,13
/* don't break so the fail path works correctly */
fail = 1;
break;
 
if (connector->dpms != DRM_MODE_DPMS_ON) {
DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
mode_changed = true;
}
}
}
 
if (new_encoder != connector->encoder) {
DRM_DEBUG_KMS("encoder changed, full mode switch\n");
/drivers/video/drm/drm_edid.c
968,6 → 968,9
u8 csum = 0;
struct edid *edid = (struct edid *)raw_edid;
 
if (WARN_ON(!raw_edid))
return false;
 
if (edid_fixup > 8 || edid_fixup < 0)
edid_fixup = 6;
 
1010,15 → 1013,15
break;
}
 
return 1;
return true;
 
bad:
if (raw_edid && print_bad_edid) {
if (print_bad_edid) {
printk(KERN_ERR "Raw EDID:\n");
print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
raw_edid, EDID_LENGTH, false);
}
return 0;
return false;
}
EXPORT_SYMBOL(drm_edid_block_valid);
 
1706,11 → 1709,11
return NULL;
 
if (pt->misc & DRM_EDID_PT_STEREO) {
printk(KERN_WARNING "stereo mode not supported\n");
DRM_DEBUG_KMS("stereo mode not supported\n");
return NULL;
}
if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
printk(KERN_WARNING "composite sync not supported\n");
DRM_DEBUG_KMS("composite sync not supported\n");
}
 
/* it is incorrect if hsync/vsync width is zero */
2321,6 → 2324,31
}
EXPORT_SYMBOL(drm_find_cea_extension);
 
/*
* Calculate the alternate clock for the CEA mode
* (60Hz vs. 59.94Hz etc.)
*/
static unsigned int
cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
{
unsigned int clock = cea_mode->clock;
 
if (cea_mode->vrefresh % 6 != 0)
return clock;
 
/*
* edid_cea_modes contains the 59.94Hz
* variant for 240 and 480 line modes,
* and the 60Hz variant otherwise.
*/
if (cea_mode->vdisplay == 240 || cea_mode->vdisplay == 480)
clock = clock * 1001 / 1000;
else
clock = DIV_ROUND_UP(clock * 1000, 1001);
 
return clock;
}
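Two worked examples, assuming the usual CEA-861 timings in edid_cea_modes: 720x480 modes are stored as the 59.94 Hz variant with clock = 27000 kHz, so the alternate is 27000 * 1001 / 1000 = 27027 kHz (the 60 Hz variant); 1920x1080@60 is stored with clock = 148500 kHz, so the alternate is DIV_ROUND_UP(148500 * 1000, 1001) = 148352 kHz (the 59.94 Hz variant). Modes whose vrefresh is not a multiple of 6 (e.g. 50 Hz) have no alternate and are returned unchanged.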
 
/**
* drm_match_cea_mode - look for a CEA mode matching given mode
* @to_match: display mode
2339,21 → 2367,9
const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
unsigned int clock1, clock2;
 
clock1 = clock2 = cea_mode->clock;
 
/* Check both 60Hz and 59.94Hz */
if (cea_mode->vrefresh % 6 == 0) {
/*
* edid_cea_modes contains the 59.94Hz
* variant for 240 and 480 line modes,
* and the 60Hz variant otherwise.
*/
if (cea_mode->vdisplay == 240 ||
cea_mode->vdisplay == 480)
clock1 = clock1 * 1001 / 1000;
else
clock2 = DIV_ROUND_UP(clock2 * 1000, 1001);
}
clock1 = cea_mode->clock;
clock2 = cea_mode_alternate_clock(cea_mode);
 
if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
2364,7 → 2380,67
}
EXPORT_SYMBOL(drm_match_cea_mode);
 
static int
add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *tmp;
LIST_HEAD(list);
int modes = 0;
 
/* Don't add CEA modes if the CEA extension block is missing */
if (!drm_find_cea_extension(edid))
return 0;
 
/*
* Go through all probed modes and create a new mode
* with the alternate clock for certain CEA modes.
*/
list_for_each_entry(mode, &connector->probed_modes, head) {
const struct drm_display_mode *cea_mode;
struct drm_display_mode *newmode;
u8 cea_mode_idx = drm_match_cea_mode(mode) - 1;
unsigned int clock1, clock2;
 
if (cea_mode_idx >= ARRAY_SIZE(edid_cea_modes))
continue;
 
cea_mode = &edid_cea_modes[cea_mode_idx];
 
clock1 = cea_mode->clock;
clock2 = cea_mode_alternate_clock(cea_mode);
 
if (clock1 == clock2)
continue;
 
if (mode->clock != clock1 && mode->clock != clock2)
continue;
 
newmode = drm_mode_duplicate(dev, cea_mode);
if (!newmode)
continue;
 
/*
* The current mode could be either variant. Make
* sure to pick the "other" clock for the new mode.
*/
if (mode->clock != clock1)
newmode->clock = clock1;
else
newmode->clock = clock2;
 
list_add_tail(&newmode->head, &list);
}
 
list_for_each_entry_safe(mode, tmp, &list, head) {
list_del(&mode->head);
drm_mode_probed_add(connector, mode);
modes++;
}
 
return modes;
}
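Note the u8 index trick in the loop above: drm_match_cea_mode() returns the CEA VIC on a match and 0 otherwise, so "drm_match_cea_mode(mode) - 1" wraps to 255 for non-CEA modes, fails the ARRAY_SIZE(edid_cea_modes) bound check, and skips the mode without needing a separate no-match branch.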
 
static int
do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
{
2946,6 → 3022,7
if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
num_modes += add_inferred_modes(connector, edid);
num_modes += add_cea_modes(connector, edid);
num_modes += add_alternate_cea_modes(connector, edid);
 
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
/drivers/video/drm/drm_gem.c
104,12 → 104,8
return -ENOMEM;
}
 
if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE)) {
drm_ht_remove(&mm->offset_hash);
kfree(mm);
return -ENOMEM;
}
drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
 
return 0;
}
447,25 → 443,21
spin_lock(&dev->object_name_lock);
if (!obj->name) {
ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
obj->name = ret;
args->name = (uint64_t) obj->name;
spin_unlock(&dev->object_name_lock);
idr_preload_end();
 
if (ret < 0)
goto err;
ret = 0;
 
obj->name = ret;
 
/* Allocate a reference for the name table. */
drm_gem_object_reference(obj);
} else {
}
 
args->name = (uint64_t) obj->name;
spin_unlock(&dev->object_name_lock);
idr_preload_end();
ret = 0;
}
 
err:
spin_unlock(&dev->object_name_lock);
idr_preload_end();
drm_gem_object_unreference_unlocked(obj);
return ret;
}
/drivers/video/drm/drm_hashtab.c
0,0 → 1,212
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
* Simple open hash tab implementation.
*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
 
#include <drm/drmP.h>
#include <drm/drm_hashtab.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/rculist.h>
 
#define hlist_for_each_entry_rcu(pos, head, member) \
for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
typeof(*(pos)), member); \
pos; \
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
&(pos)->member)), typeof(*(pos)), member))
 
 
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
unsigned int size = 1 << order;
 
ht->order = order;
ht->table = NULL;
ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL(drm_ht_create);
 
void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
unsigned int hashed_key;
int count = 0;
 
hashed_key = hash_long(key, ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
hlist_for_each_entry(entry, h_list, head)
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
}
 
static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
unsigned long key)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
unsigned int hashed_key;
 
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
hlist_for_each_entry(entry, h_list, head) {
if (entry->key == key)
return &entry->head;
if (entry->key > key)
break;
}
return NULL;
}
 
 
static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
unsigned long key)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
unsigned int hashed_key;
 
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
hlist_for_each_entry_rcu(entry, h_list, head) {
if (entry->key == key)
return &entry->head;
if (entry->key > key)
break;
}
return NULL;
}
 
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
struct hlist_node *parent;
unsigned int hashed_key;
unsigned long key = item->key;
 
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
parent = NULL;
hlist_for_each_entry(entry, h_list, head) {
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
break;
parent = &entry->head;
}
if (parent) {
hlist_add_after_rcu(parent, &item->head);
} else {
hlist_add_head_rcu(&item->head, h_list);
}
return 0;
}
EXPORT_SYMBOL(drm_ht_insert_item);
 
/*
* Just insert an item and return any "bits" bit key that hasn't been
* used before.
*/
int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
unsigned long seed, int bits, int shift,
unsigned long add)
{
int ret;
unsigned long mask = (1 << bits) - 1;
unsigned long first, unshifted_key;
 
unshifted_key = hash_long(seed, bits);
first = unshifted_key;
do {
item->key = (unshifted_key << shift) + add;
ret = drm_ht_insert_item(ht, item);
if (ret)
unshifted_key = (unshifted_key + 1) & mask;
} while(ret && (unshifted_key != first));
 
if (ret) {
DRM_ERROR("Available key bit space exhausted\n");
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(drm_ht_just_insert_please);
 
int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
struct drm_hash_item **item)
{
struct hlist_node *list;
 
list = drm_ht_find_key_rcu(ht, key);
if (!list)
return -EINVAL;
 
*item = hlist_entry(list, struct drm_hash_item, head);
return 0;
}
EXPORT_SYMBOL(drm_ht_find_item);
 
int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
{
struct hlist_node *list;
 
list = drm_ht_find_key(ht, key);
if (list) {
hlist_del_init_rcu(list);
return 0;
}
return -EINVAL;
}
 
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
hlist_del_init_rcu(&item->head);
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
 
void drm_ht_remove(struct drm_open_hash *ht)
{
if (ht->table) {
kfree(ht->table);
ht->table = NULL;
}
}
EXPORT_SYMBOL(drm_ht_remove);
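A minimal usage sketch of this API (hypothetical caller, error handling elided): struct drm_hash_item is embedded in the caller's own object, and the key is stored in item->key before insertion.

struct my_obj {
	struct drm_hash_item hash;
	int payload;
};

/* drm_ht_create(&ht, 8) creates a 1 << 8 = 256-bucket table */
static struct my_obj *my_lookup(struct drm_open_hash *ht, unsigned long key)
{
	struct drm_hash_item *item;

	if (drm_ht_find_item(ht, key, &item))
		return NULL;
	return container_of(item, struct my_obj, hash);
}

/* insert:   obj->hash.key = key; drm_ht_insert_item(ht, &obj->hash);
 * remove:   drm_ht_remove_item(ht, &obj->hash);
 * teardown: drm_ht_remove(ht); */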
/drivers/video/drm/drm_irq.c
59,6 → 59,75
#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
 
 
irqreturn_t device_irq_handler(struct drm_device *dev)
{
 
printf("video irq\n");
 
// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
 
return dev->driver->irq_handler(0, dev);
}
 
/**
* Install IRQ handler.
*
* \param dev DRM device.
*
* Initializes the IRQ related data. Installs the handler, calling the driver
* \c irq_preinstall() and \c irq_postinstall() functions
* before and after the installation.
*/
int drm_irq_install(struct drm_device *dev)
{
int ret;
unsigned long sh_flags = 0;
char *irqname;
 
 
if (drm_dev_to_irq(dev) == 0)
return -EINVAL;
 
mutex_lock(&dev->struct_mutex);
 
/* Driver must have been initialized */
if (!dev->dev_private) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
 
if (dev->irq_enabled) {
mutex_unlock(&dev->struct_mutex);
return -EBUSY;
}
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
 
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
 
/* Before installing handler */
if (dev->driver->irq_preinstall)
dev->driver->irq_preinstall(dev);
 
ret = !AttachIntHandler(drm_dev_to_irq(dev), device_irq_handler, (u32)dev);
 
/* After installing handler */
if (dev->driver->irq_postinstall)
ret = dev->driver->irq_postinstall(dev);
 
if (ret < 0) {
DRM_ERROR(__FUNCTION__);
}
 
/* PCI config offset 4 is the command register; bit 10 is "Interrupt
 * Disable", so clearing it re-enables legacy INTx delivery. */
u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);
cmd &= ~(1<<10);
PciWrite16(dev->pdev->busnr, dev->pdev->devfn, 4, cmd);
 
return ret;
}
EXPORT_SYMBOL(drm_irq_install);
 
 
static inline u64 div_u64(u64 dividend, u32 divisor)
{
u32 remainder;
82,7 → 151,6
return div_u64(dividend, d);
}
 
 
/**
* drm_calc_timestamping_constants - Calculate and
* store various constants which are later needed by
175,6 → 243,10
#if 0
unsigned long irqflags;
 
/* vblank is not initialized (IRQ not installed ?), or has been freed */
if (!dev->num_crtcs)
return;
 
if (dev->vblank_inmodeset[crtc]) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = 1;
188,3 → 260,7
#endif
}
EXPORT_SYMBOL(drm_vblank_post_modeset);
 
 
 
 
/drivers/video/drm/drm_mm.c
669,7 → 669,7
}
EXPORT_SYMBOL(drm_mm_clean);
 
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->hole_stack);
INIT_LIST_HEAD(&mm->unused_nodes);
690,8 → 690,6
list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
 
mm->color_adjust = NULL;
 
return 0;
}
EXPORT_SYMBOL(drm_mm_init);
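Since drm_mm_init() can no longer fail, its callers drop their error handling: the drm_gem.c hunk above loses its ENOMEM cleanup branch, and ttm_bo_init_mm() in the ttm_bo.c hunk below switches to the manager's func->init() hook.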
 
699,8 → 697,8
{
struct drm_mm_node *entry, *next;
 
if (!list_empty(&mm->head_node.node_list)) {
DRM_ERROR("Memory manager not clean. Delaying takedown\n");
if (WARN(!list_empty(&mm->head_node.node_list),
"Memory manager not clean. Delaying takedown\n")) {
return;
}
 
716,37 → 714,38
}
EXPORT_SYMBOL(drm_mm_takedown);
 
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
const char *prefix)
{
struct drm_mm_node *entry;
unsigned long total_used = 0, total_free = 0, total = 0;
unsigned long hole_start, hole_end, hole_size;
 
hole_start = drm_mm_hole_node_start(&mm->head_node);
hole_end = drm_mm_hole_node_end(&mm->head_node);
if (entry->hole_follows) {
hole_start = drm_mm_hole_node_start(entry);
hole_end = drm_mm_hole_node_end(entry);
hole_size = hole_end - hole_start;
if (hole_size)
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
prefix, hole_start, hole_end,
hole_size);
total_free += hole_size;
return hole_size;
}
 
return 0;
}
 
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
struct drm_mm_node *entry;
unsigned long total_used = 0, total_free = 0, total = 0;
 
total_free += drm_mm_debug_hole(&mm->head_node, prefix);
 
drm_mm_for_each_node(entry, mm) {
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
prefix, entry->start, entry->start + entry->size,
entry->size);
total_used += entry->size;
 
if (entry->hole_follows) {
hole_start = drm_mm_hole_node_start(entry);
hole_end = drm_mm_hole_node_end(entry);
hole_size = hole_end - hole_start;
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
prefix, hole_start, hole_end,
hole_size);
total_free += hole_size;
total_free += drm_mm_debug_hole(entry, prefix);
}
}
total = total_free + total_used;
 
printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
/drivers/video/drm/drm_modes.c
533,6 → 533,8
dmode->flags |= DRM_MODE_FLAG_INTERLACE;
if (vm->flags & DISPLAY_FLAGS_DOUBLESCAN)
dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
dmode->flags |= DRM_MODE_FLAG_DBLCLK;
drm_mode_set_name(dmode);
 
return 0;
785,16 → 787,17
* LOCKING:
* None.
*
* Copy an existing mode into another mode, preserving the object id
* of the destination mode.
* Copy an existing mode into another mode, preserving the object id and
* list head of the destination mode.
*/
void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
{
int id = dst->base.id;
struct list_head head = dst->head;
 
*dst = *src;
dst->base.id = id;
INIT_LIST_HEAD(&dst->head);
dst->head = head;
}
EXPORT_SYMBOL(drm_mode_copy);
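Preserving dst->head matters because modes are often copied in place while still linked on a connector's mode list; a sketch (new_timing is a hypothetical source mode, not from this revision):

struct drm_display_mode *pos;

list_for_each_entry(pos, &connector->modes, head) {
	if (pos->type & DRM_MODE_TYPE_PREFERRED) {
		drm_mode_copy(pos, &new_timing);	/* pos stays linked */
		break;
	}
}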
 
1015,6 → 1018,11
diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
if (diff)
return diff;
 
diff = b->vrefresh - a->vrefresh;
if (diff)
return diff;
 
diff = b->clock - a->clock;
return diff;
}
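With this change the mode-sort comparator breaks display-area ties by vrefresh before falling back to clock, so e.g. 1920x1080@60 now sorts ahead of 1920x1080@50 regardless of their pixel clocks.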
/drivers/video/drm/ttm/ttm_bo.c
27,54 → 27,1228
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
/* Notes:
 
#define pr_fmt(fmt) "[TTM] " fmt
 
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
 
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
if (likely(man->io_reserve_fastpath))
return 0;

mutex_lock(&man->io_reserve_mutex);
return 0;
}
 
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
if (likely(man->io_reserve_fastpath))
return;
 
mutex_unlock(&man->io_reserve_mutex);
}
 
 
#if 0
static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 
pr_err(" has_type: %d\n", man->has_type);
pr_err(" use_type: %d\n", man->use_type);
pr_err(" flags: 0x%08X\n", man->flags);
pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset);
pr_err(" size: %llu\n", man->size);
pr_err(" available_caching: 0x%08X\n", man->available_caching);
pr_err(" default_caching: 0x%08X\n", man->default_caching);
if (mem_type != TTM_PL_SYSTEM)
(*man->func->debug)(man, TTM_PFX);
}
 
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
int i, ret, mem_type;
 
pr_err("No space for %p (%lu pages, %luK, %luM)\n",
bo, bo->mem.num_pages, bo->mem.size >> 10,
bo->mem.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type);
if (ret)
return;
pr_err(" placement[%d]=0x%08X (%d)\n",
i, placement->placement[i], mem_type);
ttm_mem_type_debug(bo->bdev, mem_type);
}
}
 
static ssize_t ttm_bo_global_show(struct kobject *kobj,
struct attribute *attr,
char *buffer)
{
struct ttm_bo_global *glob =
container_of(kobj, struct ttm_bo_global, kobj);
 
return snprintf(buffer, PAGE_SIZE, "%lu\n",
(unsigned long) atomic_read(&glob->bo_count));
}
 
static struct attribute *ttm_bo_global_attrs[] = {
&ttm_bo_count,
NULL
};
 
static const struct sysfs_ops ttm_bo_global_ops = {
.show = &ttm_bo_global_show
};
 
static struct kobj_type ttm_bo_glob_kobj_type = {
.release = &ttm_bo_global_kobj_release,
.sysfs_ops = &ttm_bo_global_ops,
.default_attrs = ttm_bo_global_attrs
};
#endif
 
 
static inline uint32_t ttm_bo_type_flags(unsigned type)
{
return 1 << (type);
}
 
static void ttm_bo_release_list(struct kref *list_kref)
{
struct ttm_buffer_object *bo =
container_of(list_kref, struct ttm_buffer_object, list_kref);
struct ttm_bo_device *bdev = bo->bdev;
size_t acc_size = bo->acc_size;
 
BUG_ON(atomic_read(&bo->list_kref.refcount));
BUG_ON(atomic_read(&bo->kref.refcount));
BUG_ON(atomic_read(&bo->cpu_writers));
BUG_ON(bo->sync_obj != NULL);
BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy));
 
if (bo->ttm)
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
if (bo->destroy)
bo->destroy(bo);
else {
kfree(bo);
}
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
 
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
 
// BUG_ON(!ttm_bo_is_reserved(bo));
 
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
 
BUG_ON(!list_empty(&bo->lru));
 
man = &bdev->man[bo->mem.mem_type];
list_add_tail(&bo->lru, &man->lru);
kref_get(&bo->list_kref);
 
if (bo->ttm != NULL) {
list_add_tail(&bo->swap, &bo->glob->swap_lru);
kref_get(&bo->list_kref);
}
}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);
 
int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
int put_count = 0;
 
if (!list_empty(&bo->swap)) {
list_del_init(&bo->swap);
++put_count;
}
if (!list_empty(&bo->lru)) {
list_del_init(&bo->lru);
++put_count;
}
 
/*
* TODO: Add a driver hook to delete from
* driver-specific LRU's here.
*/
 
return put_count;
}
 
static void ttm_bo_ref_bug(struct kref *list_kref)
{
BUG();
}
 
void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
bool never_free)
{
// kref_sub(&bo->list_kref, count,
// (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}
 
void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
int put_count;
 
spin_lock(&bo->glob->lru_lock);
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&bo->glob->lru_lock);
ttm_bo_list_ref_sub(bo, put_count, true);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
 
 
/*
* Call bo->mutex locked.
*/
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
int ret = 0;
uint32_t page_flags = 0;
 
// TTM_ASSERT_LOCKED(&bo->mutex);
bo->ttm = NULL;
 
if (bdev->need_dma32)
page_flags |= TTM_PAGE_FLAG_DMA32;
 
switch (bo->type) {
case ttm_bo_type_device:
if (zero_alloc)
page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
case ttm_bo_type_kernel:
bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
page_flags, glob->dummy_read_page);
if (unlikely(bo->ttm == NULL))
ret = -ENOMEM;
break;
case ttm_bo_type_sg:
bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
page_flags | TTM_PAGE_FLAG_SG,
glob->dummy_read_page);
if (unlikely(bo->ttm == NULL)) {
ret = -ENOMEM;
break;
}
bo->ttm->sg = bo->sg;
break;
default:
pr_err("Illegal buffer object type\n");
ret = -EINVAL;
break;
}
 
return ret;
}
 
#if 0
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem,
bool evict, bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
int ret = 0;
 
if (old_is_pci || new_is_pci ||
((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
ret = ttm_mem_io_lock(old_man, true);
if (unlikely(ret != 0))
goto out_err;
ttm_bo_unmap_virtual_locked(bo);
ttm_mem_io_unlock(old_man);
}
 
/*
* Create and bind a ttm if required.
*/
 
if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
if (bo->ttm == NULL) {
bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
ret = ttm_bo_add_ttm(bo, zero);
if (ret)
goto out_err;
}
 
ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
if (ret)
goto out_err;
 
if (mem->mem_type != TTM_PL_SYSTEM) {
ret = ttm_tt_bind(bo->ttm, mem);
if (ret)
goto out_err;
}
 
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
if (bdev->driver->move_notify)
bdev->driver->move_notify(bo, mem);
bo->mem = *mem;
mem->mm_node = NULL;
goto moved;
}
}
 
if (bdev->driver->move_notify)
bdev->driver->move_notify(bo, mem);
 
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, interruptible,
no_wait_gpu, mem);
else
ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
 
if (ret) {
if (bdev->driver->move_notify) {
struct ttm_mem_reg tmp_mem = *mem;
*mem = bo->mem;
bo->mem = tmp_mem;
bdev->driver->move_notify(bo, mem);
bo->mem = *mem;
*mem = tmp_mem;
}
 
goto out_err;
}
 
moved:
if (bo->evicted) {
ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
if (ret)
pr_err("Can not flush read caches\n");
bo->evicted = false;
}
 
if (bo->mem.mm_node) {
bo->offset = (bo->mem.start << PAGE_SHIFT) +
bdev->man[bo->mem.mem_type].gpu_offset;
bo->cur_placement = bo->mem.placement;
} else
bo->offset = 0;
 
return 0;
 
out_err:
new_man = &bdev->man[bo->mem.mem_type];
if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
ttm_tt_unbind(bo->ttm);
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
 
return ret;
}
 
/**
* Call bo::reserved.
* Will release GPU memory type usage on destruction.
* This is the place to put in driver specific hooks to release
* driver private resources.
* Will release the bo::reserved lock.
*/
 
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
if (bo->bdev->driver->move_notify)
bo->bdev->driver->move_notify(bo, NULL);
 
if (bo->ttm) {
ttm_tt_unbind(bo->ttm);
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
ttm_bo_mem_put(bo, &bo->mem);
 
ww_mutex_unlock (&bo->resv->lock);
}
 
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
struct ttm_bo_driver *driver = bdev->driver;
void *sync_obj = NULL;
int put_count;
int ret;
 
spin_lock(&glob->lru_lock);
ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
if (!ret && !bo->sync_obj) {
spin_unlock(&bdev->fence_lock);
put_count = ttm_bo_del_from_lru(bo);
 
spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
 
ttm_bo_list_ref_sub(bo, put_count, true);
 
return;
}
if (bo->sync_obj)
sync_obj = driver->sync_obj_ref(bo->sync_obj);
spin_unlock(&bdev->fence_lock);
 
if (!ret)
ww_mutex_unlock(&bo->resv->lock);
 
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
 
if (sync_obj) {
driver->sync_obj_flush(sync_obj);
driver->sync_obj_unref(&sync_obj);
}
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
}
 
/**
* function ttm_bo_cleanup_refs_and_unlock
* If bo idle, remove from delayed- and lru lists, and unref.
* If not idle, do nothing.
*
* We store the bo pointer in the drm_mm_node struct so we know which bo owns a
* specific node. There is no protection on the pointer, so to make
* sure things don't go berserk you have to access this pointer while
* holding the global lru lock and make sure anytime you free a node you
* reset the pointer to NULL.
* Must be called with lru_lock and reservation held, this function
* will drop both before returning.
*
* @interruptible Any sleeps should occur interruptibly.
* @no_wait_gpu Never wait for gpu. Return -EBUSY instead.
*/
 
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/module.h>
static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
struct ttm_bo_global *glob = bo->glob;
int put_count;
int ret;
 
spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, false, true);
 
if (ret && !no_wait_gpu) {
void *sync_obj;
 
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
unsigned long p_size)
/*
* Take a reference to the fence and unreserve,
* at this point the buffer should be dead, so
* no new sync objects can be attached.
*/
sync_obj = driver->sync_obj_ref(bo->sync_obj);
spin_unlock(&bdev->fence_lock);
 
ww_mutex_unlock(&bo->resv->lock);
spin_unlock(&glob->lru_lock);
 
ret = driver->sync_obj_wait(sync_obj, false, interruptible);
driver->sync_obj_unref(&sync_obj);
if (ret)
return ret;
 
/*
* remove sync_obj with ttm_bo_wait, the wait should be
* finished, and no new wait object should have been added.
*/
spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, false, true);
WARN_ON(ret);
spin_unlock(&bdev->fence_lock);
if (ret)
return ret;
 
spin_lock(&glob->lru_lock);
ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
/*
* We raced, and lost, someone else holds the reservation now,
* and is probably busy in ttm_bo_cleanup_memtype_use.
*
* Even if it's not the case, because we finished waiting any
* delayed destruction would succeed, so just return success
* here.
*/
if (ret) {
spin_unlock(&glob->lru_lock);
return 0;
}
} else
spin_unlock(&bdev->fence_lock);
 
if (ret || unlikely(list_empty(&bo->ddestroy))) {
ww_mutex_unlock(&bo->resv->lock);
spin_unlock(&glob->lru_lock);
return ret;
}
 
put_count = ttm_bo_del_from_lru(bo);
list_del_init(&bo->ddestroy);
++put_count;
 
spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
 
ttm_bo_list_ref_sub(bo, put_count, true);
 
return 0;
}
 
/**
* Traverse the delayed list, and call ttm_bo_cleanup_refs on all
* encountered buffers.
*/
 
static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
int ret = -EINVAL;
struct ttm_bo_global *glob = bdev->glob;
struct ttm_buffer_object *entry = NULL;
int ret = 0;
 
spin_lock(&glob->lru_lock);
if (list_empty(&bdev->ddestroy))
goto out_unlock;
 
entry = list_first_entry(&bdev->ddestroy,
struct ttm_buffer_object, ddestroy);
kref_get(&entry->list_kref);
 
for (;;) {
struct ttm_buffer_object *nentry = NULL;
 
if (entry->ddestroy.next != &bdev->ddestroy) {
nentry = list_first_entry(&entry->ddestroy,
struct ttm_buffer_object, ddestroy);
kref_get(&nentry->list_kref);
}
 
ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
if (remove_all && ret) {
spin_unlock(&glob->lru_lock);
ret = ttm_bo_reserve_nolru(entry, false, false,
false, 0);
spin_lock(&glob->lru_lock);
}
 
if (!ret)
ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
!remove_all);
else
spin_unlock(&glob->lru_lock);
 
kref_put(&entry->list_kref, ttm_bo_release_list);
entry = nentry;
 
if (ret || !entry)
goto out;
 
spin_lock(&glob->lru_lock);
if (list_empty(&entry->ddestroy))
break;
}
 
out_unlock:
spin_unlock(&glob->lru_lock);
out:
if (entry)
kref_put(&entry->list_kref, ttm_bo_release_list);
return ret;
}
 
static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
struct ttm_bo_device *bdev =
container_of(work, struct ttm_bo_device, wq.work);
 
if (ttm_bo_delayed_delete(bdev, false)) {
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
}
}
#endif
 
static void ttm_bo_release(struct kref *kref)
{
struct ttm_buffer_object *bo =
container_of(kref, struct ttm_buffer_object, kref);
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
write_lock(&bdev->vm_lock);
if (likely(bo->vm_node != NULL)) {
// rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
drm_mm_put_block(bo->vm_node);
bo->vm_node = NULL;
}
write_unlock(&bdev->vm_lock);
ttm_mem_io_lock(man, false);
// ttm_mem_io_free_vm(bo);
ttm_mem_io_unlock(man);
// ttm_bo_cleanup_refs_or_queue(bo);
// kref_put(&bo->list_kref, ttm_bo_release_list);
}
 
void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo = *p_bo;
 
*p_bo = NULL;
kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);
 
#if 0
int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
 
void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
if (resched)
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
 
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_reg evict_mem;
struct ttm_placement placement;
int ret = 0;
 
spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
spin_unlock(&bdev->fence_lock);
 
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS) {
pr_err("Failed to expire sync object before buffer eviction\n");
}
goto out;
}
 
// BUG_ON(!ttm_bo_is_reserved(bo));
 
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
evict_mem.bus.io_reserved_vm = false;
evict_mem.bus.io_reserved_count = 0;
 
placement.fpfn = 0;
placement.lpfn = 0;
placement.num_placement = 0;
placement.num_busy_placement = 0;
bdev->driver->evict_flags(bo, &placement);
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS) {
pr_err("Failed to find memory space for buffer 0x%p eviction\n",
bo);
ttm_bo_mem_space_debug(bo, &placement);
}
goto out;
}
 
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS)
pr_err("Buffer eviction failed\n");
ttm_bo_mem_put(bo, &evict_mem);
goto out;
}
bo->evicted = true;
out:
return ret;
}
 
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_buffer_object *bo;
int ret = -EBUSY, put_count;
 
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
if (!ret)
break;
}
 
if (ret) {
spin_unlock(&glob->lru_lock);
return ret;
}
 
kref_get(&bo->list_kref);
 
if (!list_empty(&bo->ddestroy)) {
ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
no_wait_gpu);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
 
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
 
BUG_ON(ret != 0);
 
ttm_bo_list_ref_sub(bo, put_count, true);
 
ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
ttm_bo_unreserve(bo);
 
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
 
void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
 
if (mem->mm_node)
(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);
 
/**
* Repeatedly evict memory from the LRU for @mem_type until we create enough
* space, or we've evicted everything and there isn't enough space.
*/
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
uint32_t mem_type,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
int ret;
 
do {
ret = (*man->func->get_node)(man, bo, placement, mem);
if (unlikely(ret != 0))
return ret;
if (mem->mm_node)
break;
ret = ttm_mem_evict_first(bdev, mem_type,
interruptible, no_wait_gpu);
if (unlikely(ret != 0))
return ret;
} while (1);
if (mem->mm_node == NULL)
return -ENOMEM;
mem->mem_type = mem_type;
return 0;
}
 
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
uint32_t cur_placement,
uint32_t proposed_placement)
{
uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
 
/**
* Keep current caching if possible.
*/
 
if ((cur_placement & caching) != 0)
result |= (cur_placement & caching);
else if ((man->default_caching & caching) != 0)
result |= man->default_caching;
else if ((TTM_PL_FLAG_CACHED & caching) != 0)
result |= TTM_PL_FLAG_CACHED;
else if ((TTM_PL_FLAG_WC & caching) != 0)
result |= TTM_PL_FLAG_WC;
else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
result |= TTM_PL_FLAG_UNCACHED;
 
return result;
}
 
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
uint32_t mem_type,
uint32_t proposed_placement,
uint32_t *masked_placement)
{
uint32_t cur_flags = ttm_bo_type_flags(mem_type);
 
if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
return false;
 
if ((proposed_placement & man->available_caching) == 0)
return false;
 
cur_flags |= (proposed_placement & man->available_caching);
 
*masked_placement = cur_flags;
return true;
}
 
/**
* Creates space for memory region @mem according to its type.
*
* This function first searches for free space in compatible memory types in
* the priority order defined by the driver. If free space isn't found, then
* ttm_bo_mem_force_space is attempted in priority order to evict and find
* space.
*/
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
uint32_t mem_type = TTM_PL_SYSTEM;
uint32_t cur_flags = 0;
bool type_found = false;
bool type_ok = false;
bool has_erestartsys = false;
int i, ret;
 
if (type >= TTM_NUM_MEM_TYPES) {
printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
mem->mm_node = NULL;
for (i = 0; i < placement->num_placement; ++i) {
ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type);
if (ret)
return ret;
man = &bdev->man[mem_type];
 
type_ok = ttm_bo_mt_compatible(man,
mem_type,
placement->placement[i],
&cur_flags);
 
if (!type_ok)
continue;
 
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
/*
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
ttm_flag_masked(&cur_flags, placement->placement[i],
~TTM_PL_MASK_MEMTYPE);
 
if (mem_type == TTM_PL_SYSTEM)
break;
 
if (man->has_type && man->use_type) {
type_found = true;
ret = (*man->func->get_node)(man, bo, placement, mem);
if (unlikely(ret))
return ret;
}
if (mem->mm_node)
break;
}
 
man = &bdev->man[type];
if (man->has_type) {
printk(KERN_ERR TTM_PFX
"Memory manager already initialized for type %d\n",
type);
if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
mem->mem_type = mem_type;
mem->placement = cur_flags;
return 0;
}
 
if (!type_found)
return -EINVAL;
 
for (i = 0; i < placement->num_busy_placement; ++i) {
ret = ttm_mem_type_from_flags(placement->busy_placement[i],
&mem_type);
if (ret)
return ret;
man = &bdev->man[mem_type];
if (!man->has_type)
continue;
if (!ttm_bo_mt_compatible(man,
mem_type,
placement->busy_placement[i],
&cur_flags))
continue;
 
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
/*
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
ttm_flag_masked(&cur_flags, placement->busy_placement[i],
~TTM_PL_MASK_MEMTYPE);
 
 
if (mem_type == TTM_PL_SYSTEM) {
mem->mem_type = mem_type;
mem->placement = cur_flags;
mem->mm_node = NULL;
return 0;
}
 
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
interruptible, no_wait_gpu);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
return 0;
}
if (ret == -ERESTARTSYS)
has_erestartsys = true;
}
ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
 
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
bool interruptible,
bool no_wait_gpu)
{
int ret = 0;
struct ttm_mem_reg mem;
struct ttm_bo_device *bdev = bo->bdev;
 
// BUG_ON(!ttm_bo_is_reserved(bo));
 
/*
* FIXME: It's possible to pipeline buffer moves.
* Have the driver move function wait for idle when necessary,
* instead of doing it here.
*/
spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
spin_unlock(&bdev->fence_lock);
if (ret)
return ret;
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
mem.bus.io_reserved_vm = false;
mem.bus.io_reserved_count = 0;
/*
* Determine where to move the buffer.
*/
ret = ttm_bo_mem_space(bo, placement, &mem,
interruptible, no_wait_gpu);
if (ret)
goto out_unlock;
ret = ttm_bo_handle_move_mem(bo, &mem, false,
interruptible, no_wait_gpu);
out_unlock:
if (ret && mem.mm_node)
ttm_bo_mem_put(bo, &mem);
return ret;
}
#endif
 
static int ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
int i;
 
if (mem->mm_node && placement->lpfn != 0 &&
(mem->start < placement->fpfn ||
mem->start + mem->num_pages > placement->lpfn))
return -1;
 
for (i = 0; i < placement->num_placement; i++) {
if ((placement->placement[i] & mem->placement &
TTM_PL_MASK_CACHING) &&
(placement->placement[i] & mem->placement &
TTM_PL_MASK_MEM))
return i;
}
return -1;
}
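Return contract of ttm_bo_mem_compat(): the index of the first entry in placement->placement[] whose caching bits and memory-type bits both overlap the buffer's current placement, or -1 if the buffer must move (also -1 when mm_node lies outside a requested [fpfn, lpfn) page range). ttm_bo_validate() below uses that index to fold the matching entry's access flags into bo->mem.placement.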
 
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
bool interruptible,
bool no_wait_gpu)
{
int ret;
 
// BUG_ON(!ttm_bo_is_reserved(bo));
/* Check that range is valid */
if (placement->lpfn || placement->fpfn)
if (placement->fpfn > placement->lpfn ||
(placement->lpfn - placement->fpfn) < bo->num_pages)
return -EINVAL;
/*
* Check whether we need to move buffer.
*/
ret = ttm_bo_mem_compat(placement, &bo->mem);
if (ret < 0) {
// ret = ttm_bo_move_buffer(bo, placement, interruptible,
// no_wait_gpu);
if (ret)
return ret;
} else {
/*
* Use the access and other non-mapping-related flag bits from
* the compatible memory placement flags to the active flags
*/
ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
~TTM_PL_MASK_MEMTYPE);
}
/*
* We might need to add a TTM.
*/
if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
ret = ttm_bo_add_ttm(bo, true);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
 
int ttm_bo_check_placement(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
BUG_ON((placement->fpfn || placement->lpfn) &&
(bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
 
return 0;
}
 
int ttm_bo_init(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo,
unsigned long size,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
bool interruptible,
struct file *persistent_swap_storage,
size_t acc_size,
struct sg_table *sg,
void (*destroy) (struct ttm_buffer_object *))
{
int ret = 0;
unsigned long num_pages;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
bool locked;
 
// ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
if (ret) {
pr_err("Out of kernel memory\n");
if (destroy)
(*destroy)(bo);
else
kfree(bo);
return -ENOMEM;
}
 
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (num_pages == 0) {
pr_err("Illegal buffer object size\n");
if (destroy)
(*destroy)(bo);
else
kfree(bo);
// ttm_mem_global_free(mem_glob, acc_size);
return -EINVAL;
}
bo->destroy = destroy;
 
kref_init(&bo->kref);
kref_init(&bo->list_kref);
atomic_set(&bo->cpu_writers, 0);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
INIT_LIST_HEAD(&bo->io_reserve_lru);
bo->bdev = bdev;
bo->glob = bdev->glob;
bo->type = type;
bo->num_pages = num_pages;
bo->mem.size = num_pages << PAGE_SHIFT;
bo->mem.mem_type = TTM_PL_SYSTEM;
bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
bo->mem.page_alignment = page_alignment;
bo->mem.bus.io_reserved_vm = false;
bo->mem.bus.io_reserved_count = 0;
bo->priv_flags = 0;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
bo->persistent_swap_storage = persistent_swap_storage;
bo->acc_size = acc_size;
bo->sg = sg;
bo->resv = &bo->ttm_resv;
// reservation_object_init(bo->resv);
atomic_inc(&bo->glob->bo_count);
 
ret = ttm_bo_check_placement(bo, placement);
 
/*
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
// if (likely(!ret) &&
// (bo->type == ttm_bo_type_device ||
// bo->type == ttm_bo_type_sg))
// ret = ttm_bo_setup_vm(bo);
 
// if (likely(!ret))
// ret = ttm_bo_validate(bo, placement, interruptible, false);
 
// ttm_bo_unreserve(bo);
 
// if (unlikely(ret))
// ttm_bo_unref(&bo);
 
return ret;
}
EXPORT_SYMBOL(ttm_bo_init);
 
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
unsigned long bo_size,
unsigned struct_size)
{
unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
size_t size = 0;
 
size += ttm_round_pot(struct_size);
size += PAGE_ALIGN(npages * sizeof(void *));
size += ttm_round_pot(sizeof(struct ttm_tt));
return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);
 
size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
unsigned long bo_size,
unsigned struct_size)
{
unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
size_t size = 0;
 
size += ttm_round_pot(struct_size);
size += PAGE_ALIGN(npages * sizeof(void *));
size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
size += ttm_round_pot(sizeof(struct ttm_dma_tt));
return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);
 
int ttm_bo_create(struct ttm_bo_device *bdev,
unsigned long size,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
bool interruptible,
struct file *persistent_swap_storage,
struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo;
size_t acc_size;
int ret;
 
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (unlikely(bo == NULL))
return -ENOMEM;
 
acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
interruptible, persistent_swap_storage, acc_size,
NULL, NULL);
if (likely(ret == 0))
*p_bo = bo;
 
return ret;
}
EXPORT_SYMBOL(ttm_bo_create);
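
/*
 * Usage sketch (not built): creating a small cached system-memory buffer.
 * The single-entry placement below is hypothetical; a real driver supplies
 * placement and busy_placement lists that match its memory domains.
 */
#if 0
static int example_create_bo(struct ttm_bo_device *bdev,
			     struct ttm_buffer_object **p_bo)
{
	uint32_t flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
	struct ttm_placement placement = {
		.fpfn = 0,
		.lpfn = 0,
		.num_placement = 1,
		.placement = &flags,
		.num_busy_placement = 1,
		.busy_placement = &flags,
	};

	/* 4 pages, no extra alignment, not interruptible, no swap file */
	return ttm_bo_create(bdev, 4 * PAGE_SIZE, ttm_bo_type_kernel,
			     &placement, 0, false, NULL, p_bo);
}
#endif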
 
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
unsigned long p_size)
{
int ret = -EINVAL;
struct ttm_mem_type_manager *man;
 
ENTER();
 
BUG_ON(type >= TTM_NUM_MEM_TYPES);
man = &bdev->man[type];
BUG_ON(man->has_type);
man->io_reserve_fastpath = true;
man->use_io_reserve_lru = false;
mutex_init(&man->io_reserve_mutex);
INIT_LIST_HEAD(&man->io_reserve_lru);
 
ret = bdev->driver->init_mem_type(bdev, type, man);
if (ret)
return ret;
man->bdev = bdev;
 
ret = 0;
if (type != TTM_PL_SYSTEM) {
if (!p_size) {
printk(KERN_ERR TTM_PFX
"Zero size memory manager type %d\n",
type);
return ret;
}
ret = (*man->func->init)(man, p_size);
if (ret)
return ret;
}
84,21 → 1258,32
 
INIT_LIST_HEAD(&man->lru);
 
LEAVE();
 
return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
 
void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	(void)glob; /* kobject teardown is not wired up in this port */
}
EXPORT_SYMBOL(ttm_bo_global_release);
 
int ttm_bo_global_init(struct drm_global_reference *ref)
{
struct ttm_bo_global_ref *bo_ref =
container_of(ref, struct ttm_bo_global_ref, ref);
struct ttm_bo_global *glob = ref->object;
int ret;
 
	ENTER();

mutex_init(&glob->device_list_mutex);
spin_lock_init(&glob->lru_lock);
glob->mem_glob = bo_ref->mem_glob;
// glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
glob->dummy_read_page = AllocPage();
 
if (unlikely(glob->dummy_read_page == NULL)) {
ret = -ENOMEM;
108,34 → 1293,81
INIT_LIST_HEAD(&glob->swap_lru);
INIT_LIST_HEAD(&glob->device_list);
 
// ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
if (unlikely(ret != 0)) {
printk(KERN_ERR TTM_PFX
"Could not register buffer object swapout.\n");
goto out_no_shrink;
}
	atomic_set(&glob->bo_count, 0);

	glob->ttm_bo_extra_size =
		ttm_round_pot(sizeof(struct ttm_tt)) +
		ttm_round_pot(sizeof(struct ttm_backend));

	glob->ttm_bo_size = glob->ttm_bo_extra_size +
		ttm_round_pot(sizeof(struct ttm_buffer_object));

//	kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
//	ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
//	if (unlikely(ret != 0))
//		kobject_put(&glob->kobj);

	LEAVE();

	return 0;
out_no_shrink:
__free_page(glob->dummy_read_page);
out_no_drp:
kfree(glob);
return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);
 
 
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
uint64_t file_page_offset,
bool need_dma32)
{
int ret = -EINVAL;
 
ENTER();
 
// rwlock_init(&bdev->vm_lock);
bdev->driver = driver;
 
memset(bdev->man, 0, sizeof(bdev->man));
 
/*
* Initialize the system memory buffer type.
* Other types need to be driver / IOCTL initialized.
*/
ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
if (unlikely(ret != 0))
goto out_no_sys;
 
bdev->addr_space_rb = RB_ROOT;
drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
 
// INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = NULL;
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
bdev->val_seq = 0;
spin_lock_init(&bdev->fence_lock);
mutex_lock(&glob->device_list_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
mutex_unlock(&glob->device_list_mutex);
 
LEAVE();
 
return 0;
out_no_sys:
return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
 
/*
* buffer object vm functions.
*/
 
bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
if (mem->mem_type == TTM_PL_SYSTEM)
return false;
 
if (man->flags & TTM_MEMTYPE_FLAG_CMA)
return false;
 
if (mem->placement & TTM_PL_FLAG_CACHED)
return false;
}
return true;
}
 
/drivers/video/drm/ttm/ttm_bo_manager.c
0,0 → 1,151
/**************************************************************************
*
* Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
 
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
 
/**
* Currently we use a spinlock for the lock, but a mutex *may* be
* more appropriate to reduce scheduling latency if the range manager
* ends up with very fragmented allocation patterns.
*/
 
struct ttm_range_manager {
struct drm_mm mm;
spinlock_t lock;
};
 
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
struct drm_mm *mm = &rman->mm;
struct drm_mm_node *node = NULL;
unsigned long lpfn;
int ret;
 
lpfn = placement->lpfn;
if (!lpfn)
lpfn = man->size;
do {
ret = drm_mm_pre_get(mm);
if (unlikely(ret))
return ret;
 
spin_lock(&rman->lock);
node = drm_mm_search_free_in_range(mm,
mem->num_pages, mem->page_alignment,
placement->fpfn, lpfn, 1);
if (unlikely(node == NULL)) {
spin_unlock(&rman->lock);
return 0;
}
node = drm_mm_get_block_atomic_range(node, mem->num_pages,
mem->page_alignment,
placement->fpfn,
lpfn);
spin_unlock(&rman->lock);
} while (node == NULL);
 
mem->mm_node = node;
mem->start = node->start;
return 0;
}
 
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
if (mem->mm_node) {
spin_lock(&rman->lock);
drm_mm_put_block(mem->mm_node);
spin_unlock(&rman->lock);
mem->mm_node = NULL;
}
}
 
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
struct ttm_range_manager *rman;
 
rman = kzalloc(sizeof(*rman), GFP_KERNEL);
if (!rman)
return -ENOMEM;
 
drm_mm_init(&rman->mm, 0, p_size);
spin_lock_init(&rman->lock);
man->priv = rman;
return 0;
}
 
static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
struct drm_mm *mm = &rman->mm;
 
spin_lock(&rman->lock);
if (drm_mm_clean(mm)) {
drm_mm_takedown(mm);
spin_unlock(&rman->lock);
kfree(rman);
man->priv = NULL;
return 0;
}
spin_unlock(&rman->lock);
return -EBUSY;
}
 
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
const char *prefix)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
spin_lock(&rman->lock);
drm_mm_debug_table(&rman->mm, prefix);
spin_unlock(&rman->lock);
}
 
const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
ttm_bo_man_init,
ttm_bo_man_takedown,
ttm_bo_man_get_node,
ttm_bo_man_put_node,
ttm_bo_man_debug
};
EXPORT_SYMBOL(ttm_bo_manager_func);
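
/*
 * Hookup sketch (not built): how a driver's init_mem_type() callback might
 * wire a fixed VRAM aperture to the range manager above; ttm_bo_init_mm()
 * then calls man->func->init() with the managed size. The flag and caching
 * choices are typical for fixed device memory, not mandated by this file,
 * and a real driver also fills in gpu_offset and friends as needed.
 */
#if 0
static int example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				 struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
#endif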
/drivers/video/drm/ttm/ttm_bo_util.c
0,0 → 1,706
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
 
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
 
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
ttm_bo_mem_put(bo, &bo->mem);
}
 
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
bool evict,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
 
if (old_mem->mem_type != TTM_PL_SYSTEM) {
ttm_tt_unbind(ttm);
ttm_bo_free_old_node(bo);
ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
TTM_PL_MASK_MEM);
old_mem->mem_type = TTM_PL_SYSTEM;
}
 
ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
if (unlikely(ret != 0))
return ret;
 
if (new_mem->mem_type != TTM_PL_SYSTEM) {
ret = ttm_tt_bind(ttm, new_mem);
if (unlikely(ret != 0))
return ret;
}
 
*old_mem = *new_mem;
new_mem->mm_node = NULL;
 
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
 
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
if (likely(man->io_reserve_fastpath))
return 0;
 
if (interruptible)
return mutex_lock_interruptible(&man->io_reserve_mutex);
 
mutex_lock(&man->io_reserve_mutex);
return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);
 
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
if (likely(man->io_reserve_fastpath))
return;
 
mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);
 
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
struct ttm_buffer_object *bo;
 
if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
return -EAGAIN;
 
bo = list_first_entry(&man->io_reserve_lru,
struct ttm_buffer_object,
io_reserve_lru);
list_del_init(&bo->io_reserve_lru);
ttm_bo_unmap_virtual_locked(bo);
 
return 0;
}
 
 
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret = 0;
 
if (!bdev->driver->io_mem_reserve)
return 0;
if (likely(man->io_reserve_fastpath))
return bdev->driver->io_mem_reserve(bdev, mem);
 
if (bdev->driver->io_mem_reserve &&
mem->bus.io_reserved_count++ == 0) {
retry:
ret = bdev->driver->io_mem_reserve(bdev, mem);
if (ret == -EAGAIN) {
ret = ttm_mem_io_evict(man);
if (ret == 0)
goto retry;
}
}
return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);
 
void ttm_mem_io_free(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 
if (likely(man->io_reserve_fastpath))
return;
 
if (bdev->driver->io_mem_reserve &&
--mem->bus.io_reserved_count == 0 &&
bdev->driver->io_mem_free)
bdev->driver->io_mem_free(bdev, mem);
 
}
EXPORT_SYMBOL(ttm_mem_io_free);
 
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
struct ttm_mem_reg *mem = &bo->mem;
int ret;
 
if (!mem->bus.io_reserved_vm) {
struct ttm_mem_type_manager *man =
&bo->bdev->man[mem->mem_type];
 
ret = ttm_mem_io_reserve(bo->bdev, mem);
if (unlikely(ret != 0))
return ret;
mem->bus.io_reserved_vm = true;
if (man->use_io_reserve_lru)
list_add_tail(&bo->io_reserve_lru,
&man->io_reserve_lru);
}
return 0;
}
 
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
struct ttm_mem_reg *mem = &bo->mem;
 
if (mem->bus.io_reserved_vm) {
mem->bus.io_reserved_vm = false;
list_del_init(&bo->io_reserve_lru);
ttm_mem_io_free(bo->bdev, mem);
}
}
 
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret;
void *addr;
 
*virtual = NULL;
(void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bdev, mem);
ttm_mem_io_unlock(man);
if (ret || !mem->bus.is_iomem)
return ret;
 
if (mem->bus.addr) {
addr = mem->bus.addr;
} else {
if (mem->placement & TTM_PL_FLAG_WC)
addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
if (!addr) {
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
ttm_mem_io_unlock(man);
return -ENOMEM;
}
}
*virtual = addr;
return 0;
}
 
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void *virtual)
{
struct ttm_mem_type_manager *man;
 
man = &bdev->man[mem->mem_type];
 
if (virtual && mem->bus.addr == NULL)
iounmap(virtual);
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
ttm_mem_io_unlock(man);
}
 
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
uint32_t *dstP =
(uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
uint32_t *srcP =
(uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
 
int i;
for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
iowrite32(ioread32(srcP++), dstP++);
return 0;
}
 
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
unsigned long page,
pgprot_t prot)
{
struct page *d = ttm->pages[page];
void *dst;
 
if (!d)
return -ENOMEM;
 
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
 
#ifdef CONFIG_X86
dst = kmap_atomic_prot(d, prot);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
dst = vmap(&d, 1, 0, prot);
else
dst = kmap(d);
#endif
if (!dst)
return -ENOMEM;
 
memcpy_fromio(dst, src, PAGE_SIZE);
 
#ifdef CONFIG_X86
kunmap_atomic(dst);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
vunmap(dst);
else
kunmap(d);
#endif
 
return 0;
}
 
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
unsigned long page,
pgprot_t prot)
{
struct page *s = ttm->pages[page];
void *src;
 
if (!s)
return -ENOMEM;
 
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
src = kmap_atomic_prot(s, prot);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
src = vmap(&s, 1, 0, prot);
else
src = kmap(s);
#endif
if (!src)
return -ENOMEM;
 
memcpy_toio(dst, src, PAGE_SIZE);
 
#ifdef CONFIG_X86
kunmap_atomic(src);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
vunmap(src);
else
kunmap(s);
#endif
 
return 0;
}
 
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg old_copy = *old_mem;
void *old_iomap;
void *new_iomap;
int ret;
unsigned long i;
unsigned long page;
unsigned long add = 0;
int dir;
 
ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
if (ret)
return ret;
ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
if (ret)
goto out;
 
	/* single TTM move: nothing to copy */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	/* don't move nonexistent data; only the placement changes */
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/* TTM might be NULL for moves within the same region */
	if (ttm && ttm->state == tt_unpopulated) {
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
if (ret) {
/* if we fail here don't nuke the mm node
* as the bo still owns it */
old_copy.mm_node = NULL;
goto out1;
}
}
 
add = 0;
dir = 1;
 
if ((old_mem->mem_type == new_mem->mem_type) &&
(new_mem->start < old_mem->start + old_mem->size)) {
dir = -1;
add = new_mem->num_pages - 1;
}
 
for (i = 0; i < new_mem->num_pages; ++i) {
page = i * dir + add;
if (old_iomap == NULL) {
pgprot_t prot = ttm_io_prot(old_mem->placement,
PAGE_KERNEL);
ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
prot);
} else if (new_iomap == NULL) {
pgprot_t prot = ttm_io_prot(new_mem->placement,
PAGE_KERNEL);
ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
prot);
} else
ret = ttm_copy_io_page(new_iomap, old_iomap, page);
if (ret) {
			/* failing here means keep the old copy as-is */
old_copy.mm_node = NULL;
goto out1;
}
}
mb();
out2:
old_copy = *old_mem;
*old_mem = *new_mem;
new_mem->mm_node = NULL;
 
if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
ttm_tt_unbind(ttm);
ttm_tt_destroy(ttm);
bo->ttm = NULL;
}
 
out1:
ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
ttm_bo_mem_put(bo, &old_copy);
return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
 
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
kfree(bo);
}
 
/**
* ttm_buffer_object_transfer
*
* @bo: A pointer to a struct ttm_buffer_object.
* @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
* holding the data of @bo with the old placement.
*
* This is a utility function that may be called after an accelerated move
* has been scheduled. A new buffer object is created as a placeholder for
* the old data while it's being copied. When that buffer object is idle,
* it can be destroyed, releasing the space of the old placement.
* Returns:
* !0: Failure.
*/
 
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
struct ttm_buffer_object **new_obj)
{
struct ttm_buffer_object *fbo;
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
int ret;
 
fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
if (!fbo)
return -ENOMEM;
 
*fbo = *bo;
 
/**
* Fix up members that we shouldn't copy directly:
* TODO: Explicit member copy would probably be better here.
*/
 
INIT_LIST_HEAD(&fbo->ddestroy);
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
INIT_LIST_HEAD(&fbo->io_reserve_lru);
fbo->vm_node = NULL;
atomic_set(&fbo->cpu_writers, 0);
 
spin_lock(&bdev->fence_lock);
if (bo->sync_obj)
fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
else
fbo->sync_obj = NULL;
spin_unlock(&bdev->fence_lock);
kref_init(&fbo->list_kref);
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
fbo->acc_size = 0;
fbo->resv = &fbo->ttm_resv;
reservation_object_init(fbo->resv);
ret = ww_mutex_trylock(&fbo->resv->lock);
WARN_ON(!ret);
 
*new_obj = fbo;
return 0;
}
 
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
if (caching_flags & TTM_PL_FLAG_WC)
tmp = pgprot_writecombine(tmp);
else if (boot_cpu_data.x86 > 3)
tmp = pgprot_noncached(tmp);
 
#elif defined(__powerpc__)
if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
pgprot_val(tmp) |= _PAGE_NO_CACHE;
if (caching_flags & TTM_PL_FLAG_UNCACHED)
pgprot_val(tmp) |= _PAGE_GUARDED;
}
#endif
#if defined(__ia64__)
if (caching_flags & TTM_PL_FLAG_WC)
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
if (!(caching_flags & TTM_PL_FLAG_CACHED))
tmp = pgprot_noncached(tmp);
#endif
return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
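
/*
 * Example (illustrative): callers keep PAGE_KERNEL for cached placements and
 * consult ttm_io_prot() only otherwise, as ttm_bo_kmap_ttm() below does; e.g.
 *	pgprot_t prot = ttm_io_prot(TTM_PL_FLAG_WC, PAGE_KERNEL);
 * yields a write-combined protection on x86, and an uncached one for any
 * other non-cached caching flag.
 */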
 
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
unsigned long offset,
unsigned long size,
struct ttm_bo_kmap_obj *map)
{
struct ttm_mem_reg *mem = &bo->mem;
 
if (bo->mem.bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
} else {
map->bo_kmap_type = ttm_bo_map_iomap;
if (mem->placement & TTM_PL_FLAG_WC)
map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
else
map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
}
return (!map->virtual) ? -ENOMEM : 0;
}
 
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
unsigned long start_page,
unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
struct ttm_tt *ttm = bo->ttm;
int ret;
 
BUG_ON(!ttm);
 
if (ttm->state == tt_unpopulated) {
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
if (ret)
return ret;
}
 
if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
/*
* We're mapping a single page, and the desired
* page protection is consistent with the bo.
*/
 
map->bo_kmap_type = ttm_bo_map_kmap;
map->page = ttm->pages[start_page];
map->virtual = kmap(map->page);
} else {
/*
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
*/
prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
PAGE_KERNEL :
ttm_io_prot(mem->placement, PAGE_KERNEL);
map->bo_kmap_type = ttm_bo_map_vmap;
map->virtual = vmap(ttm->pages + start_page, num_pages,
0, prot);
}
return (!map->virtual) ? -ENOMEM : 0;
}
 
int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
struct ttm_mem_type_manager *man =
&bo->bdev->man[bo->mem.mem_type];
unsigned long offset, size;
int ret;
 
BUG_ON(!list_empty(&bo->swap));
map->virtual = NULL;
map->bo = bo;
if (num_pages > bo->num_pages)
return -EINVAL;
if (start_page > bo->num_pages)
return -EINVAL;
#if 0
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
return -EPERM;
#endif
(void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
ttm_mem_io_unlock(man);
if (ret)
return ret;
if (!bo->mem.bus.is_iomem) {
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {
offset = start_page << PAGE_SHIFT;
size = num_pages << PAGE_SHIFT;
return ttm_bo_ioremap(bo, offset, size, map);
}
}
EXPORT_SYMBOL(ttm_bo_kmap);
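
/*
 * Usage sketch (not built): CPU access to the first page of a buffer object
 * through the kmap interface above. The caller is assumed to hold a
 * reservation on the bo; ttm_kmap_obj_virtual() reports whether the mapping
 * is iomem so the right accessors can be used.
 */
#if 0
static int example_clear_first_page(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *ptr;
	int ret;

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (ret)
		return ret;

	ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memset_io((void __iomem *)ptr, 0, PAGE_SIZE);
	else
		memset(ptr, 0, PAGE_SIZE);

	ttm_bo_kunmap(&map);
	return 0;
}
#endif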
 
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
struct ttm_buffer_object *bo = map->bo;
struct ttm_mem_type_manager *man =
&bo->bdev->man[bo->mem.mem_type];
 
if (!map->virtual)
return;
switch (map->bo_kmap_type) {
case ttm_bo_map_iomap:
iounmap(map->virtual);
break;
case ttm_bo_map_vmap:
vunmap(map->virtual);
break;
case ttm_bo_map_kmap:
kunmap(map->page);
break;
case ttm_bo_map_premapped:
break;
default:
BUG();
}
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
ttm_mem_io_unlock(man);
map->virtual = NULL;
map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
 
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
bool evict,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
struct ttm_buffer_object *ghost_obj;
void *tmp_obj = NULL;
 
spin_lock(&bdev->fence_lock);
if (bo->sync_obj) {
tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
}
bo->sync_obj = driver->sync_obj_ref(sync_obj);
if (evict) {
ret = ttm_bo_wait(bo, false, false, false);
spin_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
if (ret)
return ret;
 
if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
(bo->ttm != NULL)) {
ttm_tt_unbind(bo->ttm);
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
ttm_bo_free_old_node(bo);
} else {
/**
* This should help pipeline ordinary buffer moves.
*
* Hang old buffer memory on a new buffer object,
* and leave it to be released when the GPU
* operation has completed.
*/
 
set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
spin_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
 
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
return ret;
 
/**
* If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
* bo to be unbound and destroyed.
*/
 
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
ghost_obj->ttm = NULL;
else
bo->ttm = NULL;
 
ttm_bo_unreserve(ghost_obj);
ttm_bo_unref(&ghost_obj);
}
 
*old_mem = *new_mem;
new_mem->mm_node = NULL;
 
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
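
/*
 * Driver-side sketch (not built): a typical move() hook schedules a copy on
 * the GPU and hands the resulting fence to ttm_bo_move_accel_cleanup(), so
 * the old placement is released only once the copy has completed.
 * example_copy_on_gpu() is hypothetical and stands in for the driver blit.
 */
#if 0
static int example_move_blit(struct ttm_buffer_object *bo,
			     bool evict, bool no_wait_gpu,
			     struct ttm_mem_reg *new_mem)
{
	void *fence;
	int ret;

	ret = example_copy_on_gpu(bo, &bo->mem, new_mem, &fence);
	if (ret)
		return ret;

	return ttm_bo_move_accel_cleanup(bo, fence, evict,
					 no_wait_gpu, new_mem);
}
#endif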
/drivers/video/drm/ttm/ttm_execbuf_util.c
0,0 → 1,235
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/* ww_mutex support is not ported yet; an empty acquire context keeps the
 * prototypes below intact. */
struct ww_acquire_ctx{};
 
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>
 
static void ttm_eu_backoff_reservation_locked(struct list_head *list,
struct ww_acquire_ctx *ticket)
{
struct ttm_validate_buffer *entry;
 
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
if (!entry->reserved)
continue;
 
entry->reserved = false;
if (entry->removed) {
ttm_bo_add_to_lru(bo);
entry->removed = false;
}
// ww_mutex_unlock(&bo->resv->lock);
}
}
 
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
struct ttm_validate_buffer *entry;
 
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
if (!entry->reserved)
continue;
 
if (!entry->removed) {
entry->put_count = ttm_bo_del_from_lru(bo);
entry->removed = true;
}
}
}
 
static void ttm_eu_list_ref_sub(struct list_head *list)
{
struct ttm_validate_buffer *entry;
 
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
 
if (entry->put_count) {
ttm_bo_list_ref_sub(bo, entry->put_count, true);
entry->put_count = 0;
}
}
}
 
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
struct list_head *list)
{
struct ttm_validate_buffer *entry;
struct ttm_bo_global *glob;
 
if (list_empty(list))
return;
 
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
spin_lock(&glob->lru_lock);
ttm_eu_backoff_reservation_locked(list, ticket);
// ww_acquire_fini(ticket);
spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
/*
* Reserve buffers for validation.
*
* If a buffer in the list is marked for CPU access, we back off and
* wait for that buffer to become free for GPU access.
*
* If a buffer is reserved for another validation, the validator with
* the highest validation sequence backs off and waits for that buffer
* to become unreserved. This prevents deadlocks when validating multiple
* buffers in different orders.
*/
 
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list)
{
struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;
 
if (list_empty(list))
return 0;
 
list_for_each_entry(entry, list, head) {
entry->reserved = false;
entry->put_count = 0;
entry->removed = false;
}
 
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
 
// ww_acquire_init(ticket, &reservation_ww_class);
retry:
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
 
/* already slowpath reserved? */
if (entry->reserved)
continue;
 
 
ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
 
if (ret == -EDEADLK) {
/* uh oh, we lost out, drop every reservation and try
* to only reserve this buffer, then start over if
* this succeeds.
*/
spin_lock(&glob->lru_lock);
ttm_eu_backoff_reservation_locked(list, ticket);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
			/* the ww_mutex slow path is stubbed out in this
			 * port; treat the buffer as reserved so the retry
			 * below can make progress */
//			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
//							       ticket);
			ret = 0;
			if (unlikely(ret != 0)) {
if (ret == -EINTR)
ret = -ERESTARTSYS;
goto err_fini;
}
 
entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
ret = -EBUSY;
goto err;
}
goto retry;
} else if (ret)
goto err;
 
entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
ret = -EBUSY;
goto err;
}
}
 
// ww_acquire_done(ticket);
spin_lock(&glob->lru_lock);
ttm_eu_del_from_lru_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
return 0;
 
err:
spin_lock(&glob->lru_lock);
ttm_eu_backoff_reservation_locked(list, ticket);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
err_fini:
// ww_acquire_done(ticket);
// ww_acquire_fini(ticket);
return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct list_head *list, void *sync_obj)
{
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
struct ttm_bo_driver *driver;
 
if (list_empty(list))
return;
 
bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
bdev = bo->bdev;
driver = bdev->driver;
glob = bo->glob;
 
spin_lock(&glob->lru_lock);
spin_lock(&bdev->fence_lock);
 
list_for_each_entry(entry, list, head) {
bo = entry->bo;
entry->old_sync_obj = bo->sync_obj;
bo->sync_obj = driver->sync_obj_ref(sync_obj);
ttm_bo_add_to_lru(bo);
// ww_mutex_unlock(&bo->resv->lock);
entry->reserved = false;
}
spin_unlock(&bdev->fence_lock);
spin_unlock(&glob->lru_lock);
// ww_acquire_fini(ticket);
 
list_for_each_entry(entry, list, head) {
if (entry->old_sync_obj)
driver->sync_obj_unref(&entry->old_sync_obj);
}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
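
/*
 * Flow sketch (not built): validation list lifetime around one command
 * submission. example_submit() and its fence object are hypothetical; the
 * point is the reserve -> submit -> fence pairing, with a backoff on
 * failure so no buffer is left reserved.
 */
#if 0
static int example_submit_list(struct list_head *list)
{
	struct ww_acquire_ctx ticket;
	void *fence;
	int ret;

	ret = ttm_eu_reserve_buffers(&ticket, list);
	if (ret)
		return ret;

	ret = example_submit(list, &fence);
	if (ret) {
		ttm_eu_backoff_reservation(&ticket, list);
		return ret;
	}

	ttm_eu_fence_buffer_objects(&ticket, list, fence);
	return 0;
}
#endif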
/drivers/video/drm/ttm/ttm_memory.c
0,0 → 1,605
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#define pr_fmt(fmt) "[TTM] " fmt
 
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
 
struct sysinfo {
u32_t totalram; /* Total usable main memory size */
u32_t freeram; /* Available memory size */
u32_t sharedram; /* Amount of shared memory */
u32_t bufferram; /* Memory used by buffers */
u32_t totalswap; /* Total swap space size */
u32_t freeswap; /* swap space still available */
u32_t totalhigh; /* Total high memory size */
u32_t freehigh; /* Available high memory size */
u32_t mem_unit; /* Memory unit size in bytes */
};
 
 
#define TTM_MEMORY_ALLOC_RETRIES 4
 
struct ttm_mem_zone {
struct kobject kobj;
struct ttm_mem_global *glob;
const char *name;
uint64_t zone_mem;
uint64_t emer_mem;
uint64_t max_mem;
uint64_t swap_limit;
uint64_t used_mem;
};
 
#if 0
 
static struct attribute ttm_mem_sys = {
.name = "zone_memory",
.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
.name = "emergency_memory",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
.name = "available_memory",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
.name = "swap_limit",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
.name = "used_memory",
.mode = S_IRUGO
};
#endif
 
static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
struct ttm_mem_zone *zone =
container_of(kobj, struct ttm_mem_zone, kobj);
 
pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
zone->name, (unsigned long long)zone->used_mem >> 10);
kfree(zone);
}
 
#if 0
static ssize_t ttm_mem_zone_show(struct kobject *kobj,
struct attribute *attr,
char *buffer)
{
struct ttm_mem_zone *zone =
container_of(kobj, struct ttm_mem_zone, kobj);
uint64_t val = 0;
 
spin_lock(&zone->glob->lock);
if (attr == &ttm_mem_sys)
val = zone->zone_mem;
else if (attr == &ttm_mem_emer)
val = zone->emer_mem;
else if (attr == &ttm_mem_max)
val = zone->max_mem;
else if (attr == &ttm_mem_swap)
val = zone->swap_limit;
else if (attr == &ttm_mem_used)
val = zone->used_mem;
spin_unlock(&zone->glob->lock);
 
return snprintf(buffer, PAGE_SIZE, "%llu\n",
(unsigned long long) val >> 10);
}
 
static void ttm_check_swapping(struct ttm_mem_global *glob);
 
static ssize_t ttm_mem_zone_store(struct kobject *kobj,
struct attribute *attr,
const char *buffer,
size_t size)
{
struct ttm_mem_zone *zone =
container_of(kobj, struct ttm_mem_zone, kobj);
int chars;
unsigned long val;
uint64_t val64;
 
chars = sscanf(buffer, "%lu", &val);
if (chars == 0)
return size;
 
val64 = val;
val64 <<= 10;
 
spin_lock(&zone->glob->lock);
if (val64 > zone->zone_mem)
val64 = zone->zone_mem;
if (attr == &ttm_mem_emer) {
zone->emer_mem = val64;
if (zone->max_mem > val64)
zone->max_mem = val64;
} else if (attr == &ttm_mem_max) {
zone->max_mem = val64;
if (zone->emer_mem < val64)
zone->emer_mem = val64;
} else if (attr == &ttm_mem_swap)
zone->swap_limit = val64;
spin_unlock(&zone->glob->lock);
 
ttm_check_swapping(zone->glob);
 
return size;
}
#endif
 
//static struct attribute *ttm_mem_zone_attrs[] = {
// &ttm_mem_sys,
// &ttm_mem_emer,
// &ttm_mem_max,
// &ttm_mem_swap,
// &ttm_mem_used,
// NULL
//};
 
//static const struct sysfs_ops ttm_mem_zone_ops = {
// .show = &ttm_mem_zone_show,
// .store = &ttm_mem_zone_store
//};
 
static struct kobj_type ttm_mem_zone_kobj_type = {
.release = &ttm_mem_zone_kobj_release,
// .sysfs_ops = &ttm_mem_zone_ops,
// .default_attrs = ttm_mem_zone_attrs,
};
 
static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
struct ttm_mem_global *glob =
container_of(kobj, struct ttm_mem_global, kobj);
 
kfree(glob);
}
 
static struct kobj_type ttm_mem_glob_kobj_type = {
.release = &ttm_mem_global_kobj_release,
};
 
#if 0
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
bool from_wq, uint64_t extra)
{
unsigned int i;
struct ttm_mem_zone *zone;
uint64_t target;
 
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
 
if (from_wq)
target = zone->swap_limit;
else if (capable(CAP_SYS_ADMIN))
target = zone->emer_mem;
else
target = zone->max_mem;
 
target = (extra > target) ? 0ULL : target;
 
if (zone->used_mem > target)
return true;
}
return false;
}
 
/**
* At this point we only support a single shrink callback.
* Extend this if needed, perhaps using a linked list of callbacks.
* Note that this function is reentrant:
* many threads may try to swap out at any given time.
*/
 
static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
uint64_t extra)
{
int ret;
struct ttm_mem_shrink *shrink;
 
spin_lock(&glob->lock);
if (glob->shrink == NULL)
goto out;
 
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
shrink = glob->shrink;
spin_unlock(&glob->lock);
ret = shrink->do_shrink(shrink);
spin_lock(&glob->lock);
if (unlikely(ret != 0))
goto out;
}
out:
spin_unlock(&glob->lock);
}
 
 
 
static void ttm_shrink_work(struct work_struct *work)
{
struct ttm_mem_global *glob =
container_of(work, struct ttm_mem_global, work);
 
ttm_shrink(glob, true, 0ULL);
}
#endif
 
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
const struct sysinfo *si)
{
struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem = 0;	/* sysinfo-based sizing is stubbed out below */
int ret;
 
if (unlikely(!zone))
return -ENOMEM;
 
// mem = si->totalram - si->totalhigh;
// mem *= si->mem_unit;
 
zone->name = "kernel";
zone->zone_mem = mem;
zone->max_mem = mem >> 1;
zone->emer_mem = (mem >> 1) + (mem >> 2);
zone->swap_limit = zone->max_mem - (mem >> 3);
zone->used_mem = 0;
zone->glob = glob;
glob->zone_kernel = zone;
ret = kobject_init_and_add(
&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
}
glob->zones[glob->num_zones++] = zone;
return 0;
}
 
#if 0
#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
const struct sysinfo *si)
{
struct ttm_mem_zone *zone;
uint64_t mem;
int ret;
 
if (si->totalhigh == 0)
return 0;
 
zone = kzalloc(sizeof(*zone), GFP_KERNEL);
if (unlikely(!zone))
return -ENOMEM;
 
mem = si->totalram;
mem *= si->mem_unit;
 
zone->name = "highmem";
zone->zone_mem = mem;
zone->max_mem = mem >> 1;
zone->emer_mem = (mem >> 1) + (mem >> 2);
zone->swap_limit = zone->max_mem - (mem >> 3);
zone->used_mem = 0;
zone->glob = glob;
glob->zone_highmem = zone;
ret = kobject_init_and_add(
&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
}
glob->zones[glob->num_zones++] = zone;
return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
const struct sysinfo *si)
{
struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
uint64_t mem;
int ret;
 
if (unlikely(!zone))
return -ENOMEM;
 
mem = si->totalram;
mem *= si->mem_unit;
 
/**
* No special dma32 zone needed.
*/
 
if (mem <= ((uint64_t) 1ULL << 32)) {
kfree(zone);
return 0;
}
 
/*
* Limit max dma32 memory to 4GB for now
* until we can figure out how big this
* zone really is.
*/
 
mem = ((uint64_t) 1ULL << 32);
zone->name = "dma32";
zone->zone_mem = mem;
zone->max_mem = mem >> 1;
zone->emer_mem = (mem >> 1) + (mem >> 2);
zone->swap_limit = zone->max_mem - (mem >> 3);
zone->used_mem = 0;
zone->glob = glob;
glob->zone_dma32 = zone;
ret = kobject_init_and_add(
&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
}
glob->zones[glob->num_zones++] = zone;
return 0;
}
#endif
 
 
 
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
unsigned int i;
struct ttm_mem_zone *zone;
 
/* let the page allocator first stop the shrink work. */
ttm_page_alloc_fini();
ttm_dma_page_alloc_fini();
 
flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
glob->swap_queue = NULL;
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
kobject_del(&zone->kobj);
kobject_put(&zone->kobj);
}
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);
 
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
bool needs_swapping = false;
unsigned int i;
struct ttm_mem_zone *zone;
 
spin_lock(&glob->lock);
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (zone->used_mem > zone->swap_limit) {
needs_swapping = true;
break;
}
}
 
spin_unlock(&glob->lock);
 
if (unlikely(needs_swapping))
(void)queue_work(glob->swap_queue, &glob->work);
 
}
 
static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
uint64_t amount)
{
unsigned int i;
struct ttm_mem_zone *zone;
 
spin_lock(&glob->lock);
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (single_zone && zone != single_zone)
continue;
zone->used_mem -= amount;
}
spin_unlock(&glob->lock);
}
 
void ttm_mem_global_free(struct ttm_mem_global *glob,
uint64_t amount)
{
return ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);
 
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
uint64_t amount, bool reserve)
{
uint64_t limit;
int ret = -ENOMEM;
unsigned int i;
struct ttm_mem_zone *zone;
 
spin_lock(&glob->lock);
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (single_zone && zone != single_zone)
continue;
 
limit = zone->emer_mem;
 
if (zone->used_mem > limit)
goto out_unlock;
}
 
if (reserve) {
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (single_zone && zone != single_zone)
continue;
zone->used_mem += amount;
}
}
 
ret = 0;
out_unlock:
spin_unlock(&glob->lock);
ttm_check_swapping(glob);
 
return ret;
}
 
 
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
uint64_t memory,
bool no_wait, bool interruptible)
{
int count = TTM_MEMORY_ALLOC_RETRIES;
 
while (unlikely(ttm_mem_global_reserve(glob,
single_zone,
memory, true)
!= 0)) {
if (no_wait)
return -ENOMEM;
if (unlikely(count-- == 0))
return -ENOMEM;
ttm_shrink(glob, false, memory + (memory >> 2) + 16);
}
 
return 0;
}
 
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
bool no_wait, bool interruptible)
{
/**
* Normal allocations of kernel memory are registered in
* all zones.
*/
 
return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
interruptible);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
 
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
struct page *page,
bool no_wait, bool interruptible)
{
 
struct ttm_mem_zone *zone = NULL;
 
/**
	 * Page allocations may be registered in a single zone
* only if highmem or !dma32.
*/
 
#ifdef CONFIG_HIGHMEM
if (PageHighMem(page) && glob->zone_highmem != NULL)
zone = glob->zone_highmem;
#else
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
zone = glob->zone_kernel;
#endif
return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
interruptible);
}
 
void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
struct ttm_mem_zone *zone = NULL;
 
#ifdef CONFIG_HIGHMEM
if (PageHighMem(page) && glob->zone_highmem != NULL)
zone = glob->zone_highmem;
#else
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
zone = glob->zone_kernel;
#endif
ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}
 
#endif
 
void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
	/* no-op: per-page accounting is not used in this port */
}
 
size_t ttm_round_pot(size_t size)
{
if ((size & (size - 1)) == 0)
return size;
else if (size > PAGE_SIZE)
return PAGE_ALIGN(size);
else {
size_t tmp_size = 4;
 
while (tmp_size < size)
tmp_size <<= 1;
 
return tmp_size;
}
return 0;
}
EXPORT_SYMBOL(ttm_round_pot);
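
/*
 * Worked examples (assuming PAGE_SIZE == 4096):
 *	ttm_round_pot(96)   == 128	(next power of two >= 96)
 *	ttm_round_pot(4096) == 4096	(already a power of two)
 *	ttm_round_pot(5000) == 8192	(> PAGE_SIZE, so page aligned)
 */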
 
void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	/* no-op: accounting is stubbed out in this port */
}
 
 
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 bool no_wait, bool interruptible)
{
	/* accounting is stubbed out in this port; always succeed */
	return 0;
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
 
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
return 0;
}
EXPORT_SYMBOL(ttm_mem_global_init);
/drivers/video/drm/ttm/ttm_object.c
0,0 → 1,462
/**************************************************************************
*
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
/** @file ttm_ref_object.c
*
* Base- and reference object implementation for the various
* ttm objects. Implements reference counting, minimal security checks
* and release on file close.
*/
 
/**
* struct ttm_object_file
*
* @tdev: Pointer to the ttm_object_device.
*
* @lock: Lock that protects the ref_list list and the
* ref_hash hash tables.
*
* @ref_list: List of ttm_ref_objects to be destroyed at
* file release.
*
* @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
* for fast lookup of ref objects given a base object.
*/
 
#define pr_fmt(fmt) "[TTM] " fmt
 
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
//#include <linux/atomic.h>
 
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
return atomic_add_unless(&kref->refcount, 1, 0);
}
 
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 
struct ttm_object_file {
struct ttm_object_device *tdev;
rwlock_t lock;
struct list_head ref_list;
struct drm_open_hash ref_hash[TTM_REF_NUM];
struct kref refcount;
};
 
/**
* struct ttm_object_device
*
* @object_lock: lock that protects the object_hash hash table.
*
* @object_hash: hash table for fast lookup of object global names.
*
* @object_count: Per device object count.
*
* This is the per-device data structure needed for ttm object management.
*/
 
struct ttm_object_device {
spinlock_t object_lock;
struct drm_open_hash object_hash;
atomic_t object_count;
struct ttm_mem_global *mem_glob;
};
 
/**
* struct ttm_ref_object
*
* @hash: Hash entry for the per-file object reference hash.
*
* @head: List entry for the per-file list of ref-objects.
*
* @kref: Ref count.
*
* @obj: Base object this ref object is referencing.
*
* @ref_type: Type of ref object.
*
* This is similar to an idr object, but it also has a hash table entry
* that allows lookup with a pointer to the referenced object as a key. In
* that way, one can easily detect whether a base object is referenced by
* a particular ttm_object_file. It also carries a ref count to avoid creating
* multiple ref objects if a ttm_object_file references the same base
* object more than once.
*/
 
struct ttm_ref_object {
struct drm_hash_item hash;
struct list_head head;
struct kref kref;
enum ttm_ref_type ref_type;
struct ttm_base_object *obj;
struct ttm_object_file *tfile;
};
 
static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
kref_get(&tfile->refcount);
return tfile;
}
 
static void ttm_object_file_destroy(struct kref *kref)
{
struct ttm_object_file *tfile =
container_of(kref, struct ttm_object_file, refcount);
 
kfree(tfile);
}
 
 
static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
struct ttm_object_file *tfile = *p_tfile;
 
*p_tfile = NULL;
kref_put(&tfile->refcount, ttm_object_file_destroy);
}
 
 
int ttm_base_object_init(struct ttm_object_file *tfile,
struct ttm_base_object *base,
bool shareable,
enum ttm_object_type object_type,
void (*refcount_release) (struct ttm_base_object **),
void (*ref_obj_release) (struct ttm_base_object *,
enum ttm_ref_type ref_type))
{
struct ttm_object_device *tdev = tfile->tdev;
int ret;
 
base->shareable = shareable;
base->tfile = ttm_object_file_ref(tfile);
base->refcount_release = refcount_release;
base->ref_obj_release = ref_obj_release;
base->object_type = object_type;
kref_init(&base->refcount);
spin_lock(&tdev->object_lock);
ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
&base->hash,
(unsigned long)base, 31, 0, 0);
spin_unlock(&tdev->object_lock);
if (unlikely(ret != 0))
goto out_err0;
 
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
if (unlikely(ret != 0))
goto out_err1;
 
ttm_base_object_unref(&base);
 
return 0;
out_err1:
spin_lock(&tdev->object_lock);
(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
spin_unlock(&tdev->object_lock);
out_err0:
return ret;
}
EXPORT_SYMBOL(ttm_base_object_init);
 
static void ttm_release_base(struct kref *kref)
{
struct ttm_base_object *base =
container_of(kref, struct ttm_base_object, refcount);
struct ttm_object_device *tdev = base->tfile->tdev;
 
spin_lock(&tdev->object_lock);
(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
spin_unlock(&tdev->object_lock);
 
/*
* Note: We don't use synchronize_rcu() here because it's far
* too slow. It's up to the user to free the object using
* call_rcu() or ttm_base_object_kfree().
*/
 
if (base->refcount_release) {
ttm_object_file_unref(&base->tfile);
base->refcount_release(&base);
}
}
 
void ttm_base_object_unref(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
 
*p_base = NULL;
 
kref_put(&base->refcount, ttm_release_base);
}
EXPORT_SYMBOL(ttm_base_object_unref);
 
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint32_t key)
{
struct ttm_object_device *tdev = tfile->tdev;
struct ttm_base_object *base;
struct drm_hash_item *hash;
int ret;
 
// rcu_read_lock();
ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
 
if (likely(ret == 0)) {
base = drm_hash_entry(hash, struct ttm_base_object, hash);
ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
}
// rcu_read_unlock();
 
if (unlikely(ret != 0))
return NULL;
 
if (tfile != base->tfile && !base->shareable) {
pr_err("Attempted access of non-shareable object\n");
ttm_base_object_unref(&base);
return NULL;
}
 
return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup);
 
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
enum ttm_ref_type ref_type, bool *existed)
{
struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
struct ttm_ref_object *ref;
struct drm_hash_item *hash;
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
int ret = -EINVAL;
 
if (existed != NULL)
*existed = true;
 
while (ret == -EINVAL) {
read_lock(&tfile->lock);
ret = drm_ht_find_item(ht, base->hash.key, &hash);
 
if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
kref_get(&ref->kref);
read_unlock(&tfile->lock);
break;
}
 
read_unlock(&tfile->lock);
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
false, false);
if (unlikely(ret != 0))
return ret;
ref = kmalloc(sizeof(*ref), GFP_KERNEL);
if (unlikely(ref == NULL)) {
ttm_mem_global_free(mem_glob, sizeof(*ref));
return -ENOMEM;
}
 
ref->hash.key = base->hash.key;
ref->obj = base;
ref->tfile = tfile;
ref->ref_type = ref_type;
kref_init(&ref->kref);
 
write_lock(&tfile->lock);
ret = drm_ht_insert_item(ht, &ref->hash);
 
if (likely(ret == 0)) {
list_add_tail(&ref->head, &tfile->ref_list);
kref_get(&base->refcount);
write_unlock(&tfile->lock);
if (existed != NULL)
*existed = false;
break;
}
 
write_unlock(&tfile->lock);
BUG_ON(ret != -EINVAL);
 
ttm_mem_global_free(mem_glob, sizeof(*ref));
kfree(ref);
}
 
return ret;
}
EXPORT_SYMBOL(ttm_ref_object_add);
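
/*
 * Usage sketch (not built): taking and later dropping a per-file usage
 * reference on a looked-up base object. The key would normally come in
 * from userspace; error handling is trimmed to the essentials.
 */
#if 0
static int example_ref_base(struct ttm_object_file *tfile, uint32_t key)
{
	struct ttm_base_object *base;
	int ret;

	base = ttm_base_object_lookup(tfile, key);
	if (unlikely(base == NULL))
		return -EINVAL;

	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
	ttm_base_object_unref(&base);
	if (ret)
		return ret;

	/* ... later, drop the usage reference again ... */
	return ttm_ref_object_base_unref(tfile, key, TTM_REF_USAGE);
}
#endif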
 
static void ttm_ref_object_release(struct kref *kref)
{
struct ttm_ref_object *ref =
container_of(kref, struct ttm_ref_object, kref);
struct ttm_base_object *base = ref->obj;
struct ttm_object_file *tfile = ref->tfile;
struct drm_open_hash *ht;
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
 
ht = &tfile->ref_hash[ref->ref_type];
(void)drm_ht_remove_item(ht, &ref->hash);
list_del(&ref->head);
write_unlock(&tfile->lock);
 
if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
base->ref_obj_release(base, ref->ref_type);
 
ttm_base_object_unref(&ref->obj);
ttm_mem_global_free(mem_glob, sizeof(*ref));
kfree(ref);
write_lock(&tfile->lock);
}
 
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
unsigned long key, enum ttm_ref_type ref_type)
{
struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
struct ttm_ref_object *ref;
struct drm_hash_item *hash;
int ret;
 
write_lock(&tfile->lock);
ret = drm_ht_find_item(ht, key, &hash);
if (unlikely(ret != 0)) {
write_unlock(&tfile->lock);
return -EINVAL;
}
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
kref_put(&ref->kref, ttm_ref_object_release);
write_unlock(&tfile->lock);
return 0;
}
EXPORT_SYMBOL(ttm_ref_object_base_unref);
 
void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
struct ttm_ref_object *ref;
struct list_head *list;
unsigned int i;
struct ttm_object_file *tfile = *p_tfile;
 
*p_tfile = NULL;
write_lock(&tfile->lock);
 
/*
* Since we release the lock within the loop, we have to
* restart it from the beginning each time.
*/
 
while (!list_empty(&tfile->ref_list)) {
list = tfile->ref_list.next;
ref = list_entry(list, struct ttm_ref_object, head);
ttm_ref_object_release(&ref->kref);
}
 
for (i = 0; i < TTM_REF_NUM; ++i)
drm_ht_remove(&tfile->ref_hash[i]);
 
write_unlock(&tfile->lock);
ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);
 
struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
unsigned int hash_order)
{
struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
unsigned int i;
unsigned int j = 0;
int ret;
 
if (unlikely(tfile == NULL))
return NULL;
 
rwlock_init(&tfile->lock);
tfile->tdev = tdev;
kref_init(&tfile->refcount);
INIT_LIST_HEAD(&tfile->ref_list);
 
for (i = 0; i < TTM_REF_NUM; ++i) {
ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
if (ret) {
j = i;
goto out_err;
}
}
 
return tfile;
out_err:
for (i = 0; i < j; ++i)
drm_ht_remove(&tfile->ref_hash[i]);
 
kfree(tfile);
 
return NULL;
}
EXPORT_SYMBOL(ttm_object_file_init);
 
struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
*mem_glob,
unsigned int hash_order)
{
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
int ret;
 
if (unlikely(tdev == NULL))
return NULL;
 
tdev->mem_glob = mem_glob;
spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0);
ret = drm_ht_create(&tdev->object_hash, hash_order);
 
if (likely(ret == 0))
return tdev;
 
kfree(tdev);
return NULL;
}
EXPORT_SYMBOL(ttm_object_device_init);
 
void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
struct ttm_object_device *tdev = *p_tdev;
 
*p_tdev = NULL;
 
spin_lock(&tdev->object_lock);
drm_ht_remove(&tdev->object_hash);
spin_unlock(&tdev->object_lock);
 
kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);
/drivers/video/drm/ttm/ttm_page_alloc.c
0,0 → 1,923
/*
* Copyright (c) Red Hat Inc.
 
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie <airlied@redhat.com>
* Jerome Glisse <jglisse@redhat.com>
* Pauli Nieminen <suokkos@gmail.com>
*/
 
/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
* - Use page->lru to keep a free list
* - doesn't track currently in use pages
*/
 
#define pr_fmt(fmt) "[TTM] " fmt
 
#include <linux/list.h>
#include <linux/spinlock.h>
//#include <linux/highmem.h>
//#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
//#include <linux/dma-mapping.h>
 
//#include <linux/atomic.h>
 
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
 
#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif
 
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 16
#define FREE_ALL_PAGES (~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL 1000
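/* Worked example (illustrative, assuming a 4 KiB PAGE_SIZE and 32-bit
 * pointers, as on this target): NUM_PAGES_TO_ALLOC = 4096 / 4 = 1024,
 * so each batched allocation or free handles up to 1024 pages and the
 * staging array of page pointers fits in exactly one page.
 */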
 
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 
 
 
#if 0
/**
* struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
*
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
* @fill_lock: Prevent concurrent calls to fill.
* @list: Pool of free uc/wc pages for fast reuse.
* @gfp_flags: Flags to pass for alloc_page.
* @npages: Number of pages in pool.
*/
struct ttm_page_pool {
spinlock_t lock;
bool fill_lock;
struct list_head list;
gfp_t gfp_flags;
unsigned npages;
char *name;
unsigned long nfrees;
unsigned long nrefills;
};
 
/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialized access to them is pointless.
*/
 
struct ttm_pool_opts {
unsigned alloc_size;
unsigned max_size;
unsigned small;
};
 
#define NUM_POOLS 4
 
/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @free_interval: minimum number of jiffies between freeing pages from pool.
 * @page_alloc_inited: reference counting for pool allocation.
 * @work: Work that is used to shrink the pool. Work is only run when there
 * are pages to free.
 * @small_allocation: Limit, in number of pages, below which an allocation is
 * considered small.
*
* @pools: All pool objects in use.
**/
struct ttm_pool_manager {
struct kobject kobj;
struct shrinker mm_shrink;
struct ttm_pool_opts options;
 
union {
struct ttm_page_pool pools[NUM_POOLS];
struct {
struct ttm_page_pool wc_pool;
struct ttm_page_pool uc_pool;
struct ttm_page_pool wc_pool_dma32;
struct ttm_page_pool uc_pool_dma32;
} ;
};
};
 
static struct attribute ttm_page_pool_max = {
.name = "pool_max_size",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
.name = "pool_small_allocation",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
.name = "pool_allocation_size",
.mode = S_IRUGO | S_IWUSR
};
 
static struct attribute *ttm_pool_attrs[] = {
&ttm_page_pool_max,
&ttm_page_pool_small,
&ttm_page_pool_alloc_size,
NULL
};
 
static void ttm_pool_kobj_release(struct kobject *kobj)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
kfree(m);
}
 
static ssize_t ttm_pool_store(struct kobject *kobj,
struct attribute *attr, const char *buffer, size_t size)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
int chars;
unsigned val;
chars = sscanf(buffer, "%u", &val);
if (chars == 0)
return size;
 
/* Convert kb to number of pages */
val = val / (PAGE_SIZE >> 10);
 
if (attr == &ttm_page_pool_max)
m->options.max_size = val;
else if (attr == &ttm_page_pool_small)
m->options.small = val;
else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) {
pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
return size;
} else if (val > NUM_PAGES_TO_ALLOC) {
pr_warn("Setting allocation size to larger than %lu is not recommended\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
}
m->options.alloc_size = val;
}
 
return size;
}
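/* Example of the kb-to-pages conversion above (illustrative, assuming
 * PAGE_SIZE == 4096): PAGE_SIZE >> 10 == 4, so writing "512" (KiB) to a
 * pool attribute stores 512 / 4 == 128 pages; ttm_pool_show() multiplies
 * by the same factor on the way out and prints "512" again.
 */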
 
static ssize_t ttm_pool_show(struct kobject *kobj,
struct attribute *attr, char *buffer)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
unsigned val = 0;
 
if (attr == &ttm_page_pool_max)
val = m->options.max_size;
else if (attr == &ttm_page_pool_small)
val = m->options.small;
else if (attr == &ttm_page_pool_alloc_size)
val = m->options.alloc_size;
 
val = val * (PAGE_SIZE >> 10);
 
return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
 
static const struct sysfs_ops ttm_pool_sysfs_ops = {
.show = &ttm_pool_show,
.store = &ttm_pool_store,
};
 
static struct kobj_type ttm_pool_kobj_type = {
.release = &ttm_pool_kobj_release,
.sysfs_ops = &ttm_pool_sysfs_ops,
.default_attrs = ttm_pool_attrs,
};
 
static struct ttm_pool_manager *_manager;
 
#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
int i;
 
for (i = 0; i < addrinarray; i++)
unmap_page_from_agp(pages[i]);
#endif
return 0;
}
 
static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
int i;
 
for (i = 0; i < addrinarray; i++)
map_page_into_agp(pages[i]);
#endif
return 0;
}
 
static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
int i;
 
for (i = 0; i < addrinarray; i++)
map_page_into_agp(pages[i]);
#endif
return 0;
}
#endif
 
/**
 * Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
enum ttm_caching_state cstate)
{
int pool_index;
 
if (cstate == tt_cached)
return NULL;
 
if (cstate == tt_wc)
pool_index = 0x0;
else
pool_index = 0x1;
 
if (flags & TTM_PAGE_FLAG_DMA32)
pool_index |= 0x2;
 
return &_manager->pools[pool_index];
}
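/* Pool index layout used above (for reference): bit 0 selects caching
 * (0 == wc, 1 == uc) and bit 1 selects DMA32. E.g. an uncached DMA32
 * request maps to 0x1 | 0x2 == 3, i.e. uc_pool_dma32 in the manager's
 * pool union; cached requests bypass the pools entirely (NULL).
 */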
 
/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
unsigned i;
if (set_pages_array_wb(pages, npages))
pr_err("Failed to set %d pages to wb!\n", npages);
for (i = 0; i < npages; ++i)
__free_page(pages[i]);
}
 
static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
unsigned freed_pages)
{
pool->npages -= freed_pages;
pool->nfrees += freed_pages;
}
 
/**
* Free pages from pool.
*
* To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
* number of pages in one go.
*
 * @pool: to free the pages from
 * @nr_free: number of pages to free; pass FREE_ALL_PAGES to drain the pool
**/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
unsigned long irq_flags;
struct page *p;
struct page **pages_to_free;
unsigned freed_pages = 0,
npages_to_free = nr_free;
 
if (NUM_PAGES_TO_ALLOC < nr_free)
npages_to_free = NUM_PAGES_TO_ALLOC;
 
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
pr_err("Failed to allocate memory for pool free operation\n");
return 0;
}
 
restart:
spin_lock_irqsave(&pool->lock, irq_flags);
 
list_for_each_entry_reverse(p, &pool->list, lru) {
if (freed_pages >= npages_to_free)
break;
 
pages_to_free[freed_pages++] = p;
/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
if (freed_pages >= NUM_PAGES_TO_ALLOC) {
/* remove range of pages from the pool */
__list_del(p->lru.prev, &pool->list);
 
ttm_pool_update_free_locked(pool, freed_pages);
/**
* Because changing page caching is costly
* we unlock the pool to prevent stalling.
*/
spin_unlock_irqrestore(&pool->lock, irq_flags);
 
ttm_pages_put(pages_to_free, freed_pages);
if (likely(nr_free != FREE_ALL_PAGES))
nr_free -= freed_pages;
 
if (NUM_PAGES_TO_ALLOC >= nr_free)
npages_to_free = nr_free;
else
npages_to_free = NUM_PAGES_TO_ALLOC;
 
freed_pages = 0;
 
/* free all so restart the processing */
if (nr_free)
goto restart;
 
/* We may not fall through or break here, because the
 * code that follows runs with the spinlock held while
 * we have already dropped it.
 */
goto out;
 
}
}
 
/* remove range of pages from the pool */
if (freed_pages) {
__list_del(&p->lru, &pool->list);
 
ttm_pool_update_free_locked(pool, freed_pages);
nr_free -= freed_pages;
}
 
spin_unlock_irqrestore(&pool->lock, irq_flags);
 
if (freed_pages)
ttm_pages_put(pages_to_free, freed_pages);
out:
kfree(pages_to_free);
return nr_free;
}
 
/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
unsigned i;
int total = 0;
for (i = 0; i < NUM_POOLS; ++i)
total += _manager->pools[i].npages;
 
return total;
}
 
/**
 * Callback for mm to request that the pools reduce the number of pages held.
*/
static int ttm_pool_mm_shrink(struct shrinker *shrink,
struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned i;
unsigned pool_offset = atomic_add_return(1, &start_pool);
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
 
pool_offset = pool_offset % NUM_POOLS;
/* select start pool in round robin fashion */
for (i = 0; i < NUM_POOLS; ++i) {
unsigned nr_free = shrink_pages;
if (shrink_pages == 0)
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
shrink_pages = ttm_page_pool_free(pool, nr_free);
}
/* return estimated number of unused pages in pool */
return ttm_pool_get_num_unused_pages();
}
 
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
 
static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
unregister_shrinker(&manager->mm_shrink);
}
 
static int ttm_set_pages_caching(struct page **pages,
enum ttm_caching_state cstate, unsigned cpages)
{
int r = 0;
/* Set page caching */
switch (cstate) {
case tt_uncached:
r = set_pages_array_uc(pages, cpages);
if (r)
pr_err("Failed to set %d pages to uc!\n", cpages);
break;
case tt_wc:
r = set_pages_array_wc(pages, cpages);
if (r)
pr_err("Failed to set %d pages to wc!\n", cpages);
break;
default:
break;
}
return r;
}
 
/**
 * Free the pages that failed to change caching state. If there are any
 * pages that have already changed their caching state, put them back in
 * the pool.
*/
static void ttm_handle_caching_state_failure(struct list_head *pages,
int ttm_flags, enum ttm_caching_state cstate,
struct page **failed_pages, unsigned cpages)
{
unsigned i;
/* Failed pages have to be freed */
for (i = 0; i < cpages; ++i) {
list_del(&failed_pages[i]->lru);
__free_page(failed_pages[i]);
}
}
 
/**
* Allocate new pages with correct caching.
*
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
*/
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
struct page **caching_array;
struct page *p;
int r = 0;
unsigned i, cpages;
unsigned max_cpages = min(count,
(unsigned)(PAGE_SIZE/sizeof(struct page *)));
 
/* allocate array for page caching change */
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 
if (!caching_array) {
pr_err("Unable to allocate table for new pages\n");
return -ENOMEM;
}
 
for (i = 0, cpages = 0; i < count; ++i) {
p = alloc_page(gfp_flags);
 
if (!p) {
pr_err("Unable to get page %u\n", i);
 
/* store already allocated pages in the pool after
* setting the caching state */
if (cpages) {
r = ttm_set_pages_caching(caching_array,
cstate, cpages);
if (r)
ttm_handle_caching_state_failure(pages,
ttm_flags, cstate,
caching_array, cpages);
}
r = -ENOMEM;
goto out;
}
 
#ifdef CONFIG_HIGHMEM
/* gfp flags of highmem page should never be dma32 so
 * we should be fine in such a case
 */
*/
if (!PageHighMem(p))
#endif
{
caching_array[cpages++] = p;
if (cpages == max_cpages) {
 
r = ttm_set_pages_caching(caching_array,
cstate, cpages);
if (r) {
ttm_handle_caching_state_failure(pages,
ttm_flags, cstate,
caching_array, cpages);
goto out;
}
cpages = 0;
}
}
 
list_add(&p->lru, pages);
}
 
if (cpages) {
r = ttm_set_pages_caching(caching_array, cstate, cpages);
if (r)
ttm_handle_caching_state_failure(pages,
ttm_flags, cstate,
caching_array, cpages);
}
out:
kfree(caching_array);
 
return r;
}
 
/**
* Fill the given pool if there aren't enough pages and the requested number of
* pages is small.
*/
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
int ttm_flags, enum ttm_caching_state cstate, unsigned count,
unsigned long *irq_flags)
{
struct page *p;
int r;
unsigned cpages = 0;
/**
* Only allow one pool fill operation at a time.
 * If the pool doesn't have enough pages for the allocation, new pages
 * are allocated from outside of the pool.
*/
if (pool->fill_lock)
return;
 
pool->fill_lock = true;
 
/* If the allocation request is small and there are not enough
 * pages in the pool, fill the pool up first. */
if (count < _manager->options.small
&& count > pool->npages) {
struct list_head new_pages;
unsigned alloc_size = _manager->options.alloc_size;
 
/**
* Can't change page caching if in irqsave context. We have to
* drop the pool->lock.
*/
spin_unlock_irqrestore(&pool->lock, *irq_flags);
 
INIT_LIST_HEAD(&new_pages);
r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
cstate, alloc_size);
spin_lock_irqsave(&pool->lock, *irq_flags);
 
if (!r) {
list_splice(&new_pages, &pool->list);
++pool->nrefills;
pool->npages += alloc_size;
} else {
pr_err("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
list_for_each_entry(p, &pool->list, lru) {
++cpages;
}
list_splice(&new_pages, &pool->list);
pool->npages += cpages;
}
 
}
pool->fill_lock = false;
}
 
/**
* Cut 'count' number of pages from the pool and put them on the return list.
*
* @return count of pages still required to fulfill the request.
*/
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
struct list_head *pages,
int ttm_flags,
enum ttm_caching_state cstate,
unsigned count)
{
unsigned long irq_flags;
struct list_head *p;
unsigned i;
 
spin_lock_irqsave(&pool->lock, irq_flags);
ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
 
if (count >= pool->npages) {
/* take all pages from the pool */
list_splice_init(&pool->list, pages);
count -= pool->npages;
pool->npages = 0;
goto out;
}
/* Find the cut point for the requested number of pages. Walk from
 * whichever end of the list is closer to halve the search space. */
if (count <= pool->npages/2) {
i = 0;
list_for_each(p, &pool->list) {
if (++i == count)
break;
}
} else {
i = pool->npages + 1;
list_for_each_prev(p, &pool->list) {
if (--i == count)
break;
}
}
/* Cut 'count' number of pages from the pool */
list_cut_position(pages, &pool->list, p);
pool->npages -= count;
count = 0;
out:
spin_unlock_irqrestore(&pool->lock, irq_flags);
return count;
}
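/* Example of the split search above (illustrative): with pool->npages ==
 * 100 and count == 30, 30 <= 50 so the walk starts at the head and the
 * cut point is found after 30 steps; with count == 80 the walk starts at
 * the tail with i == 101 and stops when --i reaches 80, i.e. after only
 * 21 steps instead of 80.
 */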
#endif
 
/* Put all pages in the pages array back into the correct pool to await reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
enum ttm_caching_state cstate)
{
unsigned long irq_flags;
// struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
unsigned i;
 
for (i = 0; i < npages; i++) {
if (pages[i]) {
// if (page_count(pages[i]) != 1)
// pr_err("Erroneous page count. Leaking pages.\n");
FreePage(pages[i]);
pages[i] = NULL;
}
}
return;
 
#if 0
if (pool == NULL) {
/* No pool for this memory type so free the pages */
for (i = 0; i < npages; i++) {
if (pages[i]) {
if (page_count(pages[i]) != 1)
pr_err("Erroneous page count. Leaking pages.\n");
__free_page(pages[i]);
pages[i] = NULL;
}
}
return;
}
 
spin_lock_irqsave(&pool->lock, irq_flags);
for (i = 0; i < npages; i++) {
if (pages[i]) {
if (page_count(pages[i]) != 1)
pr_err("Erroneous page count. Leaking pages.\n");
list_add_tail(&pages[i]->lru, &pool->list);
pages[i] = NULL;
pool->npages++;
}
}
/* Check that we don't go over the pool limit */
npages = 0;
if (pool->npages > _manager->options.max_size) {
npages = pool->npages - _manager->options.max_size;
/* free at least NUM_PAGES_TO_ALLOC number of pages
* to reduce calls to set_memory_wb */
if (npages < NUM_PAGES_TO_ALLOC)
npages = NUM_PAGES_TO_ALLOC;
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (npages)
ttm_page_pool_free(pool, npages);
#endif
 
}
 
/*
 * On success the pages array will hold npages correctly
 * cached pages.
*/
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
enum ttm_caching_state cstate)
{
// struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct list_head plist;
struct page *p = NULL;
// gfp_t gfp_flags = GFP_USER;
unsigned count;
int r;
 
for (r = 0; r < npages; ++r) {
p = AllocPage();
if (!p) {
 
pr_err("Unable to allocate page\n");
return -ENOMEM;
}
 
pages[r] = p;
}
return 0;
 
#if 0
 
 
/* set zero flag for page allocation if required */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
gfp_flags |= __GFP_ZERO;
 
/* No pool for cached pages */
if (pool == NULL) {
if (flags & TTM_PAGE_FLAG_DMA32)
gfp_flags |= GFP_DMA32;
else
gfp_flags |= GFP_HIGHUSER;
 
for (r = 0; r < npages; ++r) {
p = alloc_page(gfp_flags);
if (!p) {
 
pr_err("Unable to allocate page\n");
return -ENOMEM;
}
 
pages[r] = p;
}
return 0;
}
 
/* combine zero flag to pool flags */
gfp_flags |= pool->gfp_flags;
 
/* First we take pages from the pool */
INIT_LIST_HEAD(&plist);
npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
count = 0;
list_for_each_entry(p, &plist, lru) {
pages[count++] = p;
}
 
/* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
list_for_each_entry(p, &plist, lru) {
if (PageHighMem(p))
clear_highpage(p);
else
clear_page(page_address(p));
}
}
 
/* If pool didn't have enough pages allocate new one. */
if (npages > 0) {
/* ttm_alloc_new_pages doesn't reference pool so we can run
* multiple requests in parallel.
**/
INIT_LIST_HEAD(&plist);
r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
list_for_each_entry(p, &plist, lru) {
pages[count++] = p;
}
if (r) {
/* If there are any pages left in the list, put them
 * back in the pool. */
pr_err("Failed to allocate extra pages for large request\n");
ttm_put_pages(pages, count, flags, cstate);
return r;
}
}
#endif
 
return 0;
}
 
#if 0
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
char *name)
{
spin_lock_init(&pool->lock);
pool->fill_lock = false;
INIT_LIST_HEAD(&pool->list);
pool->npages = pool->nfrees = 0;
pool->gfp_flags = flags;
pool->name = name;
}
 
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
int ret;
 
WARN_ON(_manager);
 
pr_info("Initializing pool allocator\n");
 
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
 
ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
 
ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
 
ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
GFP_USER | GFP_DMA32, "wc dma");
 
ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
GFP_USER | GFP_DMA32, "uc dma");
 
_manager->options.max_size = max_pages;
_manager->options.small = SMALL_ALLOCATION;
_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
 
ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
&glob->kobj, "pool");
if (unlikely(ret != 0)) {
kobject_put(&_manager->kobj);
_manager = NULL;
return ret;
}
 
ttm_pool_mm_shrink_init(_manager);
 
return 0;
}
 
void ttm_page_alloc_fini(void)
{
int i;
 
pr_info("Finalizing pool allocator\n");
ttm_pool_mm_shrink_fini(_manager);
 
for (i = 0; i < NUM_POOLS; ++i)
ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
 
kobject_put(&_manager->kobj);
_manager = NULL;
}
 
#endif
 
int ttm_pool_populate(struct ttm_tt *ttm)
{
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
unsigned i;
int ret;
 
if (ttm->state != tt_unpopulated)
return 0;
 
for (i = 0; i < ttm->num_pages; ++i) {
ret = ttm_get_pages(&ttm->pages[i], 1,
ttm->page_flags,
ttm->caching_state);
if (ret != 0) {
ttm_pool_unpopulate(ttm);
return -ENOMEM;
}
 
}
 
ttm->state = tt_unbound;
return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);
 
void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
unsigned i;
 
for (i = 0; i < ttm->num_pages; ++i) {
if (ttm->pages[i]) {
ttm_mem_global_free_page(ttm->glob->mem_glob,
ttm->pages[i]);
ttm_put_pages(&ttm->pages[i], 1,
ttm->page_flags,
ttm->caching_state);
}
}
ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
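#if 0
/* Minimal usage sketch (illustrative only, not part of the driver;
 * assumes a struct ttm_tt already set up with ttm_tt_init()). Populate
 * fills ttm->pages[] and moves the state to tt_unbound; unpopulate
 * returns the pages and moves it back to tt_unpopulated. */
static int example_populate_cycle(struct ttm_tt *ttm)
{
	int ret = ttm_pool_populate(ttm);
	if (ret)
		return ret;		/* -ENOMEM: nothing to undo */
	/* ... bind, use, unbind ... */
	ttm_pool_unpopulate(ttm);
	return 0;
}
#endif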
 
/drivers/video/drm/ttm/ttm_tt.c
0,0 → 1,387
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
 
#define pr_fmt(fmt) "[TTM] " fmt
 
#include <syscall.h>
 
#include <linux/sched.h>
//#include <linux/highmem.h>
//#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
//#include <linux/file.h>
//#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
//#include <drm/drm_cache.h>
#include <drm/drm_mem_util.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
 
/**
* Allocates storage for pointers to the pages that back the ttm.
*/
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}
 
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
sizeof(*ttm->dma_address));
}
 
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
enum ttm_caching_state c_old,
enum ttm_caching_state c_new)
{
int ret = 0;
 
if (PageHighMem(p))
return 0;
 
if (c_old != tt_cached) {
/* p isn't in the default caching state, set it to
* writeback first to free its current memtype. */
 
ret = set_pages_wb(p, 1);
if (ret)
return ret;
}
 
if (c_new == tt_wc)
ret = set_memory_wc((unsigned long) page_address(p), 1);
else if (c_new == tt_uncached)
ret = set_pages_uc(p, 1);
 
return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
enum ttm_caching_state c_old,
enum ttm_caching_state c_new)
{
return 0;
}
#endif /* CONFIG_X86 */
 
/*
* Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
*/
 
static int ttm_tt_set_caching(struct ttm_tt *ttm,
enum ttm_caching_state c_state)
{
int i, j;
struct page *cur_page;
int ret;
 
if (ttm->caching_state == c_state)
return 0;
 
if (ttm->state == tt_unpopulated) {
/* Change caching but don't populate */
ttm->caching_state = c_state;
return 0;
}
 
// if (ttm->caching_state == tt_cached)
// drm_clflush_pages(ttm->pages, ttm->num_pages);
 
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages[i];
if (likely(cur_page != NULL)) {
ret = ttm_tt_set_page_caching(cur_page,
ttm->caching_state,
c_state);
if (unlikely(ret != 0))
goto out_err;
}
}
 
ttm->caching_state = c_state;
 
return 0;
 
out_err:
for (j = 0; j < i; ++j) {
cur_page = ttm->pages[j];
if (likely(cur_page != NULL)) {
(void)ttm_tt_set_page_caching(cur_page, c_state,
ttm->caching_state);
}
}
 
return ret;
}
 
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
enum ttm_caching_state state;
 
if (placement & TTM_PL_FLAG_WC)
state = tt_wc;
else if (placement & TTM_PL_FLAG_UNCACHED)
state = tt_uncached;
else
state = tt_cached;
 
return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
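/* Example (illustrative): a placement of TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC
 * selects tt_wc, so every backing page is switched to write-combining by
 * ttm_tt_set_caching(); a placement with neither TTM_PL_FLAG_WC nor
 * TTM_PL_FLAG_UNCACHED set leaves the pages in the default tt_cached state.
 */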
 
void ttm_tt_destroy(struct ttm_tt *ttm)
{
if (unlikely(ttm == NULL))
return;
 
if (ttm->state == tt_bound) {
ttm_tt_unbind(ttm);
}
 
if (likely(ttm->pages != NULL)) {
ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}
 
// if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
// ttm->swap_storage)
// fput(ttm->swap_storage);
 
ttm->swap_storage = NULL;
ttm->func->destroy(ttm);
}
 
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
ttm->bdev = bdev;
ttm->glob = bdev->glob;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
ttm->caching_state = tt_cached;
ttm->page_flags = page_flags;
ttm->dummy_read_page = dummy_read_page;
ttm->state = tt_unpopulated;
ttm->swap_storage = NULL;
 
ttm_tt_alloc_page_directory(ttm);
if (!ttm->pages) {
ttm_tt_destroy(ttm);
printf("Failed allocating page table\n");
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL(ttm_tt_init);
 
void ttm_tt_fini(struct ttm_tt *ttm)
{
drm_free_large(ttm->pages);
ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
 
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
 
ttm->bdev = bdev;
ttm->glob = bdev->glob;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
ttm->caching_state = tt_cached;
ttm->page_flags = page_flags;
ttm->dummy_read_page = dummy_read_page;
ttm->state = tt_unpopulated;
ttm->swap_storage = NULL;
 
INIT_LIST_HEAD(&ttm_dma->pages_list);
ttm_dma_tt_alloc_page_directory(ttm_dma);
if (!ttm->pages || !ttm_dma->dma_address) {
ttm_tt_destroy(ttm);
printf("Failed allocating page table\n");
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);
 
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
 
drm_free_large(ttm->pages);
ttm->pages = NULL;
drm_free_large(ttm_dma->dma_address);
ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
 
void ttm_tt_unbind(struct ttm_tt *ttm)
{
int ret;
 
if (ttm->state == tt_bound) {
ret = ttm->func->unbind(ttm);
BUG_ON(ret);
ttm->state = tt_unbound;
}
}
 
#if 0
 
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
int ret = 0;
 
if (!ttm)
return -EINVAL;
 
if (ttm->state == tt_bound)
return 0;
 
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
if (ret)
return ret;
 
ret = ttm->func->bind(ttm, bo_mem);
if (unlikely(ret != 0))
return ret;
 
ttm->state = tt_bound;
 
return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
#endif
 
/*
int ttm_tt_swapin(struct ttm_tt *ttm)
{
struct address_space *swap_space;
struct file *swap_storage;
struct page *from_page;
struct page *to_page;
int i;
int ret = -ENOMEM;
 
swap_storage = ttm->swap_storage;
BUG_ON(swap_storage == NULL);
 
swap_space = file_inode(swap_storage)->i_mapping;
 
for (i = 0; i < ttm->num_pages; ++i) {
from_page = shmem_read_mapping_page(swap_space, i);
if (IS_ERR(from_page)) {
ret = PTR_ERR(from_page);
goto out_err;
}
to_page = ttm->pages[i];
if (unlikely(to_page == NULL))
goto out_err;
 
copy_highpage(to_page, from_page);
page_cache_release(from_page);
}
 
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
fput(swap_storage);
ttm->swap_storage = NULL;
ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
 
return 0;
out_err:
return ret;
}
 
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
struct address_space *swap_space;
struct file *swap_storage;
struct page *from_page;
struct page *to_page;
int i;
int ret = -ENOMEM;
 
BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
BUG_ON(ttm->caching_state != tt_cached);
 
if (!persistent_swap_storage) {
swap_storage = shmem_file_setup("ttm swap",
ttm->num_pages << PAGE_SHIFT,
0);
if (unlikely(IS_ERR(swap_storage))) {
pr_err("Failed allocating swap storage\n");
return PTR_ERR(swap_storage);
}
} else
swap_storage = persistent_swap_storage;
 
swap_space = file_inode(swap_storage)->i_mapping;
 
for (i = 0; i < ttm->num_pages; ++i) {
from_page = ttm->pages[i];
if (unlikely(from_page == NULL))
continue;
to_page = shmem_read_mapping_page(swap_space, i);
if (unlikely(IS_ERR(to_page))) {
ret = PTR_ERR(to_page);
goto out_err;
}
copy_highpage(to_page, from_page);
set_page_dirty(to_page);
mark_page_accessed(to_page);
page_cache_release(to_page);
}
 
ttm->bdev->driver->ttm_tt_unpopulate(ttm);
ttm->swap_storage = swap_storage;
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
if (persistent_swap_storage)
ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
 
return 0;
out_err:
if (!persistent_swap_storage)
fput(swap_storage);
 
return ret;
}
 
*/
 
 
/drivers/video/drm/vmwgfx/Makefile
0,0 → 1,100
 
 
CC = gcc
LD = ld
AS = as
FASM = fasm.exe
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32
 
DRV_TOPDIR = $(CURDIR)/../../..
DRM_TOPDIR = $(CURDIR)/..
 
DRV_INCLUDES = $(DRV_TOPDIR)/include
 
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
-I$(DRV_INCLUDES)/linux
 
CFLAGS = -c -O2 $(INCLUDES) -march=i686 -fomit-frame-pointer -fno-builtin-printf
 
LIBPATH:= $(DRV_TOPDIR)/ddk
 
LIBS:= -lddk -lcore -lgcc
 
LDFLAGS = -nostdlib -shared -s -Map $(NAME).map --image-base 0\
--file-alignment 512 --section-alignment 4096
 
 
NAME:= vmwgfx
 
HFILES:= $(DRV_INCLUDES)/linux/types.h \
$(DRV_INCLUDES)/linux/list.h \
$(DRV_INCLUDES)/linux/pci.h \
$(DRV_INCLUDES)/drm/drm.h \
$(DRV_INCLUDES)/drm/drmP.h \
$(DRV_INCLUDES)/drm/drm_edid.h \
$(DRV_INCLUDES)/drm/drm_crtc.h \
$(DRV_INCLUDES)/drm/drm_mode.h \
$(DRV_INCLUDES)/drm/drm_mm.h
 
NAME_SRC= \
main.c \
pci.c \
vmwgfx_buffer.c \
vmwgfx_context.c \
vmwgfx_dmabuf.c \
vmwgfx_drv.c \
vmwgfx_execbuf.c \
vmwgfx_fence.c \
vmwgfx_fifo.c \
vmwgfx_gmr.c \
vmwgfx_gmrid_manager.c \
vmwgfx_irq.c \
vmwgfx_kms.c \
vmwgfx_marker.c \
vmwgfx_resource.c \
vmwgfx_scrn.c \
vmwgfx_surface.c \
vmwgfx_ttm_glue.c \
../i2c/i2c-core.c \
../ttm/ttm_bo.c \
../ttm/ttm_bo_manager.c \
../ttm/ttm_execbuf_util.c \
../ttm/ttm_memory.c \
../ttm/ttm_object.c \
../ttm/ttm_page_alloc.c \
../ttm/ttm_tt.c \
$(DRM_TOPDIR)/drm_crtc.c \
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_global.c \
$(DRM_TOPDIR)/drm_hashtab.c \
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_stub.c
 
 
 
 
NAME_OBJS = $(patsubst %.S, %.o, $(patsubst %.asm, %.o,\
$(patsubst %.c, %.o, $(NAME_SRC))))
 
 
 
all: $(NAME).dll
 
$(NAME).dll: $(NAME_OBJS) $(LIBPATH)/libcore.a $(LIBPATH)/libddk.a vmw.lds Makefile
$(LD) -L$(LIBPATH) $(LDFLAGS) -T vmw.lds -o $@ $(NAME_OBJS) $(LIBS)
kpack $@
 
 
%.o : %.c $(HFILES) Makefile
$(CC) $(CFLAGS) $(DEFINES) -o $@ $<
 
%.o : %.S $(HFILES) Makefile
$(AS) -o $@ $<
 
clean:
-rm -f */*.o
 
/drivers/video/drm/vmwgfx/main.c
0,0 → 1,812
#include <drm/drmP.h>
#include <drm.h>
 
#include <linux/kernel.h>
#include <linux/module.h>
 
#include "vmwgfx_drv.h"
 
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <linux/pci.h>
#include <syscall.h>
 
#include "bitmap.h"
 
struct pci_device {
uint16_t domain;
uint8_t bus;
uint8_t dev;
uint8_t func;
uint16_t vendor_id;
uint16_t device_id;
uint16_t subvendor_id;
uint16_t subdevice_id;
uint32_t device_class;
uint8_t revision;
};
 
extern struct drm_device *main_device;
extern struct drm_file *drm_file_handlers[256];
 
int vmw_init(void);
void cpu_detect();
 
void parse_cmdline(char *cmdline, char *log);
int _stdcall display_handler(ioctl_t *io);
 
int srv_blit_bitmap(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h);
 
int blit_textured(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h);
 
int blit_tex(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h);
 
void get_pci_info(struct pci_device *dev);
int gem_getparam(struct drm_device *dev, void *data);
 
int i915_mask_update(struct drm_device *dev, void *data,
struct drm_file *file);
 
 
static char log[256];
 
struct workqueue_struct *system_wq;
int driver_wq_state;
 
int x86_clflush_size;
unsigned int tsc_khz;
 
int i915_modeset = 1;
 
u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
{
 
int err = 0;
 
if(action != 1)
{
driver_wq_state = 0;
return 0;
};
 
if( GetService("DISPLAY") != 0 )
return 0;
 
if( cmdline && *cmdline )
parse_cmdline(cmdline, log);
 
if(!dbg_open(log))
{
// strcpy(log, "/tmp1/1/vmw.log");
// strcpy(log, "/RD/1/DRIVERS/VMW.log");
strcpy(log, "/HD0/1/vmw.log");
 
if(!dbg_open(log))
{
printf("Can't open %s\nExit\n", log);
return 0;
};
}
dbgprintf(" vmw v3.10\n cmdline: %s\n", cmdline);
 
cpu_detect();
dbgprintf("\ncache line size %d\n", x86_clflush_size);
 
enum_pci_devices();
 
err = vmw_init();
 
if(err)
{
dbgprintf("Epic Fail :(\n");
return 0;
};
 
err = RegService("DISPLAY", display_handler);
 
if( err != 0)
dbgprintf("Set DISPLAY handler\n");
 
// struct drm_i915_private *dev_priv = main_device->dev_private;
// driver_wq_state = 1;
// run_workqueue(dev_priv->wq);
 
return err;
};
 
 
#define CURRENT_API 0x0200 /* 2.00 */
#define COMPATIBLE_API 0x0100 /* 1.00 */
 
#define API_VERSION ((COMPATIBLE_API << 16) | CURRENT_API)
#define DISPLAY_VERSION API_VERSION
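/* Worked example: API_VERSION == (0x0100 << 16) | 0x0200 == 0x01000200,
 * i.e. the high word carries the oldest compatible API (1.00) and the low
 * word the current one (2.00); SRV_GETVERSION hands this value back to
 * callers so they can check both fields.
 */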
 
 
#define SRV_GETVERSION 0
#define SRV_ENUM_MODES 1
#define SRV_SET_MODE 2
#define SRV_GET_CAPS 3
 
#define SRV_CREATE_SURFACE 10
#define SRV_DESTROY_SURFACE 11
#define SRV_LOCK_SURFACE 12
#define SRV_UNLOCK_SURFACE 13
#define SRV_RESIZE_SURFACE 14
#define SRV_BLIT_BITMAP 15
#define SRV_BLIT_TEXTURE 16
#define SRV_BLIT_VIDEO 17
 
 
#define SRV_GET_PCI_INFO 20
#define SRV_GET_PARAM 21
#define SRV_I915_GEM_CREATE 22
#define SRV_DRM_GEM_CLOSE 23
#define SRV_I915_GEM_PIN 24
#define SRV_I915_GEM_SET_CACHEING 25
#define SRV_I915_GEM_GET_APERTURE 26
#define SRV_I915_GEM_PWRITE 27
#define SRV_I915_GEM_BUSY 28
#define SRV_I915_GEM_SET_DOMAIN 29
#define SRV_I915_GEM_MMAP 30
#define SRV_I915_GEM_MMAP_GTT 31
#define SRV_I915_GEM_THROTTLE 32
#define SRV_FBINFO 33
#define SRV_I915_GEM_EXECBUFFER2 34
#define SRV_MASK_UPDATE 35
 
 
 
#define check_input(size) \
if( unlikely((inp==NULL)||(io->inp_size != (size))) ) \
break;
 
#define check_output(size) \
if( unlikely((outp==NULL)||(io->out_size != (size))) ) \
break;
 
int _stdcall display_handler(ioctl_t *io)
{
struct drm_file *file;
 
int retval = -1;
u32_t *inp;
u32_t *outp;
 
inp = io->input;
outp = io->output;
 
file = drm_file_handlers[0];
 
switch(io->io_code)
{
case SRV_GETVERSION:
check_output(4);
*outp = DISPLAY_VERSION;
retval = 0;
break;
#if 0
case SRV_ENUM_MODES:
// dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n",
// inp, io->inp_size, io->out_size );
check_output(4);
// check_input(*outp * sizeof(videomode_t));
if( i915_modeset)
retval = get_videomodes((videomode_t*)inp, outp);
break;
 
case SRV_SET_MODE:
// dbgprintf("SRV_SET_MODE inp %x inp_size %x\n",
// inp, io->inp_size);
check_input(sizeof(videomode_t));
if( i915_modeset )
retval = set_user_mode((videomode_t*)inp);
break;
 
case SRV_GET_CAPS:
retval = get_driver_caps((hwcaps_t*)inp);
break;
 
case SRV_CREATE_SURFACE:
// check_input(8);
// retval = create_surface(main_device, (struct io_call_10*)inp);
break;
 
case SRV_LOCK_SURFACE:
// retval = lock_surface((struct io_call_12*)inp);
break;
 
case SRV_RESIZE_SURFACE:
// retval = resize_surface((struct io_call_14*)inp);
break;
 
case SRV_BLIT_BITMAP:
// srv_blit_bitmap( inp[0], inp[1], inp[2],
// inp[3], inp[4], inp[5], inp[6]);
 
// blit_tex( inp[0], inp[1], inp[2],
// inp[3], inp[4], inp[5], inp[6]);
 
break;
 
case SRV_GET_PCI_INFO:
get_pci_info((struct pci_device *)inp);
retval = 0;
break;
 
case SRV_GET_PARAM:
retval = gem_getparam(main_device, inp);
break;
 
case SRV_I915_GEM_CREATE:
retval = i915_gem_create_ioctl(main_device, inp, file);
break;
 
case SRV_DRM_GEM_CLOSE:
retval = drm_gem_close_ioctl(main_device, inp, file);
break;
 
case SRV_I915_GEM_PIN:
retval = i915_gem_pin_ioctl(main_device, inp, file);
break;
 
case SRV_I915_GEM_SET_CACHEING:
retval = i915_gem_set_caching_ioctl(main_device, inp, file);
break;
 
case SRV_I915_GEM_GET_APERTURE:
retval = i915_gem_get_aperture_ioctl(main_device, inp, file);
break;
 
case SRV_I915_GEM_PWRITE:
retval = i915_gem_pwrite_ioctl(main_device, inp, file);
break;
 
case SRV_I915_GEM_BUSY:
retval = i915_gem_busy_ioctl(main_device, inp, file);
break;
 
case SRV_I915_GEM_SET_DOMAIN:
retval = i915_gem_set_domain_ioctl(main_device, inp, file);
break;
 
case SRV_I915_GEM_THROTTLE:
retval = i915_gem_throttle_ioctl(main_device, inp, file);
break;
 
case SRV_I915_GEM_MMAP:
retval = i915_gem_mmap_ioctl(main_device, inp, file);
break;
 
case SRV_I915_GEM_MMAP_GTT:
retval = i915_gem_mmap_gtt_ioctl(main_device, inp, file);
break;
 
 
case SRV_FBINFO:
retval = i915_fbinfo(inp);
break;
 
case SRV_I915_GEM_EXECBUFFER2:
retval = i915_gem_execbuffer2(main_device, inp, file);
break;
 
case SRV_MASK_UPDATE:
retval = i915_mask_update(main_device, inp, file);
break;
#endif
 
};
 
return retval;
}
 
 
#define PCI_CLASS_REVISION 0x08
#define PCI_CLASS_DISPLAY_VGA 0x0300
#define PCI_CLASS_BRIDGE_HOST 0x0600
#define PCI_CLASS_BRIDGE_ISA 0x0601
 
int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn)
{
u16_t vendor, device;
u32_t class;
int ret = 0;
 
vendor = id & 0xffff;
device = (id >> 16) & 0xffff;
 
if(vendor == 0x15AD )
{
class = PciRead32(busnr, devfn, PCI_CLASS_REVISION);
class >>= 16;
 
if( class == PCI_CLASS_DISPLAY_VGA )
ret = 1;
}
return ret;
};
 
 
static char* parse_path(char *p, char *log)
{
char c;
 
while( (c = *p++) == ' ');
p--;
while( (c = *log++ = *p++) && (c != ' '));
*log = 0;
 
return p;
};
 
void parse_cmdline(char *cmdline, char *log)
{
char *p = cmdline;
 
char c = *p++;
 
while( c )
{
if( c == '-')
{
switch(*p++)
{
case 'l':
p = parse_path(p, log);
break;
};
};
c = *p++;
};
};
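/* Example (illustrative): a command line of "-l /hd0/1/vmw.log" makes
 * parse_path() copy "/hd0/1/vmw.log" into log; switches other than -l
 * are silently skipped by the loop above.
 */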
 
 
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
/* ecx is often an input as well as an output. */
asm volatile("cpuid"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
"=d" (*edx)
: "0" (*eax), "2" (*ecx)
: "memory");
}
 
 
 
static inline void cpuid(unsigned int op,
unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
*eax = op;
*ecx = 0;
__cpuid(eax, ebx, ecx, edx);
}
 
void cpu_detect()
{
u32 junk, tfms, cap0, misc;
 
cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 
if (cap0 & (1<<19))
{
x86_clflush_size = ((misc >> 8) & 0xff) * 8;
}
 
tsc_khz = (unsigned int)(GetCpuFreq()/1000);
}
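/* Worked example: CPUID leaf 1 reports the CLFLUSH line size in EBX
 * bits 15:8 in units of 8 bytes, valid only when EDX bit 19 (CLFSH) is
 * set. A typical value of 8 gives x86_clflush_size = 8 * 8 = 64 bytes.
 */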
 
/*
int get_driver_caps(hwcaps_t *caps)
{
int ret = 0;
 
switch(caps->idx)
{
case 0:
caps->opt[0] = 0;
caps->opt[1] = 0;
break;
 
case 1:
caps->cap1.max_tex_width = 4096;
caps->cap1.max_tex_height = 4096;
break;
default:
ret = 1;
};
caps->idx = 1;
return ret;
}
 
 
void get_pci_info(struct pci_device *dev)
{
struct pci_dev *pdev = main_device->pdev;
 
memset(dev, 0, sizeof(*dev));
 
dev->domain = 0;
dev->bus = pdev->busnr;
dev->dev = pdev->devfn >> 3;
dev->func = pdev->devfn & 7;
dev->vendor_id = pdev->vendor;
dev->device_id = pdev->device;
dev->revision = pdev->revision;
};
 
*/
 
#include <ddk.h>
#include <linux/mm.h>
#include <drm/drmP.h>
#include <linux/hdmi.h>
#include <linux/ctype.h>
 
/**
* hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
* @frame: HDMI AVI infoframe
*
* Returns 0 on success or a negative error code on failure.
*/
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
{
memset(frame, 0, sizeof(*frame));
 
frame->type = HDMI_INFOFRAME_TYPE_AVI;
frame->version = 2;
frame->length = 13;
 
return 0;
}
 
 
static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
{
while (bytes) {
if (*start != value)
return (void *)start;
start++;
bytes--;
}
return NULL;
}
 
/**
* memchr_inv - Find an unmatching character in an area of memory.
* @start: The memory area
* @c: Find a character other than c
* @bytes: The size of the area.
*
* returns the address of the first character other than @c, or %NULL
* if the whole buffer contains just @c.
*/
void *memchr_inv(const void *start, int c, size_t bytes)
{
u8 value = c;
u64 value64;
unsigned int words, prefix;
 
if (bytes <= 16)
return check_bytes8(start, value, bytes);
 
value64 = value;
#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
value64 *= 0x0101010101010101;
#elif defined(ARCH_HAS_FAST_MULTIPLIER)
value64 *= 0x01010101;
value64 |= value64 << 32;
#else
value64 |= value64 << 8;
value64 |= value64 << 16;
value64 |= value64 << 32;
#endif
 
prefix = (unsigned long)start % 8;
if (prefix) {
u8 *r;
 
prefix = 8 - prefix;
r = check_bytes8(start, value, prefix);
if (r)
return r;
start += prefix;
bytes -= prefix;
}
 
words = bytes / 8;
 
while (words) {
if (*(u64 *)start != value64)
return check_bytes8(start, value, 8);
start += 8;
words--;
}
 
return check_bytes8(start, value, bytes % 8);
}
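/* Example (illustrative): memchr_inv(buf, 0, len) is the idiomatic
 * "is this buffer all zeroes?" check: it returns NULL for an all-zero
 * buffer and a pointer to the first non-zero byte otherwise. The
 * word-at-a-time loop above only runs once the address is 8-byte
 * aligned, which the prefix pass guarantees.
 */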
 
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
int i;
 
i = vsnprintf(buf, size, fmt, args);
 
if (likely(i < size))
return i;
if (size != 0)
return size - 1;
return 0;
}
 
 
int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
 
va_start(args, fmt);
i = vscnprintf(buf, size, fmt, args);
va_end(args);
 
return i;
}
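/* Example of the difference from snprintf() (illustrative): with an
 * 8-byte buffer and the string "overflow!", snprintf() returns 9 (the
 * untruncated length) while scnprintf() returns 7, the number of
 * characters actually stored; hex_dump_to_buffer() relies on the latter
 * when advancing its write offset lx.
 */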
 
 
 
#define _U 0x01 /* upper */
#define _L 0x02 /* lower */
#define _D 0x04 /* digit */
#define _C 0x08 /* cntrl */
#define _P 0x10 /* punct */
#define _S 0x20 /* white space (space/lf/tab) */
#define _X 0x40 /* hex digit */
#define _SP 0x80 /* hard space (0x20) */
 
extern const unsigned char _ctype[];
 
#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
 
#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
#define isdigit(c) ((__ismask(c)&(_D)) != 0)
#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c) ((__ismask(c)&(_L)) != 0)
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c) ((__ismask(c)&(_P)) != 0)
/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c) ((__ismask(c)&(_S)) != 0)
#define isupper(c) ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)
 
#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)
 
 
 
//const char hex_asc[] = "0123456789abcdef";
 
/**
* hex_to_bin - convert a hex digit to its real value
* @ch: ascii character represents hex digit
*
* hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
* input.
*/
int hex_to_bin(char ch)
{
if ((ch >= '0') && (ch <= '9'))
return ch - '0';
ch = tolower(ch);
if ((ch >= 'a') && (ch <= 'f'))
return ch - 'a' + 10;
return -1;
}
EXPORT_SYMBOL(hex_to_bin);
 
/**
* hex2bin - convert an ascii hexadecimal string to its binary representation
* @dst: binary result
* @src: ascii hexadecimal string
* @count: result length
*
* Return 0 on success, -1 in case of bad input.
*/
int hex2bin(u8 *dst, const char *src, size_t count)
{
while (count--) {
int hi = hex_to_bin(*src++);
int lo = hex_to_bin(*src++);
 
if ((hi < 0) || (lo < 0))
return -1;
 
*dst++ = (hi << 4) | lo;
}
return 0;
}
EXPORT_SYMBOL(hex2bin);
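/* Usage sketch (illustrative): hex2bin(mac, "001b44", 3) fills mac[]
 * with { 0x00, 0x1b, 0x44 } and returns 0; any non-hex character makes
 * hex_to_bin() return -1, which aborts the whole conversion.
 */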
 
/**
* hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
* @buf: data blob to dump
* @len: number of bytes in the @buf
* @rowsize: number of bytes to print per line; must be 16 or 32
* @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
* @linebuf: where to put the converted data
* @linebuflen: total size of @linebuf, including space for terminating NUL
* @ascii: include ASCII after the hex output
*
* hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
* 16 or 32 bytes of input data converted to hex + ASCII output.
*
* Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
* to a hex + ASCII dump at the supplied memory location.
* The converted output is always NUL-terminated.
*
* E.g.:
* hex_dump_to_buffer(frame->data, frame->len, 16, 1,
* linebuf, sizeof(linebuf), true);
*
* example output buffer:
* 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
*/
void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
int groupsize, char *linebuf, size_t linebuflen,
bool ascii)
{
const u8 *ptr = buf;
u8 ch;
int j, lx = 0;
int ascii_column;
 
if (rowsize != 16 && rowsize != 32)
rowsize = 16;
 
if (!len)
goto nil;
if (len > rowsize) /* limit to one line at a time */
len = rowsize;
if ((len % groupsize) != 0) /* no mixed size output */
groupsize = 1;
 
switch (groupsize) {
case 8: {
const u64 *ptr8 = buf;
int ngroups = len / groupsize;
 
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s%16.16llx", j ? " " : "",
(unsigned long long)*(ptr8 + j));
ascii_column = 17 * ngroups + 2;
break;
}
 
case 4: {
const u32 *ptr4 = buf;
int ngroups = len / groupsize;
 
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s%8.8x", j ? " " : "", *(ptr4 + j));
ascii_column = 9 * ngroups + 2;
break;
}
 
case 2: {
const u16 *ptr2 = buf;
int ngroups = len / groupsize;
 
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s%4.4x", j ? " " : "", *(ptr2 + j));
ascii_column = 5 * ngroups + 2;
break;
}
 
default:
for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
ch = ptr[j];
linebuf[lx++] = hex_asc_hi(ch);
linebuf[lx++] = hex_asc_lo(ch);
linebuf[lx++] = ' ';
}
if (j)
lx--;
 
ascii_column = 3 * rowsize + 2;
break;
}
if (!ascii)
goto nil;
 
while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
linebuf[lx++] = ' ';
for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) {
ch = ptr[j];
linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
}
nil:
linebuf[lx++] = '\0';
}
 
/**
* print_hex_dump - print a text hex dump to syslog for a binary blob of data
* @level: kernel log level (e.g. KERN_DEBUG)
* @prefix_str: string to prefix each line with;
* caller supplies trailing spaces for alignment if desired
* @prefix_type: controls whether prefix of an offset, address, or none
* is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
* @rowsize: number of bytes to print per line; must be 16 or 32
* @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
* @buf: data blob to dump
* @len: number of bytes in the @buf
* @ascii: include ASCII after the hex output
*
* Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump
* to the kernel log at the specified kernel log level, with an optional
* leading prefix.
*
* print_hex_dump() works on one "line" of output at a time, i.e.,
* 16 or 32 bytes of input data converted to hex + ASCII output.
* print_hex_dump() iterates over the entire input @buf, breaking it into
* "line size" chunks to format and print.
*
* E.g.:
* print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
* 16, 1, frame->data, frame->len, true);
*
* Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
* 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
* Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode:
* ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~.
*/
void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
int rowsize, int groupsize,
const void *buf, size_t len, bool ascii)
{
const u8 *ptr = buf;
int i, linelen, remaining = len;
unsigned char linebuf[32 * 3 + 2 + 32 + 1];
 
if (rowsize != 16 && rowsize != 32)
rowsize = 16;
 
for (i = 0; i < len; i += rowsize) {
linelen = min(remaining, rowsize);
remaining -= rowsize;
 
hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
linebuf, sizeof(linebuf), ascii);
 
switch (prefix_type) {
case DUMP_PREFIX_ADDRESS:
printk("%s%s%p: %s\n",
level, prefix_str, ptr + i, linebuf);
break;
case DUMP_PREFIX_OFFSET:
printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
break;
default:
printk("%s%s%s\n", level, prefix_str, linebuf);
break;
}
}
}
 
void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
const void *buf, size_t len)
{
print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
buf, len, true);
}
 
 
/drivers/video/drm/vmwgfx/pci.c
0,0 → 1,880
 
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <pci.h>
#include <syscall.h>
 
extern int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn);
 
static LIST_HEAD(devices);
 
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
 
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
 
/*
* Translate the low bits of the PCI base
* to the resource type
*/
static inline unsigned int pci_calc_resource_flags(unsigned int flags)
{
if (flags & PCI_BASE_ADDRESS_SPACE_IO)
return IORESOURCE_IO;
 
if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
return IORESOURCE_MEM | IORESOURCE_PREFETCH;
 
return IORESOURCE_MEM;
}
 
 
static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask)
{
u32_t size = mask & maxbase; /* Find the significant bits */
 
if (!size)
return 0;
 
/* Get the lowest of them to find the decode size, and
from that the extent. */
size = (size & ~(size-1)) - 1;
 
/* base == maxbase can be valid only if the BAR has
already been programmed with all 1s. */
if (base == maxbase && ((base | size) & mask) != mask)
return 0;
 
return size;
}
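/* Worked example (illustrative): for a 64 KiB memory BAR at 0xfebf0000,
 * writing ~0 and reading back yields maxbase == 0xffff0000; masking with
 * PCI_BASE_ADDRESS_MEM_MASK keeps 0xffff0000, (size & ~(size-1)) - 1 ==
 * 0xffff, so the resource spans res->start .. res->start + 0xffff.
 */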
 
static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask)
{
u64_t size = mask & maxbase; /* Find the significant bits */
 
if (!size)
return 0;
 
/* Get the lowest of them to find the decode size, and
from that the extent. */
size = (size & ~(size-1)) - 1;
 
/* base == maxbase can be valid only if the BAR has
already been programmed with all 1s. */
if (base == maxbase && ((base | size) & mask) != mask)
return 0;
 
return size;
}
 
static inline int is_64bit_memory(u32_t mask)
{
if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
(PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64))
return 1;
return 0;
}
 
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
u32_t pos, reg, next;
u32_t l, sz;
struct resource *res;
 
for(pos=0; pos < howmany; pos = next)
{
u64_t l64;
u64_t sz64;
u32_t raw_sz;
 
next = pos + 1;
 
res = &dev->resource[pos];
 
reg = PCI_BASE_ADDRESS_0 + (pos << 2);
l = PciRead32(dev->busnr, dev->devfn, reg);
PciWrite32(dev->busnr, dev->devfn, reg, ~0);
sz = PciRead32(dev->busnr, dev->devfn, reg);
PciWrite32(dev->busnr, dev->devfn, reg, l);
 
if (!sz || sz == 0xffffffff)
continue;
 
if (l == 0xffffffff)
l = 0;
 
raw_sz = sz;
if ((l & PCI_BASE_ADDRESS_SPACE) ==
PCI_BASE_ADDRESS_SPACE_MEMORY)
{
sz = pci_size(l, sz, (u32_t)PCI_BASE_ADDRESS_MEM_MASK);
/*
* For 64bit prefetchable memory sz could be 0, if the
* real size is bigger than 4G, so we need to check
* szhi for that.
*/
if (!is_64bit_memory(l) && !sz)
continue;
res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
}
else {
sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
if (!sz)
continue;
res->start = l & PCI_BASE_ADDRESS_IO_MASK;
res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
}
res->end = res->start + (unsigned long) sz;
res->flags |= pci_calc_resource_flags(l);
if (is_64bit_memory(l))
{
u32_t szhi, lhi;
 
lhi = PciRead32(dev->busnr, dev->devfn, reg+4);
PciWrite32(dev->busnr, dev->devfn, reg+4, ~0);
szhi = PciRead32(dev->busnr, dev->devfn, reg+4);
PciWrite32(dev->busnr, dev->devfn, reg+4, lhi);
sz64 = ((u64_t)szhi << 32) | raw_sz;
l64 = ((u64_t)lhi << 32) | l;
sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
next++;
 
#if BITS_PER_LONG == 64
if (!sz64) {
res->start = 0;
res->end = 0;
res->flags = 0;
continue;
}
res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK;
res->end = res->start + sz64;
#else
if (sz64 > 0x100000000ULL) {
printk(KERN_ERR "PCI: Unable to handle 64-bit "
"BAR for device %s\n", pci_name(dev));
res->start = 0;
res->flags = 0;
}
else if (lhi)
{
/* 64-bit wide address, treat as disabled */
PciWrite32(dev->busnr, dev->devfn, reg,
l & ~(u32_t)PCI_BASE_ADDRESS_MEM_MASK);
PciWrite32(dev->busnr, dev->devfn, reg+4, 0);
res->start = 0;
res->end = sz;
}
#endif
}
}
 
if ( rom )
{
dev->rom_base_reg = rom;
res = &dev->resource[PCI_ROM_RESOURCE];
 
l = PciRead32(dev->busnr, dev->devfn, rom);
PciWrite32(dev->busnr, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE);
sz = PciRead32(dev->busnr, dev->devfn, rom);
PciWrite32(dev->busnr, dev->devfn, rom, l);
 
if (l == 0xffffffff)
l = 0;
 
if (sz && sz != 0xffffffff)
{
sz = pci_size(l, sz, (u32_t)PCI_ROM_ADDRESS_MASK);
 
if (sz)
{
res->flags = (l & IORESOURCE_ROM_ENABLE) |
IORESOURCE_MEM | IORESOURCE_PREFETCH |
IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
res->start = l & PCI_ROM_ADDRESS_MASK;
res->end = res->start + (unsigned long) sz;
}
}
}
}
 
static void pci_read_irq(struct pci_dev *dev)
{
u8_t irq;
 
irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN);
dev->pin = irq;
if (irq)
irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_LINE);
dev->irq = irq;
};
 
 
int pci_setup_device(struct pci_dev *dev)
{
u32_t class;
 
class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION);
dev->revision = class & 0xff;
class >>= 8; /* upper 3 bytes */
dev->class = class;
 
/* "Unknown power state" */
// dev->current_state = PCI_UNKNOWN;
 
/* Early fixups, before probing the BARs */
// pci_fixup_device(pci_fixup_early, dev);
class = dev->class >> 8;
 
switch (dev->hdr_type)
{
case PCI_HEADER_TYPE_NORMAL: /* standard header */
if (class == PCI_CLASS_BRIDGE_PCI)
goto bad;
pci_read_irq(dev);
pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
dev->subsystem_vendor = PciRead16(dev->busnr, dev->devfn,PCI_SUBSYSTEM_VENDOR_ID);
dev->subsystem_device = PciRead16(dev->busnr, dev->devfn, PCI_SUBSYSTEM_ID);
 
/*
* Do the ugly legacy mode stuff here rather than broken chip
* quirk code. Legacy mode ATA controllers have fixed
* addresses. These are not always echoed in BAR0-3, and
* BAR0-3 in a few cases contain junk!
*/
if (class == PCI_CLASS_STORAGE_IDE)
{
u8_t progif;
 
progif = PciRead8(dev->busnr, dev->devfn,PCI_CLASS_PROG);
if ((progif & 1) == 0)
{
dev->resource[0].start = 0x1F0;
dev->resource[0].end = 0x1F7;
dev->resource[0].flags = LEGACY_IO_RESOURCE;
dev->resource[1].start = 0x3F6;
dev->resource[1].end = 0x3F6;
dev->resource[1].flags = LEGACY_IO_RESOURCE;
}
if ((progif & 4) == 0)
{
dev->resource[2].start = 0x170;
dev->resource[2].end = 0x177;
dev->resource[2].flags = LEGACY_IO_RESOURCE;
dev->resource[3].start = 0x376;
dev->resource[3].end = 0x376;
dev->resource[3].flags = LEGACY_IO_RESOURCE;
};
}
break;
 
case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
if (class != PCI_CLASS_BRIDGE_PCI)
goto bad;
/* The PCI-to-PCI bridge spec requires that a subtractive
 decoding (i.e. transparent) bridge have a programming
 interface code of 0x01. */
pci_read_irq(dev);
dev->transparent = ((dev->class & 0xff) == 1);
pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
break;
 
case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
if (class != PCI_CLASS_BRIDGE_CARDBUS)
goto bad;
pci_read_irq(dev);
pci_read_bases(dev, 1, 0);
dev->subsystem_vendor = PciRead16(dev->busnr,
dev->devfn,
PCI_CB_SUBSYSTEM_VENDOR_ID);
 
dev->subsystem_device = PciRead16(dev->busnr,
dev->devfn,
PCI_CB_SUBSYSTEM_ID);
break;
 
default: /* unknown header */
printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n",
pci_name(dev), dev->hdr_type);
return -1;
 
bad:
printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n",
pci_name(dev), class, dev->hdr_type);
dev->class = PCI_CLASS_NOT_DEFINED;
}
 
/* We found a fine healthy device, go go go... */
 
return 0;
};
 
static pci_dev_t* pci_scan_device(u32_t busnr, int devfn)
{
pci_dev_t *dev;
 
u32_t id;
u8_t hdr;
 
int timeout = 10;
 
id = PciRead32(busnr, devfn, PCI_VENDOR_ID);
 
/* some broken boards return 0 or ~0 if a slot is empty: */
if (id == 0xffffffff || id == 0x00000000 ||
id == 0x0000ffff || id == 0xffff0000)
return NULL;
 
while (id == 0xffff0001)
{
 
delay(timeout/10);
timeout *= 2;
 
id = PciRead32(busnr, devfn, PCI_VENDOR_ID);
 
/* Card hasn't responded in 60 seconds? Must be stuck. */
if (timeout > 60 * 100)
{
printk(KERN_WARNING "Device %02x:%02x.%d not "
"responding\n", busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
return NULL;
}
};
 
if( pci_scan_filter(id, busnr, devfn) == 0)
return NULL;
 
hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE);
 
dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0);
if(unlikely(dev == NULL))
return NULL;
 
INIT_LIST_HEAD(&dev->link);
 
 
dev->pci_dev.busnr = busnr;
dev->pci_dev.devfn = devfn;
dev->pci_dev.hdr_type = hdr & 0x7f;
dev->pci_dev.multifunction = !!(hdr & 0x80);
dev->pci_dev.vendor = id & 0xffff;
dev->pci_dev.device = (id >> 16) & 0xffff;
 
pci_setup_device(&dev->pci_dev);
 
return dev;
 
};
 
 
 
 
int pci_scan_slot(u32_t bus, int devfn)
{
int func, nr = 0;
 
for (func = 0; func < 8; func++, devfn++)
{
pci_dev_t *dev;
 
dev = pci_scan_device(bus, devfn);
if( dev )
{
list_add(&dev->link, &devices);
 
nr++;
 
/*
* If this is a single function device,
* don't scan past the first function.
*/
if (!dev->pci_dev.multifunction)
{
if (func > 0) {
dev->pci_dev.multifunction = 1;
}
else {
break;
}
}
}
else {
if (func == 0)
break;
}
};
 
return nr;
};
 
#define PCI_FIND_CAP_TTL 48
 
static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn,
u8 pos, int cap, int *ttl)
{
u8 id;
 
while ((*ttl)--) {
pos = PciRead8(bus, devfn, pos);
if (pos < 0x40)
break;
pos &= ~3;
id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID);
if (id == 0xff)
break;
if (id == cap)
return pos;
pos += PCI_CAP_LIST_NEXT;
}
return 0;
}
 
static int __pci_find_next_cap(unsigned int bus, unsigned int devfn,
u8 pos, int cap)
{
int ttl = PCI_FIND_CAP_TTL;
 
return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}
 
static int __pci_bus_find_cap_start(unsigned int bus,
unsigned int devfn, u8 hdr_type)
{
u16 status;
 
status = PciRead16(bus, devfn, PCI_STATUS);
if (!(status & PCI_STATUS_CAP_LIST))
return 0;
 
switch (hdr_type) {
case PCI_HEADER_TYPE_NORMAL:
case PCI_HEADER_TYPE_BRIDGE:
return PCI_CAPABILITY_LIST;
case PCI_HEADER_TYPE_CARDBUS:
return PCI_CB_CAPABILITY_LIST;
default:
return 0;
}
 
}
 
 
int pci_find_capability(struct pci_dev *dev, int cap)
{
int pos;
 
pos = __pci_bus_find_cap_start(dev->busnr, dev->devfn, dev->hdr_type);
if (pos)
pos = __pci_find_next_cap(dev->busnr, dev->devfn, pos, cap);
 
return pos;
}
 
 
 
 
int enum_pci_devices(void)
{
pci_dev_t *dev;
u32_t last_bus;
u32_t bus = 0 , devfn = 0;
 
 
last_bus = PciApi(1);
 
 
if( unlikely(last_bus == -1))
return -1;
 
for(;bus <= last_bus; bus++)
{
for (devfn = 0; devfn < 0x100; devfn += 8)
pci_scan_slot(bus, devfn);
 
 
}
for(dev = (pci_dev_t*)devices.next;
&dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
dbgprintf("PCI device %x:%x bus:%x devfn:%x\n",
dev->pci_dev.vendor,
dev->pci_dev.device,
dev->pci_dev.busnr,
dev->pci_dev.devfn);
 
}
return 0;
}
 
const struct pci_device_id* find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist)
{
pci_dev_t *dev;
const struct pci_device_id *ent;
 
for(dev = (pci_dev_t*)devices.next;
&dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
/* Match each table entry on both vendor and device; comparing only
the device id after filtering on the first entry's vendor would
mishandle id lists that mix vendors. */
for(ent = idlist; ent->vendor != 0; ent++)
{
if(unlikely(ent->vendor == dev->pci_dev.vendor &&
ent->device == dev->pci_dev.device))
{
pdev->pci_dev = dev->pci_dev;
return ent;
}
};
}
 
return NULL;
};
 
struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from)
{
pci_dev_t *dev;
 
dev = (pci_dev_t*)devices.next;
 
if(from != NULL)
{
for(; &dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
if( &dev->pci_dev == from)
{
dev = (pci_dev_t*)dev->link.next;
break;
};
}
};
 
for(; &dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
if( dev->pci_dev.vendor != vendor )
continue;
 
if(dev->pci_dev.device == device)
{
return &dev->pci_dev;
}
}
return NULL;
};
 
 
struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
{
pci_dev_t *dev;
 
for(dev = (pci_dev_t*)devices.next;
&dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
if ( dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn)
return &dev->pci_dev;
}
return NULL;
}
 
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
{
pci_dev_t *dev;
 
dev = (pci_dev_t*)devices.next;
 
if(from != NULL)
{
for(; &dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
if( &dev->pci_dev == from)
{
dev = (pci_dev_t*)dev->link.next;
break;
};
}
};
 
for(; &dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
if( dev->pci_dev.class == class)
{
return &dev->pci_dev;
}
}
 
return NULL;
}
 
 
#define PIO_OFFSET 0x10000UL
#define PIO_MASK 0x0ffffUL
#define PIO_RESERVED 0x40000UL
 
#define IO_COND(addr, is_pio, is_mmio) do { \
unsigned long port = (unsigned long __force)addr; \
if (port >= PIO_RESERVED) { \
is_mmio; \
} else if (port > PIO_OFFSET) { \
port &= PIO_MASK; \
is_pio; \
}; \
} while (0)
 
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
if (port > PIO_MASK)
return NULL;
return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
}
 
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
resource_size_t start = pci_resource_start(dev, bar);
resource_size_t len = pci_resource_len(dev, bar);
unsigned long flags = pci_resource_flags(dev, bar);
 
if (!len || !start)
return NULL;
if (maxlen && len > maxlen)
len = maxlen;
if (flags & IORESOURCE_IO)
return ioport_map(start, len);
if (flags & IORESOURCE_MEM) {
return ioremap(start, len);
}
/* What? */
return NULL;
}
 
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
IO_COND(addr, /* nothing */, iounmap(addr));
}
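/*
 * Illustrative sketch (not part of the driver): how the PIO cookie in
 * the scheme above round-trips. ioport_map() biases a port number by
 * PIO_OFFSET, so the returned "pointer" stays below PIO_RESERVED and
 * IO_COND() can distinguish it from a real MMIO mapping produced by
 * ioremap(), which is assumed to lie at or above PIO_RESERVED.
 */
#if 0
static void iomap_cookie_example(void)
{
    void __iomem *cookie = ioport_map(0x3F8, 8); /* cookie == 0x103F8 */
    unsigned long v = (unsigned long)cookie;

    if (v >= PIO_RESERVED)
        dbgprintf("MMIO mapping\n");
    else if (v > PIO_OFFSET)
        dbgprintf("PIO cookie for port %lx\n", v & PIO_MASK);
}
#endif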
 
 
struct pci_bus_region {
resource_size_t start;
resource_size_t end;
};
 
static inline void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
struct resource *res)
{
region->start = res->start;
region->end = res->end;
}
 
 
int pci_enable_rom(struct pci_dev *pdev)
{
struct resource *res = pdev->resource + PCI_ROM_RESOURCE;
struct pci_bus_region region;
u32 rom_addr;
 
if (!res->flags)
return -1;
 
pcibios_resource_to_bus(pdev, &region, res);
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_MASK;
rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE;
pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
return 0;
}
 
void pci_disable_rom(struct pci_dev *pdev)
{
u32 rom_addr;
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_ENABLE;
pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
}
 
/**
* pci_get_rom_size - obtain the actual size of the ROM image
* @pdev: target PCI device
* @rom: kernel virtual pointer to image of ROM
* @size: size of PCI window
* return: size of actual ROM image
*
* Determine the actual length of the ROM image.
* The PCI window size could be much larger than the
* actual image size.
*/
size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
{
void __iomem *image;
int last_image;
 
image = rom;
do {
void __iomem *pds;
/* Standard PCI ROMs start out with these bytes 55 AA */
if (readb(image) != 0x55) {
dev_err(&pdev->dev, "Invalid ROM contents\n");
break;
}
if (readb(image + 1) != 0xAA)
break;
/* get the PCI data structure and check its signature */
pds = image + readw(image + 24);
if (readb(pds) != 'P')
break;
if (readb(pds + 1) != 'C')
break;
if (readb(pds + 2) != 'I')
break;
if (readb(pds + 3) != 'R')
break;
last_image = readb(pds + 21) & 0x80;
/* this length is reliable */
image += readw(pds + 16) * 512;
} while (!last_image);
 
/* never return a size larger than the PCI resource window */
/* there are known ROMs that get the size wrong */
return min((size_t)(image - rom), size);
}
 
 
/**
* pci_map_rom - map a PCI ROM to kernel space
* @pdev: pointer to pci device struct
* @size: pointer to receive size of pci window over ROM
*
* Return: kernel virtual pointer to image of ROM
*
* Map a PCI ROM into kernel space. If ROM is boot video ROM,
* the shadow BIOS copy will be returned instead of the
* actual ROM.
*/
void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
loff_t start;
void __iomem *rom;
 
/*
* IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
* memory map if the VGA enable bit of the Bridge Control register is
* set for embedded VGA.
*/
if (res->flags & IORESOURCE_ROM_SHADOW) {
/* primary video rom always starts here */
start = (loff_t)0xC0000;
*size = 0x20000; /* cover C000:0 through E000:0 */
} else {
if (res->flags &
(IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
return (void __iomem *)(unsigned long)
pci_resource_start(pdev, PCI_ROM_RESOURCE);
} else {
/* assign the ROM an address if it doesn't have one */
// if (res->parent == NULL &&
// pci_assign_resource(pdev,PCI_ROM_RESOURCE))
return NULL;
// start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
// *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
// if (*size == 0)
// return NULL;
 
/* Enable ROM space decodes */
// if (pci_enable_rom(pdev))
// return NULL;
}
}
 
rom = ioremap(start, *size);
if (!rom) {
/* restore enable if ioremap fails */
if (!(res->flags & (IORESOURCE_ROM_ENABLE |
IORESOURCE_ROM_SHADOW |
IORESOURCE_ROM_COPY)))
pci_disable_rom(pdev);
return NULL;
}
 
/*
* Try to find the true size of the ROM since sometimes the PCI window
* size is much larger than the actual size of the ROM.
* True size is important if the ROM is going to be copied.
*/
*size = pci_get_rom_size(pdev, rom, *size);
return rom;
}
 
void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
 
if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY))
return;
 
iounmap(rom);
 
/* Disable again before continuing, leave enabled if pci=rom */
if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW)))
pci_disable_rom(pdev);
}
 
#if 0
void pcibios_set_master(struct pci_dev *dev)
{
u8 lat;
 
/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
if (pci_is_pcie(dev))
return;
 
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
if (lat < 16)
lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
else if (lat > pcibios_max_latency)
lat = pcibios_max_latency;
else
return;
dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
#endif
 
 
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
u16 old_cmd, cmd;
 
pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
if (enable)
cmd = old_cmd | PCI_COMMAND_MASTER;
else
cmd = old_cmd & ~PCI_COMMAND_MASTER;
if (cmd != old_cmd) {
dbgprintf("%s bus mastering\n",
enable ? "enabling" : "disabling");
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
dev->is_busmaster = enable;
}
 
void pci_set_master(struct pci_dev *dev)
{
__pci_set_master(dev, true);
// pcibios_set_master(dev);
}
 
 
 
/drivers/video/drm/vmwgfx/svga3d_reg.h
0,0 → 1,1896
/**********************************************************
* Copyright 1998-2009 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
 
/*
* svga3d_reg.h --
*
* SVGA 3D hardware definitions
*/
 
#ifndef _SVGA3D_REG_H_
#define _SVGA3D_REG_H_
 
#include "svga_reg.h"
 
 
/*
* 3D Hardware Version
*
* The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
* register. It is set by the host and read by the guest. This lets
* us make new guest drivers which are backwards-compatible with old
* SVGA hardware revisions. It does not let us support old guest
* drivers. Good enough for now.
*
*/
 
#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
 
typedef enum {
SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1),
SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2),
SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3),
SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
SVGA3D_HWVERSION_WS8_B1 = SVGA3D_MAKE_HWVERSION(2, 1),
SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS8_B1,
} SVGA3dHardwareVersion;
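/*
 * Minimal usage sketch (an illustration, not code from an existing
 * driver): a guest might gate 3D support on the major number of the
 * version it reads from SVGA_FIFO_3D_HWVERSION.
 */
#if 0
static int svga3d_version_ok(uint32 hwversion)
{
   /* e.g. hwversion == SVGA3D_MAKE_HWVERSION(2, 1) for WS8_B1 */
   return SVGA3D_MAJOR_HWVERSION(hwversion) ==
          SVGA3D_MAJOR_HWVERSION(SVGA3D_HWVERSION_CURRENT);
}
#endif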
 
/*
* Generic Types
*/
 
typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
#define SVGA3D_NUM_CLIPPLANES 6
#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8
#define SVGA3D_MAX_CONTEXT_IDS 256
#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
 
/*
* Surface formats.
*
* If you modify this list, be sure to keep GLUtil.c in sync. It
* includes the internal format definition of each surface in
* GLUtil_ConvertSurfaceFormat, and it contains a table of
* human-readable names in GLUtil_GetFormatName.
*/
 
typedef enum SVGA3dSurfaceFormat {
SVGA3D_FORMAT_INVALID = 0,
 
SVGA3D_X8R8G8B8 = 1,
SVGA3D_A8R8G8B8 = 2,
 
SVGA3D_R5G6B5 = 3,
SVGA3D_X1R5G5B5 = 4,
SVGA3D_A1R5G5B5 = 5,
SVGA3D_A4R4G4B4 = 6,
 
SVGA3D_Z_D32 = 7,
SVGA3D_Z_D16 = 8,
SVGA3D_Z_D24S8 = 9,
SVGA3D_Z_D15S1 = 10,
 
SVGA3D_LUMINANCE8 = 11,
SVGA3D_LUMINANCE4_ALPHA4 = 12,
SVGA3D_LUMINANCE16 = 13,
SVGA3D_LUMINANCE8_ALPHA8 = 14,
 
SVGA3D_DXT1 = 15,
SVGA3D_DXT2 = 16,
SVGA3D_DXT3 = 17,
SVGA3D_DXT4 = 18,
SVGA3D_DXT5 = 19,
 
SVGA3D_BUMPU8V8 = 20,
SVGA3D_BUMPL6V5U5 = 21,
SVGA3D_BUMPX8L8V8U8 = 22,
SVGA3D_BUMPL8V8U8 = 23,
 
SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
 
SVGA3D_A2R10G10B10 = 26,
 
/* signed formats */
SVGA3D_V8U8 = 27,
SVGA3D_Q8W8V8U8 = 28,
SVGA3D_CxV8U8 = 29,
 
/* mixed formats */
SVGA3D_X8L8V8U8 = 30,
SVGA3D_A2W10V10U10 = 31,
 
SVGA3D_ALPHA8 = 32,
 
/* Single- and dual-component floating point formats */
SVGA3D_R_S10E5 = 33,
SVGA3D_R_S23E8 = 34,
SVGA3D_RG_S10E5 = 35,
SVGA3D_RG_S23E8 = 36,
 
/*
* Any surface can be used as a buffer object, but SVGA3D_BUFFER is
* the most efficient format to use when creating new surfaces
* expressly for index or vertex data.
*/
 
SVGA3D_BUFFER = 37,
 
SVGA3D_Z_D24X8 = 38,
 
SVGA3D_V16U16 = 39,
 
SVGA3D_G16R16 = 40,
SVGA3D_A16B16G16R16 = 41,
 
/* Packed Video formats */
SVGA3D_UYVY = 42,
SVGA3D_YUY2 = 43,
 
/* Planar video formats */
SVGA3D_NV12 = 44,
 
/* Video format with alpha */
SVGA3D_AYUV = 45,
 
SVGA3D_BC4_UNORM = 108,
SVGA3D_BC5_UNORM = 111,
 
/* Advanced D3D9 depth formats. */
SVGA3D_Z_DF16 = 118,
SVGA3D_Z_DF24 = 119,
SVGA3D_Z_D24S8_INT = 120,
 
SVGA3D_FORMAT_MAX
} SVGA3dSurfaceFormat;
 
typedef uint32 SVGA3dColor; /* a, r, g, b */
 
/*
* These match the D3DFORMAT_OP definitions used by Direct3D. We need
* them so that we can query the host for what the supported surface
* operations are (when we're using the D3D backend, in particular),
* and so we can send those operations to the guest.
*/
typedef enum {
SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004,
SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008,
SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010,
SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040,
SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,
 
/*
* This format can be used as a render target if the current display mode
* has the same depth once the alpha channel is ignored. e.g. if the device
* can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
* format op list entry for A8R8G8B8 should have this cap.
*/
SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,
 
/*
* This format contains DirectDraw support (including Flip). This flag
* should not be set on alpha formats.
*/
SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400,
 
/*
* The rasterizer can provide some level of Direct3D acceleration in this
* format, which implies that the driver can create a Context in this mode (for some
* render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
* flag must also be set.
*/
SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800,
 
/*
* This is set for a private format when the driver has put the bpp in
* the structure.
*/
SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000,
 
/*
* Indicates that this format can be converted to any RGB format for which
* SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
*/
SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
 
/*
* Indicates that this format can be used to create offscreen plain surfaces.
*/
SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000,
 
/*
* Indicates that this format can be read as an SRGB texture (meaning that the
* sampler will linearize the looked-up data)
*/
SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
 
/*
* Indicates that this format can be used in the bumpmap instructions
*/
SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
 
/*
* Indicates that this format can be sampled by the displacement map sampler
*/
SVGA3DFORMAT_OP_DMAP = 0x00020000,
 
/*
* Indicates that this format cannot be used with texture filtering
*/
SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
 
/*
* Indicates that format conversions are supported to this RGB format if
* SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
*/
SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000,
 
/*
* Indicates that this format can be written as an SRGB target (meaning that the
* pixel pipe will de-linearize data on output to the format)
*/
SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
 
/*
* Indicates that this format cannot be used with alpha blending
*/
SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
 
/*
* Indicates that the device can auto-generate sublevels for resources
* of this format
*/
SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
 
/*
* Indicates that this format can be used by vertex texture sampler
*/
SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
 
/*
* Indicates that this format supports neither texture coordinate wrap
* modes, nor mipmapping
*/
SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000
} SVGA3dFormatOp;
 
/*
* This structure is a bitfield view of SVGA3DFORMAT_OP_*.
* Each entry must occupy the same bit position as the corresponding flag.
*/
typedef union {
uint32 value;
struct {
uint32 texture : 1;
uint32 volumeTexture : 1;
uint32 cubeTexture : 1;
uint32 offscreenRenderTarget : 1;
uint32 sameFormatRenderTarget : 1;
uint32 unknown1 : 1;
uint32 zStencil : 1;
uint32 zStencilArbitraryDepth : 1;
uint32 sameFormatUpToAlpha : 1;
uint32 unknown2 : 1;
uint32 displayMode : 1;
uint32 acceleration3d : 1;
uint32 pixelSize : 1;
uint32 convertToARGB : 1;
uint32 offscreenPlain : 1;
uint32 sRGBRead : 1;
uint32 bumpMap : 1;
uint32 dmap : 1;
uint32 noFilter : 1;
uint32 memberOfGroupARGB : 1;
uint32 sRGBWrite : 1;
uint32 noAlphaBlend : 1;
uint32 autoGenMipMap : 1;
uint32 vertexTexture : 1;
uint32 noTexCoordWrapNorMip : 1;
};
} SVGA3dSurfaceFormatCaps;
 
/*
* SVGA_3D_CMD_SETRENDERSTATE Types. All value types
* must fit in a uint32.
*/
 
typedef enum {
SVGA3D_RS_INVALID = 0,
SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */
SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */
SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */
SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */
SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */
SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */
SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */
SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */
SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */
SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */
SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */
SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */
SVGA3D_RS_STENCILREF = 13, /* uint32 */
SVGA3D_RS_STENCILMASK = 14, /* uint32 */
SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */
SVGA3D_RS_FOGSTART = 16, /* float */
SVGA3D_RS_FOGEND = 17, /* float */
SVGA3D_RS_FOGDENSITY = 18, /* float */
SVGA3D_RS_POINTSIZE = 19, /* float */
SVGA3D_RS_POINTSIZEMIN = 20, /* float */
SVGA3D_RS_POINTSIZEMAX = 21, /* float */
SVGA3D_RS_POINTSCALE_A = 22, /* float */
SVGA3D_RS_POINTSCALE_B = 23, /* float */
SVGA3D_RS_POINTSCALE_C = 24, /* float */
SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */
SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */
SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */
SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */
SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */
SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */
SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */
SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */
SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */
SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */
SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */
SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */
SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */
SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */
SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */
SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */
SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */
SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */
SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */
SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */
SVGA3D_RS_ZBIAS = 45, /* float */
SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */
SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */
SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */
SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */
SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */
SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */
SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */
SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */
SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */
SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */
SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */
SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */
SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */
SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */
SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */
SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */
SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */
SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */
SVGA3D_RS_DEPTHBIAS = 64, /* float */
 
 
/*
* Output Gamma Level
*
* Output gamma affects the gamma curve of colors that are output from the
* rendering pipeline. A value of 1.0 specifies a linear color space. If the
* value is <= 0.0, gamma correction is ignored and linear color space is
* used.
*/
 
SVGA3D_RS_OUTPUTGAMMA = 65, /* float */
SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */
SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */
SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */
SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */
SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */
SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */
SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */
SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */
SVGA3D_RS_TWEENFACTOR = 88, /* float */
SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */
SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */
SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */
SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */
SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */
SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
SVGA3D_RS_TRANSPARENCYANTIALIAS = 97, /* SVGA3dTransparencyAntialiasType */
SVGA3D_RS_LINEAA = 98, /* SVGA3dBool */
SVGA3D_RS_LINEWIDTH = 99, /* float */
SVGA3D_RS_MAX
} SVGA3dRenderStateName;
 
typedef enum {
SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0,
SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1,
SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2,
SVGA3D_TRANSPARENCYANTIALIAS_MAX
} SVGA3dTransparencyAntialiasType;
 
typedef enum {
SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
} SVGA3dVertexMaterial;
 
typedef enum {
SVGA3D_FILLMODE_INVALID = 0,
SVGA3D_FILLMODE_POINT = 1,
SVGA3D_FILLMODE_LINE = 2,
SVGA3D_FILLMODE_FILL = 3,
SVGA3D_FILLMODE_MAX
} SVGA3dFillModeType;
 
 
typedef
union {
struct {
uint16 mode; /* SVGA3dFillModeType */
uint16 face; /* SVGA3dFace */
};
uint32 uintValue;
} SVGA3dFillMode;
 
typedef enum {
SVGA3D_SHADEMODE_INVALID = 0,
SVGA3D_SHADEMODE_FLAT = 1,
SVGA3D_SHADEMODE_SMOOTH = 2,
SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */
SVGA3D_SHADEMODE_MAX
} SVGA3dShadeMode;
 
typedef
union {
struct {
uint16 repeat;
uint16 pattern;
};
uint32 uintValue;
} SVGA3dLinePattern;
 
typedef enum {
SVGA3D_BLENDOP_INVALID = 0,
SVGA3D_BLENDOP_ZERO = 1,
SVGA3D_BLENDOP_ONE = 2,
SVGA3D_BLENDOP_SRCCOLOR = 3,
SVGA3D_BLENDOP_INVSRCCOLOR = 4,
SVGA3D_BLENDOP_SRCALPHA = 5,
SVGA3D_BLENDOP_INVSRCALPHA = 6,
SVGA3D_BLENDOP_DESTALPHA = 7,
SVGA3D_BLENDOP_INVDESTALPHA = 8,
SVGA3D_BLENDOP_DESTCOLOR = 9,
SVGA3D_BLENDOP_INVDESTCOLOR = 10,
SVGA3D_BLENDOP_SRCALPHASAT = 11,
SVGA3D_BLENDOP_BLENDFACTOR = 12,
SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
SVGA3D_BLENDOP_MAX
} SVGA3dBlendOp;
 
typedef enum {
SVGA3D_BLENDEQ_INVALID = 0,
SVGA3D_BLENDEQ_ADD = 1,
SVGA3D_BLENDEQ_SUBTRACT = 2,
SVGA3D_BLENDEQ_REVSUBTRACT = 3,
SVGA3D_BLENDEQ_MINIMUM = 4,
SVGA3D_BLENDEQ_MAXIMUM = 5,
SVGA3D_BLENDEQ_MAX
} SVGA3dBlendEquation;
 
typedef enum {
SVGA3D_FRONTWINDING_INVALID = 0,
SVGA3D_FRONTWINDING_CW = 1,
SVGA3D_FRONTWINDING_CCW = 2,
SVGA3D_FRONTWINDING_MAX
} SVGA3dFrontWinding;
 
typedef enum {
SVGA3D_FACE_INVALID = 0,
SVGA3D_FACE_NONE = 1,
SVGA3D_FACE_FRONT = 2,
SVGA3D_FACE_BACK = 3,
SVGA3D_FACE_FRONT_BACK = 4,
SVGA3D_FACE_MAX
} SVGA3dFace;
 
/*
* The order and the values should not be changed
*/
 
typedef enum {
SVGA3D_CMP_INVALID = 0,
SVGA3D_CMP_NEVER = 1,
SVGA3D_CMP_LESS = 2,
SVGA3D_CMP_EQUAL = 3,
SVGA3D_CMP_LESSEQUAL = 4,
SVGA3D_CMP_GREATER = 5,
SVGA3D_CMP_NOTEQUAL = 6,
SVGA3D_CMP_GREATEREQUAL = 7,
SVGA3D_CMP_ALWAYS = 8,
SVGA3D_CMP_MAX
} SVGA3dCmpFunc;
 
/*
* SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
* the fog factor to be specified in the alpha component of the specular
* (a.k.a. secondary) vertex color.
*/
typedef enum {
SVGA3D_FOGFUNC_INVALID = 0,
SVGA3D_FOGFUNC_EXP = 1,
SVGA3D_FOGFUNC_EXP2 = 2,
SVGA3D_FOGFUNC_LINEAR = 3,
SVGA3D_FOGFUNC_PER_VERTEX = 4
} SVGA3dFogFunction;
 
/*
* SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
* or per-pixel basis.
*/
typedef enum {
SVGA3D_FOGTYPE_INVALID = 0,
SVGA3D_FOGTYPE_VERTEX = 1,
SVGA3D_FOGTYPE_PIXEL = 2,
SVGA3D_FOGTYPE_MAX = 3
} SVGA3dFogType;
 
/*
* SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
* computed using the eye Z value of each pixel (or vertex), whereas range-
* based fog is computed using the actual distance (range) to the eye.
*/
typedef enum {
SVGA3D_FOGBASE_INVALID = 0,
SVGA3D_FOGBASE_DEPTHBASED = 1,
SVGA3D_FOGBASE_RANGEBASED = 2,
SVGA3D_FOGBASE_MAX = 3
} SVGA3dFogBase;
 
typedef enum {
SVGA3D_STENCILOP_INVALID = 0,
SVGA3D_STENCILOP_KEEP = 1,
SVGA3D_STENCILOP_ZERO = 2,
SVGA3D_STENCILOP_REPLACE = 3,
SVGA3D_STENCILOP_INCRSAT = 4,
SVGA3D_STENCILOP_DECRSAT = 5,
SVGA3D_STENCILOP_INVERT = 6,
SVGA3D_STENCILOP_INCR = 7,
SVGA3D_STENCILOP_DECR = 8,
SVGA3D_STENCILOP_MAX
} SVGA3dStencilOp;
 
typedef enum {
SVGA3D_CLIPPLANE_0 = (1 << 0),
SVGA3D_CLIPPLANE_1 = (1 << 1),
SVGA3D_CLIPPLANE_2 = (1 << 2),
SVGA3D_CLIPPLANE_3 = (1 << 3),
SVGA3D_CLIPPLANE_4 = (1 << 4),
SVGA3D_CLIPPLANE_5 = (1 << 5),
} SVGA3dClipPlanes;
 
typedef enum {
SVGA3D_CLEAR_COLOR = 0x1,
SVGA3D_CLEAR_DEPTH = 0x2,
SVGA3D_CLEAR_STENCIL = 0x4
} SVGA3dClearFlag;
 
typedef enum {
SVGA3D_RT_DEPTH = 0,
SVGA3D_RT_STENCIL = 1,
SVGA3D_RT_COLOR0 = 2,
SVGA3D_RT_COLOR1 = 3,
SVGA3D_RT_COLOR2 = 4,
SVGA3D_RT_COLOR3 = 5,
SVGA3D_RT_COLOR4 = 6,
SVGA3D_RT_COLOR5 = 7,
SVGA3D_RT_COLOR6 = 8,
SVGA3D_RT_COLOR7 = 9,
SVGA3D_RT_MAX,
SVGA3D_RT_INVALID = ((uint32)-1),
} SVGA3dRenderTargetType;
 
#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
 
typedef
union {
struct {
uint32 red : 1;
uint32 green : 1;
uint32 blue : 1;
uint32 alpha : 1;
};
uint32 uintValue;
} SVGA3dColorMask;
 
typedef enum {
SVGA3D_VBLEND_DISABLE = 0,
SVGA3D_VBLEND_1WEIGHT = 1,
SVGA3D_VBLEND_2WEIGHT = 2,
SVGA3D_VBLEND_3WEIGHT = 3,
} SVGA3dVertexBlendFlags;
 
typedef enum {
SVGA3D_WRAPCOORD_0 = 1 << 0,
SVGA3D_WRAPCOORD_1 = 1 << 1,
SVGA3D_WRAPCOORD_2 = 1 << 2,
SVGA3D_WRAPCOORD_3 = 1 << 3,
SVGA3D_WRAPCOORD_ALL = 0xF,
} SVGA3dWrapFlags;
 
/*
* SVGA_3D_CMD_TEXTURESTATE Types. All value types
* must fit in a uint32.
*/
 
typedef enum {
SVGA3D_TS_INVALID = 0,
SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */
SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */
SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */
SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */
SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */
SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */
SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */
SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */
SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */
SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */
SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */
SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */
SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */
SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */
SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */
SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */
SVGA3D_TS_BUMPENVMAT00 = 17, /* float */
SVGA3D_TS_BUMPENVMAT01 = 18, /* float */
SVGA3D_TS_BUMPENVMAT10 = 19, /* float */
SVGA3D_TS_BUMPENVMAT11 = 20, /* float */
SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */
SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */
SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */
SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */
 
 
/*
* Sampler Gamma Level
*
* Sampler gamma affects the color of samples taken from the sampler. A
* value of 1.0 will produce linear samples. If the value is <= 0.0 the
* gamma value is ignored and a linear space is used.
*/
 
SVGA3D_TS_GAMMA = 25, /* float */
SVGA3D_TS_BUMPENVLSCALE = 26, /* float */
SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */
SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */
SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */
SVGA3D_TS_MAX
} SVGA3dTextureStateName;
 
typedef enum {
SVGA3D_TC_INVALID = 0,
SVGA3D_TC_DISABLE = 1,
SVGA3D_TC_SELECTARG1 = 2,
SVGA3D_TC_SELECTARG2 = 3,
SVGA3D_TC_MODULATE = 4,
SVGA3D_TC_ADD = 5,
SVGA3D_TC_ADDSIGNED = 6,
SVGA3D_TC_SUBTRACT = 7,
SVGA3D_TC_BLENDTEXTUREALPHA = 8,
SVGA3D_TC_BLENDDIFFUSEALPHA = 9,
SVGA3D_TC_BLENDCURRENTALPHA = 10,
SVGA3D_TC_BLENDFACTORALPHA = 11,
SVGA3D_TC_MODULATE2X = 12,
SVGA3D_TC_MODULATE4X = 13,
SVGA3D_TC_DSDT = 14,
SVGA3D_TC_DOTPRODUCT3 = 15,
SVGA3D_TC_BLENDTEXTUREALPHAPM = 16,
SVGA3D_TC_ADDSIGNED2X = 17,
SVGA3D_TC_ADDSMOOTH = 18,
SVGA3D_TC_PREMODULATE = 19,
SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20,
SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21,
SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
SVGA3D_TC_BUMPENVMAPLUMINANCE = 24,
SVGA3D_TC_MULTIPLYADD = 25,
SVGA3D_TC_LERP = 26,
SVGA3D_TC_MAX
} SVGA3dTextureCombiner;
 
#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
 
typedef enum {
SVGA3D_TEX_ADDRESS_INVALID = 0,
SVGA3D_TEX_ADDRESS_WRAP = 1,
SVGA3D_TEX_ADDRESS_MIRROR = 2,
SVGA3D_TEX_ADDRESS_CLAMP = 3,
SVGA3D_TEX_ADDRESS_BORDER = 4,
SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
SVGA3D_TEX_ADDRESS_EDGE = 6,
SVGA3D_TEX_ADDRESS_MAX
} SVGA3dTextureAddress;
 
/*
* SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
* disabled, and the rasterizer should use the magnification filter instead.
*/
typedef enum {
SVGA3D_TEX_FILTER_NONE = 0,
SVGA3D_TEX_FILTER_NEAREST = 1,
SVGA3D_TEX_FILTER_LINEAR = 2,
SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
SVGA3D_TEX_FILTER_FLATCUBIC = 4, /* Deprecated, not implemented */
SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, /* Deprecated, not implemented */
SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, /* Not currently implemented */
SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, /* Not currently implemented */
SVGA3D_TEX_FILTER_MAX
} SVGA3dTextureFilter;
 
typedef enum {
SVGA3D_TEX_TRANSFORM_OFF = 0,
SVGA3D_TEX_TRANSFORM_S = (1 << 0),
SVGA3D_TEX_TRANSFORM_T = (1 << 1),
SVGA3D_TEX_TRANSFORM_R = (1 << 2),
SVGA3D_TEX_TRANSFORM_Q = (1 << 3),
SVGA3D_TEX_PROJECTED = (1 << 15),
} SVGA3dTexTransformFlags;
 
typedef enum {
SVGA3D_TEXCOORD_GEN_OFF = 0,
SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1,
SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2,
SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
SVGA3D_TEXCOORD_GEN_SPHERE = 4,
SVGA3D_TEXCOORD_GEN_MAX
} SVGA3dTextureCoordGen;
 
/*
* Texture argument constants for texture combiner
*/
typedef enum {
SVGA3D_TA_INVALID = 0,
SVGA3D_TA_CONSTANT = 1,
SVGA3D_TA_PREVIOUS = 2,
SVGA3D_TA_DIFFUSE = 3,
SVGA3D_TA_TEXTURE = 4,
SVGA3D_TA_SPECULAR = 5,
SVGA3D_TA_MAX
} SVGA3dTextureArgData;
 
#define SVGA3D_TM_MASK_LEN 4
 
/* Modifiers for texture argument constants defined above. */
typedef enum {
SVGA3D_TM_NONE = 0,
SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN),
SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
} SVGA3dTextureArgModifier;
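/*
 * Illustrative example (not from the original header): a texture-stage
 * argument is an SVGA3dTextureArgData selector in the low
 * SVGA3D_TM_MASK_LEN bits, optionally OR'ed with modifier flags above
 * them, e.g. "one minus the alpha of the bound texture":
 *
 *   arg = SVGA3D_TA_TEXTURE | SVGA3D_TM_ALPHA | SVGA3D_TM_ONE_MINUS;
 */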
 
#define SVGA3D_INVALID_ID ((uint32)-1)
#define SVGA3D_MAX_CLIP_PLANES 6
 
/*
* This is the limit to the number of fixed-function texture
* transforms and texture coordinates we can support. It does *not*
* correspond to the number of texture image units (samplers) we
* support!
*/
#define SVGA3D_MAX_TEXTURE_COORDS 8
 
/*
* Vertex declarations
*
* Notes:
*
* SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
* draw with any POSITIONT vertex arrays, the programmable vertex
* pipeline will be implicitly disabled. Drawing will take place as if
* no vertex shader was bound.
*/
 
typedef enum {
SVGA3D_DECLUSAGE_POSITION = 0,
SVGA3D_DECLUSAGE_BLENDWEIGHT, /* 1 */
SVGA3D_DECLUSAGE_BLENDINDICES, /* 2 */
SVGA3D_DECLUSAGE_NORMAL, /* 3 */
SVGA3D_DECLUSAGE_PSIZE, /* 4 */
SVGA3D_DECLUSAGE_TEXCOORD, /* 5 */
SVGA3D_DECLUSAGE_TANGENT, /* 6 */
SVGA3D_DECLUSAGE_BINORMAL, /* 7 */
SVGA3D_DECLUSAGE_TESSFACTOR, /* 8 */
SVGA3D_DECLUSAGE_POSITIONT, /* 9 */
SVGA3D_DECLUSAGE_COLOR, /* 10 */
SVGA3D_DECLUSAGE_FOG, /* 11 */
SVGA3D_DECLUSAGE_DEPTH, /* 12 */
SVGA3D_DECLUSAGE_SAMPLE, /* 13 */
SVGA3D_DECLUSAGE_MAX
} SVGA3dDeclUsage;
 
typedef enum {
SVGA3D_DECLMETHOD_DEFAULT = 0,
SVGA3D_DECLMETHOD_PARTIALU,
SVGA3D_DECLMETHOD_PARTIALV,
SVGA3D_DECLMETHOD_CROSSUV, /* Normal */
SVGA3D_DECLMETHOD_UV,
SVGA3D_DECLMETHOD_LOOKUP, /* Lookup a displacement map */
SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement map */
} SVGA3dDeclMethod;
 
typedef enum {
SVGA3D_DECLTYPE_FLOAT1 = 0,
SVGA3D_DECLTYPE_FLOAT2 = 1,
SVGA3D_DECLTYPE_FLOAT3 = 2,
SVGA3D_DECLTYPE_FLOAT4 = 3,
SVGA3D_DECLTYPE_D3DCOLOR = 4,
SVGA3D_DECLTYPE_UBYTE4 = 5,
SVGA3D_DECLTYPE_SHORT2 = 6,
SVGA3D_DECLTYPE_SHORT4 = 7,
SVGA3D_DECLTYPE_UBYTE4N = 8,
SVGA3D_DECLTYPE_SHORT2N = 9,
SVGA3D_DECLTYPE_SHORT4N = 10,
SVGA3D_DECLTYPE_USHORT2N = 11,
SVGA3D_DECLTYPE_USHORT4N = 12,
SVGA3D_DECLTYPE_UDEC3 = 13,
SVGA3D_DECLTYPE_DEC3N = 14,
SVGA3D_DECLTYPE_FLOAT16_2 = 15,
SVGA3D_DECLTYPE_FLOAT16_4 = 16,
SVGA3D_DECLTYPE_MAX,
} SVGA3dDeclType;
 
/*
* This structure is used for the divisor for geometry instancing;
* it's a direct translation of the Direct3D equivalent.
*/
typedef union {
struct {
/*
* For index data, this number represents the number of instances to draw.
* For instance data, this number represents the number of
* instances/vertex in this stream
*/
uint32 count : 30;
 
/*
* This is 1 if this is supposed to be the data that is repeated for
* every instance.
*/
uint32 indexedData : 1;
 
/*
* This is 1 if this is supposed to be the per-instance data.
*/
uint32 instanceData : 1;
};
 
uint32 value;
} SVGA3dVertexDivisor;
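/*
 * Illustrative sketch (an assumption about typical use, not code from
 * the header): to draw 10 instances where stream 0 holds indexed
 * geometry and stream 1 holds per-instance data, the divisors could be
 * set up as below.
 */
#if 0
static void vertex_divisor_example(SVGA3dVertexDivisor *div)
{
   div[0].value = 0;
   div[0].indexedData = 1;
   div[0].count = 10;           /* number of instances to draw */

   div[1].value = 0;
   div[1].instanceData = 1;
   div[1].count = 1;            /* advance once per instance */
}
#endif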
 
typedef enum {
SVGA3D_PRIMITIVE_INVALID = 0,
SVGA3D_PRIMITIVE_TRIANGLELIST = 1,
SVGA3D_PRIMITIVE_POINTLIST = 2,
SVGA3D_PRIMITIVE_LINELIST = 3,
SVGA3D_PRIMITIVE_LINESTRIP = 4,
SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
SVGA3D_PRIMITIVE_TRIANGLEFAN = 6,
SVGA3D_PRIMITIVE_MAX
} SVGA3dPrimitiveType;
 
typedef enum {
SVGA3D_COORDINATE_INVALID = 0,
SVGA3D_COORDINATE_LEFTHANDED = 1,
SVGA3D_COORDINATE_RIGHTHANDED = 2,
SVGA3D_COORDINATE_MAX
} SVGA3dCoordinateType;
 
typedef enum {
SVGA3D_TRANSFORM_INVALID = 0,
SVGA3D_TRANSFORM_WORLD = 1,
SVGA3D_TRANSFORM_VIEW = 2,
SVGA3D_TRANSFORM_PROJECTION = 3,
SVGA3D_TRANSFORM_TEXTURE0 = 4,
SVGA3D_TRANSFORM_TEXTURE1 = 5,
SVGA3D_TRANSFORM_TEXTURE2 = 6,
SVGA3D_TRANSFORM_TEXTURE3 = 7,
SVGA3D_TRANSFORM_TEXTURE4 = 8,
SVGA3D_TRANSFORM_TEXTURE5 = 9,
SVGA3D_TRANSFORM_TEXTURE6 = 10,
SVGA3D_TRANSFORM_TEXTURE7 = 11,
SVGA3D_TRANSFORM_WORLD1 = 12,
SVGA3D_TRANSFORM_WORLD2 = 13,
SVGA3D_TRANSFORM_WORLD3 = 14,
SVGA3D_TRANSFORM_MAX
} SVGA3dTransformType;
 
typedef enum {
SVGA3D_LIGHTTYPE_INVALID = 0,
SVGA3D_LIGHTTYPE_POINT = 1,
SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */
SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */
SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
SVGA3D_LIGHTTYPE_MAX
} SVGA3dLightType;
 
typedef enum {
SVGA3D_CUBEFACE_POSX = 0,
SVGA3D_CUBEFACE_NEGX = 1,
SVGA3D_CUBEFACE_POSY = 2,
SVGA3D_CUBEFACE_NEGY = 3,
SVGA3D_CUBEFACE_POSZ = 4,
SVGA3D_CUBEFACE_NEGZ = 5,
} SVGA3dCubeFace;
 
typedef enum {
SVGA3D_SHADERTYPE_VS = 1,
SVGA3D_SHADERTYPE_PS = 2,
SVGA3D_SHADERTYPE_MAX
} SVGA3dShaderType;
 
typedef enum {
SVGA3D_CONST_TYPE_FLOAT = 0,
SVGA3D_CONST_TYPE_INT = 1,
SVGA3D_CONST_TYPE_BOOL = 2,
} SVGA3dShaderConstType;
 
#define SVGA3D_MAX_SURFACE_FACES 6
 
typedef enum {
SVGA3D_STRETCH_BLT_POINT = 0,
SVGA3D_STRETCH_BLT_LINEAR = 1,
SVGA3D_STRETCH_BLT_MAX
} SVGA3dStretchBltMode;
 
typedef enum {
SVGA3D_QUERYTYPE_OCCLUSION = 0,
SVGA3D_QUERYTYPE_MAX
} SVGA3dQueryType;
 
typedef enum {
SVGA3D_QUERYSTATE_PENDING = 0, /* Waiting on the host (set by guest) */
SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully (set by host) */
SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully (set by host) */
SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (For guest use only) */
} SVGA3dQueryState;
 
typedef enum {
SVGA3D_WRITE_HOST_VRAM = 1,
SVGA3D_READ_HOST_VRAM = 2,
} SVGA3dTransferType;
 
/*
* The maximum number of vertex arrays we're guaranteed to support in
* SVGA_3D_CMD_DRAWPRIMITIVES.
*/
#define SVGA3D_MAX_VERTEX_ARRAYS 32
 
/*
* The maximum number of primitive ranges we're guaranteed to support
* in SVGA_3D_CMD_DRAWPRIMITIVES.
*/
#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
 
/*
* Identifiers for commands in the command FIFO.
*
* IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
* the SVGA3D protocol and remain reserved; they should not be used in the
* future.
*
* IDs between 1040 and 1999 (inclusive) are available for use by the
* current SVGA3D protocol.
*
* FIFO clients other than SVGA3D should stay below 1000, or at 2000
* and up.
*/
 
#define SVGA_3D_CMD_LEGACY_BASE 1000
#define SVGA_3D_CMD_BASE 1040
 
#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0 /* Deprecated */
#define SVGA_3D_CMD_SURFACE_DESTROY SVGA_3D_CMD_BASE + 1
#define SVGA_3D_CMD_SURFACE_COPY SVGA_3D_CMD_BASE + 2
#define SVGA_3D_CMD_SURFACE_STRETCHBLT SVGA_3D_CMD_BASE + 3
#define SVGA_3D_CMD_SURFACE_DMA SVGA_3D_CMD_BASE + 4
#define SVGA_3D_CMD_CONTEXT_DEFINE SVGA_3D_CMD_BASE + 5
#define SVGA_3D_CMD_CONTEXT_DESTROY SVGA_3D_CMD_BASE + 6
#define SVGA_3D_CMD_SETTRANSFORM SVGA_3D_CMD_BASE + 7
#define SVGA_3D_CMD_SETZRANGE SVGA_3D_CMD_BASE + 8
#define SVGA_3D_CMD_SETRENDERSTATE SVGA_3D_CMD_BASE + 9
#define SVGA_3D_CMD_SETRENDERTARGET SVGA_3D_CMD_BASE + 10
#define SVGA_3D_CMD_SETTEXTURESTATE SVGA_3D_CMD_BASE + 11
#define SVGA_3D_CMD_SETMATERIAL SVGA_3D_CMD_BASE + 12
#define SVGA_3D_CMD_SETLIGHTDATA SVGA_3D_CMD_BASE + 13
#define SVGA_3D_CMD_SETLIGHTENABLED SVGA_3D_CMD_BASE + 14
#define SVGA_3D_CMD_SETVIEWPORT SVGA_3D_CMD_BASE + 15
#define SVGA_3D_CMD_SETCLIPPLANE SVGA_3D_CMD_BASE + 16
#define SVGA_3D_CMD_CLEAR SVGA_3D_CMD_BASE + 17
#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 /* Deprecated */
#define SVGA_3D_CMD_SHADER_DEFINE SVGA_3D_CMD_BASE + 19
#define SVGA_3D_CMD_SHADER_DESTROY SVGA_3D_CMD_BASE + 20
#define SVGA_3D_CMD_SET_SHADER SVGA_3D_CMD_BASE + 21
#define SVGA_3D_CMD_SET_SHADER_CONST SVGA_3D_CMD_BASE + 22
#define SVGA_3D_CMD_DRAW_PRIMITIVES SVGA_3D_CMD_BASE + 23
#define SVGA_3D_CMD_SETSCISSORRECT SVGA_3D_CMD_BASE + 24
#define SVGA_3D_CMD_BEGIN_QUERY SVGA_3D_CMD_BASE + 25
#define SVGA_3D_CMD_END_QUERY SVGA_3D_CMD_BASE + 26
#define SVGA_3D_CMD_WAIT_FOR_QUERY SVGA_3D_CMD_BASE + 27
#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 /* Deprecated */
#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
#define SVGA_3D_CMD_SURFACE_DEFINE_V2 SVGA_3D_CMD_BASE + 30
#define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31
#define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40
#define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41
#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 42
 
#define SVGA_3D_CMD_FUTURE_MAX 2000
 
/*
* Common substructures used in multiple FIFO commands:
*/
 
typedef struct {
union {
struct {
uint16 function; /* SVGA3dFogFunction */
uint8 type; /* SVGA3dFogType */
uint8 base; /* SVGA3dFogBase */
};
uint32 uintValue;
};
} SVGA3dFogMode;
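/*
 * Illustrative sketch: the three fog selectors pack into a single
 * uint32, suitable as the value of SVGA3D_RS_FOGMODE in a
 * SETRENDERSTATE command. The helper below is hypothetical, not part
 * of the protocol headers.
 */
#if 0
static uint32 make_fog_mode(void)
{
   SVGA3dFogMode fog;
   fog.function = SVGA3D_FOGFUNC_LINEAR;
   fog.type     = SVGA3D_FOGTYPE_PIXEL;
   fog.base     = SVGA3D_FOGBASE_DEPTHBASED;
   return fog.uintValue;
}
#endif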
 
/*
* Uniquely identify one image (a 1D/2D/3D array) from a surface. This
* is a surface ID as well as face/mipmap indices.
*/
 
typedef
struct SVGA3dSurfaceImageId {
uint32 sid;
uint32 face;
uint32 mipmap;
} SVGA3dSurfaceImageId;
 
typedef
struct SVGA3dGuestImage {
SVGAGuestPtr ptr;
 
/*
* A note on interpretation of pitch: This value of pitch is the
* number of bytes between vertically adjacent image
* blocks. Normally this is the number of bytes between the first
* pixel of two adjacent scanlines. With compressed textures,
* however, this may represent the number of bytes between
* compression blocks rather than between rows of pixels.
*
* XXX: Compressed textures currently must be tightly packed in guest memory.
*
* If the image is 1-dimensional, pitch is ignored.
*
* If 'pitch' is zero, the SVGA3D device calculates a pitch value
* assuming each row of blocks is tightly packed.
*/
uint32 pitch;
} SVGA3dGuestImage;
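/*
 * Worked example for the pitch note above (illustrative, assuming
 * tightly packed rows): a 640x480 SVGA3D_X8R8G8B8 image has a pitch of
 * 640 * 4 = 2560 bytes, while a 640-pixel-wide DXT1 surface stores
 * rows of 4x4 blocks at 8 bytes per block, giving a pitch of
 * (640 / 4) * 8 = 1280 bytes between block rows.
 */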
 
 
/*
* FIFO command format definitions:
*/
 
/*
* The data size header following cmdNum for every 3d command
*/
typedef
struct {
uint32 id;
uint32 size;
} SVGA3dCmdHeader;
 
/*
* A surface is a hierarchy of host VRAM surfaces: 1D, 2D, or 3D, with
* optional mipmaps and cube faces.
*/
 
typedef
struct {
uint32 width;
uint32 height;
uint32 depth;
} SVGA3dSize;
 
typedef enum {
SVGA3D_SURFACE_CUBEMAP = (1 << 0),
SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
SVGA3D_SURFACE_AUTOGENMIPMAPS = (1 << 10),
} SVGA3dSurfaceFlags;
 
typedef
struct {
uint32 numMipLevels;
} SVGA3dSurfaceFace;
 
typedef
struct {
uint32 sid;
SVGA3dSurfaceFlags surfaceFlags;
SVGA3dSurfaceFormat format;
/*
* If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
* structures must have the same value of numMipLevels field.
* Otherwise, all but the first SVGA3dSurfaceFace structures must have the
* numMipLevels set to 0.
*/
SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
/*
* Followed by an SVGA3dSize structure for each mip level in each face.
*
* A note on surface sizes: Sizes are always specified in pixels,
* even if the true surface size is not a multiple of the minimum
* block size of the surface's format. For example, a 3x3x1 DXT1
* compressed texture would actually be stored as a 4x4x1 image in
* memory.
*/
} SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */
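/*
 * Worked example for the size note above (illustrative): DXT1 encodes
 * each 4x4 pixel block in 8 bytes, so the 3x3x1 surface mentioned in
 * the comment occupies ceil(3/4) * ceil(3/4) * 8 = 8 bytes, i.e. a
 * single 4x4 block, even though its nominal size is 3x3 pixels.
 */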
 
typedef
struct {
uint32 sid;
SVGA3dSurfaceFlags surfaceFlags;
SVGA3dSurfaceFormat format;
/*
* If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
* structures must have the same value of numMipLevels field.
* Otherwise, all but the first SVGA3dSurfaceFace structures must have the
* numMipLevels set to 0.
*/
SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
uint32 multisampleCount;
SVGA3dTextureFilter autogenFilter;
/*
* Followed by an SVGA3dSize structure for each mip level in each face.
*
* A note on surface sizes: Sizes are always specified in pixels,
* even if the true surface size is not a multiple of the minimum
* block size of the surface's format. For example, a 3x3x1 DXT1
* compressed texture would actually be stored as a 4x4x1 image in
* memory.
*/
} SVGA3dCmdDefineSurface_v2; /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
 
typedef
struct {
uint32 sid;
} SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
 
typedef
struct {
uint32 cid;
} SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */
 
typedef
struct {
uint32 cid;
} SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */
 
typedef
struct {
uint32 cid;
SVGA3dClearFlag clearFlag;
uint32 color;
float depth;
uint32 stencil;
/* Followed by variable number of SVGA3dRect structures */
} SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */
 
typedef
struct SVGA3dCopyRect {
uint32 x;
uint32 y;
uint32 w;
uint32 h;
uint32 srcx;
uint32 srcy;
} SVGA3dCopyRect;
 
typedef
struct SVGA3dCopyBox {
uint32 x;
uint32 y;
uint32 z;
uint32 w;
uint32 h;
uint32 d;
uint32 srcx;
uint32 srcy;
uint32 srcz;
} SVGA3dCopyBox;
 
typedef
struct {
uint32 x;
uint32 y;
uint32 w;
uint32 h;
} SVGA3dRect;
 
typedef
struct {
uint32 x;
uint32 y;
uint32 z;
uint32 w;
uint32 h;
uint32 d;
} SVGA3dBox;
 
typedef
struct {
uint32 x;
uint32 y;
uint32 z;
} SVGA3dPoint;
 
typedef
struct {
SVGA3dLightType type;
SVGA3dBool inWorldSpace;
float diffuse[4];
float specular[4];
float ambient[4];
float position[4];
float direction[4];
float range;
float falloff;
float attenuation0;
float attenuation1;
float attenuation2;
float theta;
float phi;
} SVGA3dLightData;
 
typedef
struct {
uint32 sid;
/* Followed by variable number of SVGA3dCopyRect structures */
} SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */
 
typedef
struct {
SVGA3dRenderStateName state;
union {
uint32 uintValue;
float floatValue;
};
} SVGA3dRenderState;
 
typedef
struct {
uint32 cid;
/* Followed by variable number of SVGA3dRenderState structures */
} SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */
 
typedef
struct {
uint32 cid;
SVGA3dRenderTargetType type;
SVGA3dSurfaceImageId target;
} SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */
 
typedef
struct {
SVGA3dSurfaceImageId src;
SVGA3dSurfaceImageId dest;
/* Followed by variable number of SVGA3dCopyBox structures */
} SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
 
typedef
struct {
SVGA3dSurfaceImageId src;
SVGA3dSurfaceImageId dest;
SVGA3dBox boxSrc;
SVGA3dBox boxDest;
SVGA3dStretchBltMode mode;
} SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
 
typedef
struct {
/*
* If the discard flag is present in a surface DMA operation, the host may
* discard the contents of the current mipmap level and face of the target
* surface before applying the surface DMA contents.
*/
uint32 discard : 1;
 
/*
* If the unsynchronized flag is present, the host may perform this upload
* without syncing to pending reads on this surface.
*/
uint32 unsynchronized : 1;
 
/*
* Guests *MUST* set the reserved bits to 0 before submitting the command
* suffix as future flags may occupy these bits.
*/
uint32 reserved : 30;
} SVGA3dSurfaceDMAFlags;
 
typedef
struct {
SVGA3dGuestImage guest;
SVGA3dSurfaceImageId host;
SVGA3dTransferType transfer;
/*
* Followed by variable number of SVGA3dCopyBox structures. For consistency
* in all clipping logic and coordinate translation, we define the
* "source" in each copyBox as the guest image and the
* "destination" as the host image, regardless of transfer
* direction.
*
* For efficiency, the SVGA3D device is free to copy more data than
* specified. For example, it may round copy boxes outwards such
* that they lie on particular alignment boundaries.
*/
} SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */
 
/*
* SVGA3dCmdSurfaceDMASuffix --
*
* This is a command suffix that will appear after a SurfaceDMA command in
* the FIFO. It contains some extra information that hosts may use to
* optimize performance or protect the guest. This suffix exists to preserve
* backwards compatibility while also allowing for new functionality to be
* implemented.
*/
 
typedef
struct {
uint32 suffixSize;
 
/*
* The maximum offset is used to determine the maximum offset from the
* guestPtr base address that will be accessed or written to during this
* surfaceDMA. If the suffix is supported, the host will respect this
* boundary while performing surface DMAs.
*
* Defaults to MAX_UINT32
*/
uint32 maximumOffset;
 
/*
* A set of flags that describes optimizations that the host may perform
* while performing this surface DMA operation. The guest should never rely
* on behaviour that is different when these flags are set for correctness.
*
* Defaults to 0
*/
SVGA3dSurfaceDMAFlags flags;
} SVGA3dCmdSurfaceDMASuffix;
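/*
 * Illustrative sketch (hypothetical helper, not part of the protocol
 * headers): a guest appends the suffix directly after the copy boxes
 * and fills in conservative defaults when it has nothing special to
 * declare.
 */
#if 0
static void init_dma_suffix(SVGA3dCmdSurfaceDMASuffix *sfx)
{
   sfx->suffixSize    = sizeof(*sfx);
   sfx->maximumOffset = 0xffffffff;     /* MAX_UINT32: no extra bound */
   sfx->flags.discard        = 0;
   sfx->flags.unsynchronized = 0;
   sfx->flags.reserved       = 0;       /* guests *MUST* clear these */
}
#endif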
 
/*
* SVGA_3D_CMD_DRAW_PRIMITIVES --
*
* This command is the SVGA3D device's generic drawing entry point.
* It can draw multiple ranges of primitives, optionally using an
* index buffer, using an arbitrary collection of vertex buffers.
*
* Each SVGA3dVertexDecl defines a distinct vertex array to bind
* during this draw call. The declarations specify which surface
* the vertex data lives in, what that vertex data is used for,
* and how to interpret it.
*
* Each SVGA3dPrimitiveRange defines a collection of primitives
* to render using the same vertex arrays. An index buffer is
* optional.
*/
 
typedef
struct {
/*
* A range hint is an optional specification for the range of indices
* in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
* that the entire array will be used.
*
* These are only hints. The SVGA3D device may use them for
* performance optimization if possible, but it's also allowed to
* ignore these values.
*/
uint32 first;
uint32 last;
} SVGA3dArrayRangeHint;
 
typedef
struct {
/*
* Define the origin and shape of a vertex or index array. Both
* 'offset' and 'stride' are in bytes. The provided surface will be
* reinterpreted as a flat array of bytes in the same format used
* by surface DMA operations. To avoid unnecessary conversions, the
* surface should be created with the SVGA3D_BUFFER format.
*
* Index 0 in the array starts 'offset' bytes into the surface.
* Index 1 begins at byte 'offset + stride', etc. Array indices may
* not be negative.
*/
uint32 surfaceId;
uint32 offset;
uint32 stride;
} SVGA3dArray;
 
typedef
struct {
/*
* Describe a vertex array's data type, and define how it is to be
* used by the fixed function pipeline or the vertex shader. It
* isn't useful to have two VertexDecls with the same
* VertexArrayIdentity in one draw call.
*/
SVGA3dDeclType type;
SVGA3dDeclMethod method;
SVGA3dDeclUsage usage;
uint32 usageIndex;
} SVGA3dVertexArrayIdentity;
 
typedef
struct {
SVGA3dVertexArrayIdentity identity;
SVGA3dArray array;
SVGA3dArrayRangeHint rangeHint;
} SVGA3dVertexDecl;
 
typedef
struct {
/*
* Define a group of primitives to render, from sequential indices.
*
* The values of 'primType' and 'primitiveCount' imply the
* total number of vertices that will be rendered.
*/
SVGA3dPrimitiveType primType;
uint32 primitiveCount;
 
/*
* Optional index buffer. If indexArray.surfaceId is
* SVGA3D_INVALID_ID, we render without an index buffer. Rendering
* without an index buffer is identical to rendering with an index
* buffer containing the sequence [0, 1, 2, 3, ...].
*
* If an index buffer is in use, indexWidth specifies the width in
* bytes of each index value. It must be less than or equal to
* indexArray.stride.
*
* (Currently, the SVGA3D device requires index buffers to be tightly
* packed. In other words, indexWidth == indexArray.stride)
*/
SVGA3dArray indexArray;
uint32 indexWidth;
 
/*
* Optional index bias. This number is added to all indices from
* indexArray before they are used as vertex array indices. This
* can be used in multiple ways:
*
* - When not using an indexArray, this bias can be used to
* specify where in the vertex arrays to begin rendering.
*
* - A positive number here is equivalent to increasing the
* offset in each vertex array.
*
* - A negative number can be used to render using a small
* vertex array and an index buffer that contains large
* values. This may be used by some applications that
* crop a vertex buffer without modifying their index
* buffer.
*
* Note that rendering with a negative bias value may be slower and
* use more memory than rendering with a positive or zero bias.
*/
int32 indexBias;
} SVGA3dPrimitiveRange;
 
typedef
struct {
uint32 cid;
uint32 numVertexDecls;
uint32 numRanges;
 
/*
* There are two variable size arrays after the
* SVGA3dCmdDrawPrimitives structure. In order,
* they are:
*
* 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
* SVGA3D_MAX_VERTEX_ARRAYS;
* 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
* SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
* 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
* the frequency divisor for the corresponding vertex decl).
*/
} SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */
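/*
 * Illustrative size computation (an assumption derived from the layout
 * comment above, not a helper from the header): the FIFO packet body
 * is the fixed structure plus the trailing arrays.
 */
#if 0
static uint32 draw_primitives_body_size(uint32 numVertexDecls,
                                        uint32 numRanges,
                                        int withDivisors)
{
   return sizeof(SVGA3dCmdDrawPrimitives) +
          numVertexDecls * sizeof(SVGA3dVertexDecl) +
          numRanges * sizeof(SVGA3dPrimitiveRange) +
          (withDivisors ? numVertexDecls * sizeof(SVGA3dVertexDivisor) : 0);
}
#endif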
 
typedef
struct {
uint32 stage;
SVGA3dTextureStateName name;
union {
uint32 value;
float floatValue;
};
} SVGA3dTextureState;
 
typedef
struct {
uint32 cid;
/* Followed by variable number of SVGA3dTextureState structures */
} SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */
 
typedef
struct {
uint32 cid;
SVGA3dTransformType type;
float matrix[16];
} SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */
 
typedef
struct {
float min;
float max;
} SVGA3dZRange;
 
typedef
struct {
uint32 cid;
SVGA3dZRange zRange;
} SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */
 
typedef
struct {
float diffuse[4];
float ambient[4];
float specular[4];
float emissive[4];
float shininess;
} SVGA3dMaterial;
 
typedef
struct {
uint32 cid;
SVGA3dFace face;
SVGA3dMaterial material;
} SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */
 
typedef
struct {
uint32 cid;
uint32 index;
SVGA3dLightData data;
} SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */
 
typedef
struct {
uint32 cid;
uint32 index;
uint32 enabled;
} SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */
 
typedef
struct {
uint32 cid;
SVGA3dRect rect;
} SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */
 
typedef
struct {
uint32 cid;
SVGA3dRect rect;
} SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */
 
typedef
struct {
uint32 cid;
uint32 index;
float plane[4];
} SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */
 
typedef
struct {
uint32 cid;
uint32 shid;
SVGA3dShaderType type;
/* Followed by variable number of DWORDs for shader bytecode */
} SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */
 
typedef
struct {
uint32 cid;
uint32 shid;
SVGA3dShaderType type;
} SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */
 
typedef
struct {
uint32 cid;
uint32 reg; /* register number */
SVGA3dShaderType type;
SVGA3dShaderConstType ctype;
uint32 values[4];
} SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */
 
typedef
struct {
uint32 cid;
SVGA3dShaderType type;
uint32 shid;
} SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */
 
typedef
struct {
uint32 cid;
SVGA3dQueryType type;
} SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */
 
typedef
struct {
uint32 cid;
SVGA3dQueryType type;
SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */
} SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */
 
typedef
struct {
uint32 cid; /* Same parameters passed to END_QUERY */
SVGA3dQueryType type;
SVGAGuestPtr guestResult;
} SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */
 
typedef
struct {
uint32 totalSize; /* Set by guest before query is ended. */
SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */
union { /* Set by host on exit from PENDING state */
uint32 result32;
};
} SVGA3dQueryResult;
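
/*
 * Illustrative sketch: a guest-side check of a query result, following
 * the field descriptions above. SVGA3D_QUERYSTATE_PENDING is assumed to
 * be the PENDING value of the SVGA3dQueryState enumeration defined
 * earlier in this header.
 */
static inline Bool
example_query_done(const volatile SVGA3dQueryResult *result)
{
   /* The host overwrites 'state' when it leaves the PENDING state. */
   return result->state != SVGA3D_QUERYSTATE_PENDING;
}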
 
/*
* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
*
* This is a blit from an SVGA3D surface to a Screen Object. Just
* like GMR-to-screen blits, this blit may be directed at a
* specific screen or to the virtual coordinate space.
*
* The blit copies from a rectangular region of an SVGA3D surface
* image to a rectangular region of a screen or screens.
*
* This command takes an optional variable-length list of clipping
* rectangles after the body of the command. If no rectangles are
* specified, there is no clipping region. The entire destRect is
* drawn to. If one or more rectangles are included, they describe
* a clipping region. The clip rectangle coordinates are measured
* relative to the top-left corner of destRect.
*
* This clipping region serves multiple purposes:
*
* - It can be used to perform an irregularly shaped blit more
* efficiently than by issuing many separate blit commands.
*
* - It is equivalent to allowing blits with non-integer
* source coordinates. You could blit just one half-pixel
* of a source, for example, by specifying a larger
* destination rectangle than you need, then removing
* part of it using a clip rectangle.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT
*
* Limitations:
*
* - Currently, no backend supports blits from a mipmap or face
* other than the first one.
*/
 
typedef
struct {
SVGA3dSurfaceImageId srcImage;
SVGASignedRect srcRect;
uint32 destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */
SVGASignedRect destRect; /* Supports scaling if src/dest are different sizes */
/* Clipping: zero or more SVGASignedRects follow */
} SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
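
/*
 * Illustrative sketch: total body size of a BLIT_SURFACE_TO_SCREEN
 * command followed by 'numClipRects' clipping rectangles, per the
 * layout described above. Hypothetical helper, not part of the device
 * interface.
 */
static inline uint32
example_blit_surface_to_screen_size(uint32 numClipRects)
{
   return sizeof(SVGA3dCmdBlitSurfaceToScreen) +
          numClipRects * sizeof(SVGASignedRect);
}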
 
typedef
struct {
uint32 sid;
SVGA3dTextureFilter filter;
} SVGA3dCmdGenerateMipmaps; /* SVGA_3D_CMD_GENERATE_MIPMAPS */
 
 
/*
* Capability query index.
*
* Notes:
*
* 1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
* fixed-function texture units available. Each of these units
* works in both FFP and Shader modes, and supports texture
* transforms and texture coordinates. The host may have additional
* texture image units that are only usable with shaders.
*
* 2. The BUFFER_FORMAT capabilities are deprecated, and they always
* return TRUE. Even on physical hardware that does not support
* these formats natively, the SVGA3D device will provide an emulation
* which should be invisible to the guest OS.
*
* In general, the SVGA3D device should support any operation on
* any surface format, it just may perform some of these
* operations in software depending on the capabilities of the
* available physical hardware.
*
* XXX: In the future, we will add capabilities that describe in
* detail what formats are supported in hardware for what kinds
* of operations.
*/
 
typedef enum {
SVGA3D_DEVCAP_3D = 0,
SVGA3D_DEVCAP_MAX_LIGHTS = 1,
SVGA3D_DEVCAP_MAX_TEXTURES = 2, /* See note (1) */
SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
SVGA3D_DEVCAP_VERTEX_SHADER = 5,
SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12, /* See note (2) */
SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13, /* See note (2) */
SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14, /* See note (2) */
SVGA3D_DEVCAP_QUERY_TYPES = 15,
SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
SVGA3D_DEVCAP_TEXTURE_OPS = 31,
SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
 
/*
* Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
* render targets. This does not include the depth or stencil targets.
*/
SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
 
SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70,
SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71,
SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72,
SVGA3D_DEVCAP_SUPERSAMPLE = 73,
SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74,
SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75,
SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76,
 
/*
* This is the maximum number of SVGA context IDs that the guest
* can define using SVGA_3D_CMD_CONTEXT_DEFINE.
*/
SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77,
 
/*
* This is the maximum number of SVGA surface IDs that the guest
* can define using SVGA_3D_CMD_SURFACE_DEFINE*.
*/
SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78,
 
SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79,
SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80,
SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81,
 
SVGA3D_DEVCAP_SURFACEFMT_BC4_UNORM = 82,
SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83,
 
/*
* Don't add new caps into the previous section; the values in this
* enumeration must not change. You can put new values right before
* SVGA3D_DEVCAP_MAX.
*/
SVGA3D_DEVCAP_MAX /* This must be the last index. */
} SVGA3dDevCapIndex;
 
typedef union {
Bool b;
uint32 u;
int32 i;
float f;
} SVGA3dDevCapResult;
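
/*
 * Illustrative sketch: which member of SVGA3dDevCapResult applies
 * depends on the capability index; most limits are plain integers.
 * Hypothetical helper, assuming the result for
 * SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH has already been fetched.
 */
static inline uint32
example_max_texture_width(SVGA3dDevCapResult result)
{
   return result.u;   /* Integer-valued capability */
}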
 
#endif /* _SVGA3D_REG_H_ */
/drivers/video/drm/vmwgfx/svga3d_surfacedefs.h
0,0 → 1,909
/**************************************************************************
*
* Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#ifdef __KERNEL__
 
#include <drm/vmwgfx_drm.h>
#define surf_size_struct struct drm_vmw_size
 
#else /* __KERNEL__ */
 
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
#endif /* ARRAY_SIZE */
 
#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
#define surf_size_struct SVGA3dSize
#define u32 uint32
 
#endif /* __KERNEL__ */
 
#include "svga3d_reg.h"
 
/*
* enum svga3d_block_desc describes the active data channels in a block.
*
* There can be at most four active channels in a block:
* 1. Blue and bump U are stored in the first channel.
* 2. Green, bump V and stencil are stored in the second channel.
* 3. Red, bump W, luminance and depth are stored in the third channel.
* 4. Alpha and bump Q are stored in the fourth channel.
*
* Block channels can be used to store compressed and buffer data:
* 1. For compressed formats, only the data channel is used and its size
* is equal to that of a singular block in the compression scheme.
* 2. For buffer formats, only the data channel is used and its size is
* exactly one byte.
* 3. In each case the bit depth represents the size of a singular block.
*
* Note: Compressed and IEEE formats do not use the bitMask structure.
*/
 
enum svga3d_block_desc {
SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with blue channel
data */
SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
data */
SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
U and V */
SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
data */
SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
data */
SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
channel */
SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with red channel
data */
SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
data */
SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
data */
SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
data */
SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
channel */
SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
data */
SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
data */
SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
data depending on the
compression method used */
SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
floating point
representation in
all channels */
SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
data. */
SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
e.g., NV12. */
SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
Y, U, V, e.g., YV12. */
 
SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
SVGA3DBLOCKDESC_GREEN,
SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
SVGA3DBLOCKDESC_BLUE,
SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
SVGA3DBLOCKDESC_SRGB,
SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
SVGA3DBLOCKDESC_ALPHA,
SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
SVGA3DBLOCKDESC_SRGB,
SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
SVGA3DBLOCKDESC_V,
SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
SVGA3DBLOCKDESC_LUMINANCE,
SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
SVGA3DBLOCKDESC_W,
SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
SVGA3DBLOCKDESC_ALPHA,
SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
SVGA3DBLOCKDESC_V |
SVGA3DBLOCKDESC_W |
SVGA3DBLOCKDESC_Q,
SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
SVGA3DBLOCKDESC_ALPHA,
SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
SVGA3DBLOCKDESC_IEEE_FP,
SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
SVGA3DBLOCKDESC_GREEN,
SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
SVGA3DBLOCKDESC_BLUE,
SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
SVGA3DBLOCKDESC_ALPHA,
SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
SVGA3DBLOCKDESC_STENCIL,
SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
SVGA3DBLOCKDESC_Y,
SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
SVGA3DBLOCKDESC_Y |
SVGA3DBLOCKDESC_U_VIDEO |
SVGA3DBLOCKDESC_V_VIDEO,
SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
SVGA3DBLOCKDESC_EXP,
SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
SVGA3DBLOCKDESC_SRGB,
SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
SVGA3DBLOCKDESC_2PLANAR_YUV,
SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
SVGA3DBLOCKDESC_3PLANAR_YUV,
};
 
/*
* SVGA3dSurfaceDesc describes the actual pixel data.
*
* This structure provides the following information:
* 1. Block description.
* 2. Dimensions of a block in the surface.
* 3. Size of block in bytes.
* 4. Bit depth of the pixel data.
* 5. Channel bit depths and masks (if applicable).
*/
#define SVGA3D_CHANNEL_DEF(type) \
struct { \
union { \
type blue; \
type u; \
type uv_video; \
type u_video; \
}; \
union { \
type green; \
type v; \
type stencil; \
type v_video; \
}; \
union { \
type red; \
type w; \
type luminance; \
type y; \
type depth; \
type data; \
}; \
union { \
type alpha; \
type q; \
type exp; \
}; \
}
 
struct svga3d_surface_desc {
enum svga3d_block_desc block_desc;
surf_size_struct block_size;
u32 bytes_per_block;
u32 pitch_bytes_per_block;
 
struct {
u32 total;
SVGA3D_CHANNEL_DEF(uint8);
} bit_depth;
 
struct {
SVGA3D_CHANNEL_DEF(uint8);
} bit_offset;
};
 
static const struct svga3d_surface_desc svga3d_surface_descs[] = {
{SVGA3DBLOCKDESC_NONE,
{1, 1, 1}, 0, 0, {0, {{0}, {0}, {0}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_FORMAT_INVALID */
 
{SVGA3DBLOCKDESC_RGB,
{1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_X8R8G8B8 */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_A8R8G8B8 */
 
{SVGA3DBLOCKDESC_RGB,
{1, 1, 1}, 2, 2, {16, {{5}, {6}, {5}, {0} } },
{{{0}, {5}, {11}, {0} } } }, /* SVGA3D_R5G6B5 */
 
{SVGA3DBLOCKDESC_RGB,
{1, 1, 1}, 2, 2, {15, {{5}, {5}, {5}, {0} } },
{{{0}, {5}, {10}, {0} } } }, /* SVGA3D_X1R5G5B5 */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 2, 2, {16, {{5}, {5}, {5}, {1} } },
{{{0}, {5}, {10}, {15} } } }, /* SVGA3D_A1R5G5B5 */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 2, 2, {16, {{4}, {4}, {4}, {4} } },
{{{0}, {4}, {8}, {12} } } }, /* SVGA3D_A4R4G4B4 */
 
{SVGA3DBLOCKDESC_DEPTH,
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D32 */
 
{SVGA3DBLOCKDESC_DEPTH,
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D16 */
 
{SVGA3DBLOCKDESC_DS,
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8 */
 
{SVGA3DBLOCKDESC_DS,
{1, 1, 1}, 2, 2, {16, {{0}, {1}, {15}, {0} } },
{{{0}, {15}, {0}, {0} } } }, /* SVGA3D_Z_D15S1 */
 
{SVGA3DBLOCKDESC_LUMINANCE,
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE8 */
 
{SVGA3DBLOCKDESC_LA,
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {4}, {4} } },
{{{0}, {0}, {0}, {4} } } }, /* SVGA3D_LUMINANCE4_ALPHA4 */
 
{SVGA3DBLOCKDESC_LUMINANCE,
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE16 */
 
{SVGA3DBLOCKDESC_LA,
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
{{{0}, {0}, {0}, {8} } } }, /* SVGA3D_LUMINANCE8_ALPHA8 */
 
{SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT1 */
 
{SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT2 */
 
{SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT3 */
 
{SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT4 */
 
{SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT5 */
 
{SVGA3DBLOCKDESC_UV,
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
{{{0}, {0}, {0}, {8} } } }, /* SVGA3D_BUMPU8V8 */
 
{SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 2, 2, {16, {{5}, {5}, {6}, {0} } },
{{{11}, {6}, {0}, {0} } } }, /* SVGA3D_BUMPL6V5U5 */
 
{SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {0} } },
{{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPX8L8V8U8 */
 
{SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 3, 3, {24, {{8}, {8}, {8}, {0} } },
{{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPL8V8U8 */
 
{SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
{{{32}, {16}, {0}, {48} } } }, /* SVGA3D_ARGB_S10E5 */
 
{SVGA3DBLOCKDESC_RGBA_FP,
{1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
{{{64}, {32}, {0}, {96} } } }, /* SVGA3D_ARGB_S23E8 */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
{{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2R10G10B10 */
 
{SVGA3DBLOCKDESC_UV,
{1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
{{{8}, {0}, {0}, {0} } } }, /* SVGA3D_V8U8 */
 
{SVGA3DBLOCKDESC_UVWQ,
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
{{{24}, {16}, {8}, {0} } } }, /* SVGA3D_Q8W8V8U8 */
 
{SVGA3DBLOCKDESC_UV,
{1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
{{{8}, {0}, {0}, {0} } } }, /* SVGA3D_CxV8U8 */
 
{SVGA3DBLOCKDESC_UVL,
{1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
{{{16}, {8}, {0}, {0} } } }, /* SVGA3D_X8L8V8U8 */
 
{SVGA3DBLOCKDESC_UVWA,
{1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
{{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2W10V10U10 */
 
{SVGA3DBLOCKDESC_ALPHA,
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {0}, {8} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_ALPHA8 */
 
{SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S10E5 */
 
{SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S23E8 */
 
{SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
{{{0}, {16}, {0}, {0} } } }, /* SVGA3D_RG_S10E5 */
 
{SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
{{{0}, {32}, {0}, {0} } } }, /* SVGA3D_RG_S23E8 */
 
{SVGA3DBLOCKDESC_BUFFER,
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BUFFER */
 
{SVGA3DBLOCKDESC_DEPTH,
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24X8 */
 
{SVGA3DBLOCKDESC_UV,
{1, 1, 1}, 4, 4, {32, {{16}, {16}, {0}, {0} } },
{{{16}, {0}, {0}, {0} } } }, /* SVGA3D_V16U16 */
 
{SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
{{{0}, {0}, {16}, {0} } } }, /* SVGA3D_G16R16 */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
{{{32}, {16}, {0}, {48} } } }, /* SVGA3D_A16B16G16R16 */
 
{SVGA3DBLOCKDESC_YUV,
{1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
{{{0}, {0}, {8}, {0} } } }, /* SVGA3D_UYVY */
 
{SVGA3DBLOCKDESC_YUV,
{1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
{{{8}, {0}, {0}, {0} } } }, /* SVGA3D_YUY2 */
 
{SVGA3DBLOCKDESC_NV12,
{2, 2, 1}, 6, 2, {48, {{0}, {0}, {48}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_NV12 */
 
{SVGA3DBLOCKDESC_AYUV,
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_AYUV */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
{{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_TYPELESS */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
{{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_UINT */
 
{SVGA3DBLOCKDESC_UVWQ,
{1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
{{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_SINT */
 
{SVGA3DBLOCKDESC_RGB,
{1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
{{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_TYPELESS */
 
{SVGA3DBLOCKDESC_RGB_FP,
{1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
{{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_FLOAT */
 
{SVGA3DBLOCKDESC_RGB,
{1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
{{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_UINT */
 
{SVGA3DBLOCKDESC_UVW,
{1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
{{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_SINT */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
{{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_TYPELESS */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
{{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_UINT */
 
{SVGA3DBLOCKDESC_UVWQ,
{1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
{{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SNORM */
 
{SVGA3DBLOCKDESC_UVWQ,
{1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
{{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SINT */
 
{SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
{{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_TYPELESS */
 
{SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
{{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_UINT */
 
{SVGA3DBLOCKDESC_UV,
{1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
{{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_SINT */
 
{SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
{{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G8X24_TYPELESS */
 
{SVGA3DBLOCKDESC_DS,
{1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
{{{0}, {32}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT_S8X24_UINT */
 
{SVGA3DBLOCKDESC_R_FP,
{1, 1, 1}, 8, 8, {64, {{0}, {0}, {32}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
 
{SVGA3DBLOCKDESC_GREEN,
{1, 1, 1}, 8, 8, {64, {{0}, {8}, {0}, {0} } },
{{{0}, {32}, {0}, {0} } } }, /* SVGA3D_X32_TYPELESS_G8X24_UINT */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
{{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_TYPELESS */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
{{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_UINT */
 
{SVGA3DBLOCKDESC_RGB_FP,
{1, 1, 1}, 4, 4, {32, {{10}, {11}, {11}, {0} } },
{{{0}, {10}, {21}, {0} } } }, /* SVGA3D_R11G11B10_FLOAT */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
{{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_TYPELESS */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
{{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM */
 
{SVGA3DBLOCKDESC_RGBA_SRGB,
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
{{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM_SRGB */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
{{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UINT */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
{{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_SINT */
 
{SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
{{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_TYPELESS */
 
{SVGA3DBLOCKDESC_RG_FP,
{1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
{{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_UINT */
 
{SVGA3DBLOCKDESC_UV,
{1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
{{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_SINT */
 
{SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_TYPELESS */
 
{SVGA3DBLOCKDESC_DEPTH,
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT */
 
{SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_UINT */
 
{SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_SINT */
 
{SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_R24G8_TYPELESS */
 
{SVGA3DBLOCKDESC_DS,
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_D24_UNORM_S8_UINT */
 
{SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R24_UNORM_X8_TYPELESS */
 
{SVGA3DBLOCKDESC_GREEN,
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {0}, {0} } },
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_X24_TYPELESS_G8_UINT */
 
{SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_TYPELESS */
 
{SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UNORM */
 
{SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UINT */
 
{SVGA3DBLOCKDESC_UV,
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_SINT */
 
{SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_TYPELESS */
 
{SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UNORM */
 
{SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UINT */
 
{SVGA3DBLOCKDESC_U,
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SNORM */
 
{SVGA3DBLOCKDESC_U,
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SINT */
 
{SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_TYPELESS */
 
{SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UNORM */
 
{SVGA3DBLOCKDESC_RED,
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UINT */
 
{SVGA3DBLOCKDESC_U,
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SNORM */
 
{SVGA3DBLOCKDESC_U,
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SINT */
 
{SVGA3DBLOCKDESC_RED,
{8, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R1_UNORM */
 
{SVGA3DBLOCKDESC_RGBE,
{1, 1, 1}, 4, 4, {32, {{9}, {9}, {9}, {5} } },
{{{18}, {9}, {0}, {27} } } }, /* SVGA3D_R9G9B9E5_SHAREDEXP */
 
{SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_B8G8_UNORM */
 
{SVGA3DBLOCKDESC_RG,
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_G8R8_G8B8_UNORM */
 
{SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_TYPELESS */
 
{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_UNORM_SRGB */
 
{SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_TYPELESS */
 
{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_UNORM_SRGB */
 
{SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_TYPELESS */
 
{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_UNORM_SRGB */
 
{SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_TYPELESS */
 
{SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_UNORM */
 
{SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_SNORM */
 
{SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_TYPELESS */
 
{SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_UNORM */
 
{SVGA3DBLOCKDESC_COMPRESSED,
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_SNORM */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
{{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
 
{SVGA3DBLOCKDESC_RGBA,
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_TYPELESS */
 
{SVGA3DBLOCKDESC_RGBA_SRGB,
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_UNORM_SRGB */
 
{SVGA3DBLOCKDESC_RGB,
{1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_TYPELESS */
 
{SVGA3DBLOCKDESC_RGB_SRGB,
{1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_UNORM_SRGB */
 
{SVGA3DBLOCKDESC_DEPTH,
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_DF16 */
 
{SVGA3DBLOCKDESC_DS,
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_DF24 */
 
{SVGA3DBLOCKDESC_DS,
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8_INT */
};
 
static inline u32 clamped_umul32(u32 a, u32 b)
{
uint64_t tmp = (uint64_t) a*b;
return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
}
 
static inline const struct svga3d_surface_desc *
svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
{
if (format < ARRAY_SIZE(svga3d_surface_descs))
return &svga3d_surface_descs[format];
 
return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
}
 
/*
*----------------------------------------------------------------------
*
* svga3dsurface_get_mip_size --
*
* Given a base level size and the mip level, compute the size of
* the mip level.
*
* Results:
* See above.
*
* Side effects:
* None.
*
*----------------------------------------------------------------------
*/
 
static inline surf_size_struct
svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
{
surf_size_struct size;
 
size.width = max_t(u32, base_level.width >> mip_level, 1);
size.height = max_t(u32, base_level.height >> mip_level, 1);
size.depth = max_t(u32, base_level.depth >> mip_level, 1);
return size;
}
 
static inline void
svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
const surf_size_struct *pixel_size,
surf_size_struct *block_size)
{
block_size->width = DIV_ROUND_UP(pixel_size->width,
desc->block_size.width);
block_size->height = DIV_ROUND_UP(pixel_size->height,
desc->block_size.height);
block_size->depth = DIV_ROUND_UP(pixel_size->depth,
desc->block_size.depth);
}
 
static inline bool
svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
{
return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
}
 
static inline u32
svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
const surf_size_struct *size)
{
u32 pitch;
surf_size_struct blocks;
 
svga3dsurface_get_size_in_blocks(desc, size, &blocks);
 
pitch = blocks.width * desc->pitch_bytes_per_block;
 
return pitch;
}
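
/*
 * Illustrative usage: for SVGA3D_DXT1 (4x4 blocks of 8 bytes each, per
 * the table above), a 64x64 image is 16 blocks wide, so the computed
 * pitch is 16 * 8 = 128 bytes.
 */
static inline u32
example_dxt1_pitch(void)
{
   surf_size_struct size = { .width = 64, .height = 64, .depth = 1 };
   const struct svga3d_surface_desc *desc =
      svga3dsurface_get_desc(SVGA3D_DXT1);

   return svga3dsurface_calculate_pitch(desc, &size);   /* 128 */
}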
 
/*
*-----------------------------------------------------------------------------
*
* svga3dsurface_get_image_buffer_size --
*
* Return the number of bytes of buffer space required to store
* one image of a surface, optionally using the specified pitch.
*
* If pitch is zero, it is assumed that rows are tightly packed.
*
* This function is overflow-safe. If the result would have
* overflowed, instead we return MAX_UINT32.
*
* Results:
* Byte count.
*
* Side effects:
* None.
*
*-----------------------------------------------------------------------------
*/
 
static inline u32
svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
const surf_size_struct *size,
u32 pitch)
{
surf_size_struct image_blocks;
u32 slice_size, total_size;
 
svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
 
if (svga3dsurface_is_planar_surface(desc)) {
total_size = clamped_umul32(image_blocks.width,
image_blocks.height);
total_size = clamped_umul32(total_size, image_blocks.depth);
total_size = clamped_umul32(total_size, desc->bytes_per_block);
return total_size;
}
 
if (pitch == 0)
pitch = svga3dsurface_calculate_pitch(desc, size);
 
slice_size = clamped_umul32(image_blocks.height, pitch);
total_size = clamped_umul32(slice_size, image_blocks.depth);
 
return total_size;
}
 
static inline u32
svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
surf_size_struct base_level_size,
u32 num_mip_levels,
bool cubemap)
{
const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
u32 total_size = 0;
u32 mip;
 
for (mip = 0; mip < num_mip_levels; mip++) {
surf_size_struct size =
svga3dsurface_get_mip_size(base_level_size, mip);
total_size += svga3dsurface_get_image_buffer_size(desc,
&size, 0);
}
 
if (cubemap)
total_size *= SVGA3D_MAX_SURFACE_FACES;
 
return total_size;
}
 
 
/**
* svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
* in an image (or volume).
*
* @format: The surface format.
* @width: The image width in pixels.
* @height: The image height in pixels.
* @x: The pixel x coordinate.
* @y: The pixel y coordinate.
* @z: The pixel z coordinate (slice index for volumes).
*/
static inline u32
svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
u32 width, u32 height,
u32 x, u32 y, u32 z)
{
const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
const u32 bw = desc->block_size.width, bh = desc->block_size.height;
const u32 bd = desc->block_size.depth;
const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
const u32 offset = (z / bd * imgstride +
y / bh * rowstride +
x / bw * desc->bytes_per_block);
return offset;
}
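
/*
 * Illustrative usage: byte offset of pixel (x=5, y=7, z=0) in a 64x64
 * SVGA3D_A8R8G8B8 image. With 1x1 blocks of 4 bytes, the rowstride is
 * 64 * 4 = 256, so the result is 7 * 256 + 5 * 4 = 1812.
 */
static inline u32
example_pixel_offset(void)
{
   return svga3dsurface_get_pixel_offset(SVGA3D_A8R8G8B8,
                                         64, 64, 5, 7, 0);
}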
 
 
static inline u32
svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
surf_size_struct baseLevelSize,
u32 numMipLevels,
u32 face,
u32 mip)
{
u32 offset;
u32 mipChainBytes;
u32 mipChainBytesToLevel;
u32 i;
const struct svga3d_surface_desc *desc;
surf_size_struct mipSize;
u32 bytes;
 
desc = svga3dsurface_get_desc(format);
 
mipChainBytes = 0;
mipChainBytesToLevel = 0;
for (i = 0; i < numMipLevels; i++) {
mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
mipChainBytes += bytes;
if (i < mip)
mipChainBytesToLevel += bytes;
}
 
offset = mipChainBytes * face + mipChainBytesToLevel;
 
return offset;
}
/drivers/video/drm/vmwgfx/svga_reg.h
0,0 → 1,1552
/**********************************************************
* Copyright 1998-2009 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
 
/*
* svga_reg.h --
*
* Virtual hardware definitions for the VMware SVGA II device.
*/
 
#ifndef _SVGA_REG_H_
#define _SVGA_REG_H_
 
/*
* PCI device IDs.
*/
#define PCI_VENDOR_ID_VMWARE 0x15AD
#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
 
/*
* SVGA_REG_ENABLE bit definitions.
*/
#define SVGA_REG_ENABLE_DISABLE 0
#define SVGA_REG_ENABLE_ENABLE 1
#define SVGA_REG_ENABLE_HIDE 2
#define SVGA_REG_ENABLE_ENABLE_HIDE (SVGA_REG_ENABLE_ENABLE |\
SVGA_REG_ENABLE_HIDE)
 
/*
* Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
* cursor bypass mode. This is still supported, but no new guest
* drivers should use it.
*/
#define SVGA_CURSOR_ON_HIDE 0x0 /* Must be 0 to maintain backward compatibility */
#define SVGA_CURSOR_ON_SHOW 0x1 /* Must be 1 to maintain backward compatibility */
#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2 /* Remove the cursor from the framebuffer because we need to see what's under it */
#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */
 
/*
* The maximum framebuffer size that can be traced, e.g. for guests in VESA mode.
* The changeMap in the monitor is proportional to this number. Therefore, we'd
* like to keep it as small as possible to reduce monitor overhead (using
* SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
* 4k!).
*
* NB: For compatibility reasons, this value must be greater than 0xff0000.
* See bug 335072.
*/
#define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000
 
#define SVGA_MAX_PSEUDOCOLOR_DEPTH 8
#define SVGA_MAX_PSEUDOCOLORS (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH)
#define SVGA_NUM_PALETTE_REGS (3 * SVGA_MAX_PSEUDOCOLORS)
 
#define SVGA_MAGIC 0x900000UL
#define SVGA_MAKE_ID(ver) (SVGA_MAGIC << 8 | (ver))
 
/* Version 2 let the address of the frame buffer be unsigned on Win32 */
#define SVGA_VERSION_2 2
#define SVGA_ID_2 SVGA_MAKE_ID(SVGA_VERSION_2)
 
/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so
PALETTE_BASE has moved */
#define SVGA_VERSION_1 1
#define SVGA_ID_1 SVGA_MAKE_ID(SVGA_VERSION_1)
 
/* Version 0 is the initial version */
#define SVGA_VERSION_0 0
#define SVGA_ID_0 SVGA_MAKE_ID(SVGA_VERSION_0)
 
/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
#define SVGA_ID_INVALID 0xFFFFFFFF
 
/* Port offsets, relative to BAR0 */
#define SVGA_INDEX_PORT 0x0
#define SVGA_VALUE_PORT 0x1
#define SVGA_BIOS_PORT 0x2
#define SVGA_IRQSTATUS_PORT 0x8
 
/*
* Interrupt source flags for IRQSTATUS_PORT and IRQMASK.
*
* Interrupts are only supported when the
* SVGA_CAP_IRQMASK capability is present.
*/
#define SVGA_IRQFLAG_ANY_FENCE 0x1 /* Any fence was passed */
#define SVGA_IRQFLAG_FIFO_PROGRESS 0x2 /* Made forward progress in the FIFO */
#define SVGA_IRQFLAG_FENCE_GOAL 0x4 /* SVGA_FIFO_FENCE_GOAL reached */
 
/*
* Registers
*/
 
enum {
SVGA_REG_ID = 0,
SVGA_REG_ENABLE = 1,
SVGA_REG_WIDTH = 2,
SVGA_REG_HEIGHT = 3,
SVGA_REG_MAX_WIDTH = 4,
SVGA_REG_MAX_HEIGHT = 5,
SVGA_REG_DEPTH = 6,
SVGA_REG_BITS_PER_PIXEL = 7, /* Current bpp in the guest */
SVGA_REG_PSEUDOCOLOR = 8,
SVGA_REG_RED_MASK = 9,
SVGA_REG_GREEN_MASK = 10,
SVGA_REG_BLUE_MASK = 11,
SVGA_REG_BYTES_PER_LINE = 12,
SVGA_REG_FB_START = 13, /* (Deprecated) */
SVGA_REG_FB_OFFSET = 14,
SVGA_REG_VRAM_SIZE = 15,
SVGA_REG_FB_SIZE = 16,
 
/* ID 0 implementation only had the above registers, then the palette */
 
SVGA_REG_CAPABILITIES = 17,
SVGA_REG_MEM_START = 18, /* (Deprecated) */
SVGA_REG_MEM_SIZE = 19,
SVGA_REG_CONFIG_DONE = 20, /* Set when memory area configured */
SVGA_REG_SYNC = 21, /* See "FIFO Synchronization Registers" */
SVGA_REG_BUSY = 22, /* See "FIFO Synchronization Registers" */
SVGA_REG_GUEST_ID = 23, /* Set guest OS identifier */
SVGA_REG_CURSOR_ID = 24, /* (Deprecated) */
SVGA_REG_CURSOR_X = 25, /* (Deprecated) */
SVGA_REG_CURSOR_Y = 26, /* (Deprecated) */
SVGA_REG_CURSOR_ON = 27, /* (Deprecated) */
SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */
SVGA_REG_SCRATCH_SIZE = 29, /* Number of scratch registers */
SVGA_REG_MEM_REGS = 30, /* Number of FIFO registers */
SVGA_REG_NUM_DISPLAYS = 31, /* (Deprecated) */
SVGA_REG_PITCHLOCK = 32, /* Fixed pitch for all modes */
SVGA_REG_IRQMASK = 33, /* Interrupt mask */
 
/* Legacy multi-monitor support */
SVGA_REG_NUM_GUEST_DISPLAYS = 34,/* Number of guest displays in X/Y direction */
SVGA_REG_DISPLAY_ID = 35, /* Display ID for the following display attributes */
SVGA_REG_DISPLAY_IS_PRIMARY = 36,/* Whether this is a primary display */
SVGA_REG_DISPLAY_POSITION_X = 37,/* The display position x */
SVGA_REG_DISPLAY_POSITION_Y = 38,/* The display position y */
SVGA_REG_DISPLAY_WIDTH = 39, /* The display's width */
SVGA_REG_DISPLAY_HEIGHT = 40, /* The display's height */
 
/* See "Guest memory regions" below. */
SVGA_REG_GMR_ID = 41,
SVGA_REG_GMR_DESCRIPTOR = 42,
SVGA_REG_GMR_MAX_IDS = 43,
SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
 
SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */
SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
SVGA_REG_TOP = 48, /* Must be 1 more than the last register */
 
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
 
SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
/* Base of scratch registers */
/* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
First 4 are reserved for VESA BIOS Extension; any remaining are for
the use of the current SVGA driver. */
};
 
 
/*
* Guest memory regions (GMRs):
*
* This is a new memory mapping feature available in SVGA devices
* which have the SVGA_CAP_GMR bit set. Previously, there were two
* fixed memory regions available with which to share data between the
* device and the driver: the FIFO ('MEM') and the framebuffer. GMRs
* are our name for an extensible way of providing arbitrary DMA
* buffers for use between the driver and the SVGA device. They are a
* new alternative to framebuffer memory, usable for both 2D and 3D
* graphics operations.
*
* Since GMR mapping must be done synchronously with guest CPU
* execution, we use a new pair of SVGA registers:
*
* SVGA_REG_GMR_ID --
*
* Read/write.
* This register holds the 32-bit ID (a small positive integer)
* of a GMR to create, delete, or redefine. Writing this register
* has no side-effects.
*
* SVGA_REG_GMR_DESCRIPTOR --
*
* Write-only.
* Writing this register will create, delete, or redefine the GMR
* specified by the above ID register. If this register is zero,
* the GMR is deleted. Any pointers into this GMR (including those
* currently being processed by FIFO commands) will be
* synchronously invalidated.
*
* If this register is nonzero, it must be the physical page
* number (PPN) of a data structure which describes the physical
* layout of the memory region this GMR should describe. The
* descriptor structure will be read synchronously by the SVGA
* device when this register is written. The descriptor need not
* remain allocated for the lifetime of the GMR.
*
* The guest driver should write SVGA_REG_GMR_ID first, then
* SVGA_REG_GMR_DESCRIPTOR.
*
* SVGA_REG_GMR_MAX_IDS --
*
* Read-only.
* The SVGA device may choose to support a maximum number of
* user-defined GMR IDs. This register holds the number of supported
* IDs. (The maximum supported ID plus 1)
*
* SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH --
*
* Read-only.
* The SVGA device may choose to put a limit on the total number
* of SVGAGuestMemDescriptor structures it will read when defining
* a single GMR.
*
* The descriptor structure is an array of SVGAGuestMemDescriptor
* structures. Each structure may do one of three things:
*
* - Terminate the GMR descriptor list.
* (ppn==0, numPages==0)
*
* - Add a PPN or range of PPNs to the GMR's virtual address space.
* (ppn != 0, numPages != 0)
*
* - Provide the PPN of the next SVGAGuestMemDescriptor, in order to
* support multi-page GMR descriptor tables without forcing the
* driver to allocate physically contiguous memory.
* (ppn != 0, numPages == 0)
*
* Note that each physical page of SVGAGuestMemDescriptor structures
* can describe at least 2MB of guest memory. If the driver needs to
* use more than one page of descriptor structures, it must use one of
* its SVGAGuestMemDescriptors to point to an additional page. The
* device will never automatically cross a page boundary.
*
* Once the driver has described a GMR, it is immediately available
* for use via any FIFO command that uses an SVGAGuestPtr structure.
* These pointers include a GMR identifier plus an offset into that
* GMR.
*
* The driver must check the SVGA_CAP_GMR bit before using the GMR
* registers.
*/
 
/*
* Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer
* memory as well. In the future, these IDs could even be used to
* allow legacy memory regions to be redefined by the guest as GMRs.
*
* Using the guest framebuffer (GFB) at BAR1 for general purpose DMA
* is being phased out. Please try to use user-defined GMRs whenever
* possible.
*/
#define SVGA_GMR_NULL ((uint32) -1)
#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) /* Guest Framebuffer (GFB) */
 
typedef
struct SVGAGuestMemDescriptor {
uint32 ppn;
uint32 numPages;
} SVGAGuestMemDescriptor;
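
/*
 * Illustrative sketch: a minimal descriptor list describing one
 * contiguous run of pages, terminated by the (ppn==0, numPages==0)
 * entry described above. 'descs' must fit within a single guest page.
 */
static inline void
example_build_gmr_descriptor(SVGAGuestMemDescriptor *descs,
                             uint32 firstPpn, uint32 numPages)
{
   descs[0].ppn = firstPpn;      /* Run of 'numPages' contiguous pages */
   descs[0].numPages = numPages;
   descs[1].ppn = 0;             /* List terminator */
   descs[1].numPages = 0;
}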
 
typedef
struct SVGAGuestPtr {
uint32 gmrId;
uint32 offset;
} SVGAGuestPtr;
 
 
/*
* SVGAGMRImageFormat --
*
* This is a packed representation of the source 2D image format
* for a GMR-to-screen blit. Currently it is defined as an encoding
* of the screen's color depth and bits-per-pixel, however, 16 bits
* are reserved for future use to identify other encodings (such as
* RGBA or higher-precision images).
*
* Currently supported formats:
*
* bpp depth Format Name
* --- ----- -----------
* 32 24 32-bit BGRX
* 24 24 24-bit BGR
* 16 16 RGB 5-6-5
* 16 15 RGB 5-5-5
*
*/
 
typedef
struct SVGAGMRImageFormat {
union {
struct {
uint32 bitsPerPixel : 8;
uint32 colorDepth : 8;
uint32 reserved : 16; /* Must be zero */
};
 
uint32 value;
};
} SVGAGMRImageFormat;
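
/*
 * Illustrative sketch: encoding the 32-bit BGRX entry from the table
 * above.
 */
static inline SVGAGMRImageFormat
example_bgrx_format(void)
{
   SVGAGMRImageFormat fmt;

   fmt.value = 0;          /* Clears the reserved bits */
   fmt.bitsPerPixel = 32;
   fmt.colorDepth = 24;
   return fmt;
}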
 
typedef
struct SVGAGuestImage {
SVGAGuestPtr ptr;
 
/*
* A note on interpretation of pitch: This value of pitch is the
* number of bytes between vertically adjacent image
* blocks. Normally this is the number of bytes between the first
* pixel of two adjacent scanlines. With compressed textures,
* however, this may represent the number of bytes between
* compression blocks rather than between rows of pixels.
*
* XXX: Compressed textures currently must be tightly packed in guest memory.
*
* If the image is 1-dimensional, pitch is ignored.
*
* If 'pitch' is zero, the SVGA3D device calculates a pitch value
* assuming each row of blocks is tightly packed.
*/
uint32 pitch;
} SVGAGuestImage;
 
/*
* SVGAColorBGRX --
*
* A 24-bit color format (BGRX), which does not depend on the
* format of the legacy guest framebuffer (GFB) or the current
* GMRFB state.
*/
 
typedef
struct SVGAColorBGRX {
union {
struct {
uint32 b : 8;
uint32 g : 8;
uint32 r : 8;
uint32 x : 8; /* Unused */
};
 
uint32 value;
};
} SVGAColorBGRX;
 
 
/*
* SVGASignedRect --
* SVGASignedPoint --
*
* Signed rectangle and point primitives. These are used by the new
* 2D primitives for drawing to Screen Objects, which can occupy a
* signed virtual coordinate space.
*
* SVGASignedRect specifies a half-open interval: the (left, top)
* pixel is part of the rectangle, but the (right, bottom) pixel is
* not.
*/
 
typedef
struct SVGASignedRect {
int32 left;
int32 top;
int32 right;
int32 bottom;
} SVGASignedRect;
 
typedef
struct SVGASignedPoint {
int32 x;
int32 y;
} SVGASignedPoint;
 
 
/*
* Capabilities
*
* Note the holes in the bitfield. Missing bits have been deprecated,
* and must not be reused. Those capabilities will never be reported
* by new versions of the SVGA device.
*
* SVGA_CAP_GMR2 --
* Provides asynchronous commands to define and remap guest memory
* regions. Adds device registers SVGA_REG_GMRS_MAX_PAGES and
* SVGA_REG_MEMORY_SIZE.
*
* SVGA_CAP_SCREEN_OBJECT_2 --
* Allow screen object support, and require backing stores from the
* guest for each screen object.
*/
 
#define SVGA_CAP_NONE 0x00000000
#define SVGA_CAP_RECT_COPY 0x00000002
#define SVGA_CAP_CURSOR 0x00000020
#define SVGA_CAP_CURSOR_BYPASS 0x00000040 /* Legacy (Use Cursor Bypass 3 instead) */
#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 /* Legacy (Use Cursor Bypass 3 instead) */
#define SVGA_CAP_8BIT_EMULATION 0x00000100
#define SVGA_CAP_ALPHA_CURSOR 0x00000200
#define SVGA_CAP_3D 0x00004000
#define SVGA_CAP_EXTENDED_FIFO 0x00008000
#define SVGA_CAP_MULTIMON 0x00010000 /* Legacy multi-monitor support */
#define SVGA_CAP_PITCHLOCK 0x00020000
#define SVGA_CAP_IRQMASK 0x00040000
#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 /* Legacy multi-monitor support */
#define SVGA_CAP_GMR 0x00100000
#define SVGA_CAP_TRACES 0x00200000
#define SVGA_CAP_GMR2 0x00400000
#define SVGA_CAP_SCREEN_OBJECT_2 0x00800000
 
 
/*
* FIFO register indices.
*
* The FIFO is a chunk of device memory mapped into guest physmem. It
* is always treated as 32-bit words.
*
* The guest driver gets to decide how to partition it between
* - FIFO registers (there are always at least 4, specifying where the
* following data area is and how much data it contains; there may be
* more registers following these, depending on the FIFO protocol
* version in use)
* - FIFO data, written by the guest and slurped out by the VMX.
* These indices are 32-bit word offsets into the FIFO.
*/
 
enum {
/*
* Block 1 (basic registers): The originally defined FIFO registers.
* These exist and are valid for all versions of the FIFO protocol.
*/
 
SVGA_FIFO_MIN = 0,
SVGA_FIFO_MAX, /* The distance from MIN to MAX must be at least 10K */
SVGA_FIFO_NEXT_CMD,
SVGA_FIFO_STOP,
 
/*
* Block 2 (extended registers): Mandatory registers for the extended
* FIFO. These exist if the SVGA caps register includes
* SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their
* associated capability bit is enabled.
*
* Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied
* support only for (FIFO registers) CAPABILITIES, FLAGS, and FENCE.
* This means that the guest has to test individually (in most cases
* using FIFO caps) for the presence of registers after this; the VMX
* can define "extended FIFO" to mean whatever it wants, and currently
* won't enable it unless there's room for that set and much more.
*/
 
SVGA_FIFO_CAPABILITIES = 4,
SVGA_FIFO_FLAGS,
/* Valid with SVGA_FIFO_CAP_FENCE: */
SVGA_FIFO_FENCE,
 
/*
* Block 3a (optional extended registers): Additional registers for the
* extended FIFO, whose presence isn't actually implied by
* SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to
* leave room for them.
*
* The VMX currently considers the registers in this block (3a)
* mandatory for the extended FIFO.
*/
 
/* Valid if exists (i.e. if extended FIFO enabled): */
SVGA_FIFO_3D_HWVERSION, /* See SVGA3dHardwareVersion in svga3d_reg.h */
/* Valid with SVGA_FIFO_CAP_PITCHLOCK: */
SVGA_FIFO_PITCHLOCK,
 
/* Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3: */
SVGA_FIFO_CURSOR_ON, /* Cursor bypass 3 show/hide register */
SVGA_FIFO_CURSOR_X, /* Cursor bypass 3 x register */
SVGA_FIFO_CURSOR_Y, /* Cursor bypass 3 y register */
SVGA_FIFO_CURSOR_COUNT, /* Incremented when any of the other 3 change */
SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
 
/* Valid with SVGA_FIFO_CAP_RESERVE: */
SVGA_FIFO_RESERVED, /* Bytes past NEXT_CMD with real contents */
 
/*
* Valid with SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2:
*
* By default this is SVGA_ID_INVALID, to indicate that the cursor
* coordinates are specified relative to the virtual root. If this
* is set to a specific screen ID, cursor position is reinterpreted
* as a signed offset relative to that screen's origin.
*/
SVGA_FIFO_CURSOR_SCREEN_ID,
 
/*
* Valid with SVGA_FIFO_CAP_DEAD
*
* An arbitrary value written by the host, drivers should not use it.
*/
SVGA_FIFO_DEAD,
 
/*
* Valid with SVGA_FIFO_CAP_3D_HWVERSION_REVISED:
*
* Contains 3D HWVERSION (see SVGA3dHardwareVersion in svga3d_reg.h)
* on platforms that can enforce graphics resource limits.
*/
SVGA_FIFO_3D_HWVERSION_REVISED,
 
/*
* XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
* registers, but this must be done carefully and with judicious use of
* capability bits, since comparisons based on SVGA_FIFO_MIN aren't
* enough to tell you whether the register exists: we've shipped drivers
* and products that used SVGA_FIFO_3D_CAPS but didn't know about some of
* the earlier ones. The actual order of introduction was:
* - PITCHLOCK
* - 3D_CAPS
* - CURSOR_* (cursor bypass 3)
* - RESERVED
* So, code that wants to know whether it can use any of the
* aforementioned registers, or anything else added after PITCHLOCK and
* before 3D_CAPS, needs to reason about something other than
* SVGA_FIFO_MIN.
*/
 
/*
* 3D caps block space; valid with 3D hardware version >=
* SVGA3D_HWVERSION_WS6_B1.
*/
SVGA_FIFO_3D_CAPS = 32,
SVGA_FIFO_3D_CAPS_LAST = 32 + 255,
 
/*
* End of VMX's current definition of "extended-FIFO registers".
* Registers before here are always enabled/disabled as a block; either
* the extended FIFO is enabled and includes all preceding registers, or
* it's disabled entirely.
*
* Block 3b (truly optional extended registers): Additional registers for
* the extended FIFO, which the VMX already knows how to enable and
* disable with correct granularity.
*
* Registers after here exist if and only if the guest SVGA driver
* sets SVGA_FIFO_MIN high enough to leave room for them.
*/
 
/* Valid if register exists: */
SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
SVGA_FIFO_FENCE_GOAL, /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
SVGA_FIFO_BUSY, /* See "FIFO Synchronization Registers" */
 
/*
* Always keep this last. This defines the maximum number of
* registers we know about. At power-on, this value is placed in
* the SVGA_REG_MEM_REGS register, and we expect the guest driver
* to allocate this much space in FIFO memory for registers.
*/
SVGA_FIFO_NUM_REGS
};
 
 
/*
* Definition of registers included in extended FIFO support.
*
* The guest SVGA driver gets to allocate the FIFO between registers
* and data. It must always allocate at least 4 registers, but old
* drivers stopped there.
*
* The VMX will enable extended FIFO support if and only if the guest
* left enough room for all registers defined as part of the mandatory
* set for the extended FIFO.
*
* Note that the guest drivers typically allocate the FIFO only at
* initialization time, not at mode switches, so it's likely that the
* number of FIFO registers won't change without a reboot.
*
* All registers less than this value are guaranteed to be present if
* svgaUser->fifo.extended is set. Any later registers must be tested
* individually for compatibility at each use (in the VMX).
*
* This value is used only by the VMX, so it can change without
* affecting driver compatibility; keep it that way?
*/
#define SVGA_FIFO_EXTENDED_MANDATORY_REGS (SVGA_FIFO_3D_CAPS_LAST + 1)
 
 
/*
* FIFO Synchronization Registers
*
* This explains the relationship between the various FIFO
* sync-related registers in IOSpace and in FIFO space.
*
* SVGA_REG_SYNC --
*
* The SYNC register can be used in two different ways by the guest:
*
* 1. If the guest wishes to fully sync (drain) the FIFO,
* it will write once to SYNC then poll on the BUSY
* register. The FIFO is sync'ed once BUSY is zero.
*
* 2. If the guest wants to asynchronously wake up the host,
* it will write once to SYNC without polling on BUSY.
* Ideally it will do this after some new commands have
* been placed in the FIFO, and after reading a zero
* from SVGA_FIFO_BUSY.
*
* (1) is the original behaviour that SYNC was designed to
* support. Originally, a write to SYNC would implicitly
* trigger a read from BUSY. This causes us to synchronously
* process the FIFO.
*
* This behaviour has since been changed so that writing SYNC
* will *not* implicitly cause a read from BUSY. Instead, it
* makes a channel call which asynchronously wakes up the MKS
* thread.
*
* New guests can use this new behaviour to implement (2)
* efficiently. This lets guests get the host's attention
* without waiting for the MKS to poll, which gives us much
* better CPU utilization on SMP hosts and on UP hosts while
* we're blocked on the host GPU.
*
* Old guests shouldn't notice the behaviour change. SYNC was
* never guaranteed to process the entire FIFO, since it was
* bounded to a particular number of CPU cycles. Old guests will
* still loop on the BUSY register until the FIFO is empty.
*
* Writing to SYNC currently has the following side-effects:
*
* - Sets SVGA_REG_BUSY to TRUE (in the monitor)
* - Asynchronously wakes up the MKS thread for FIFO processing
* - The value written to SYNC is recorded as a "reason", for
* stats purposes.
*
* If SVGA_FIFO_BUSY is available, drivers are advised to only
* write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
* SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
* eventually set SVGA_FIFO_BUSY on its own, but this approach
* lets the driver avoid sending multiple asynchronous wakeup
* messages to the MKS thread.
*
* SVGA_REG_BUSY --
*
* This register is set to TRUE when SVGA_REG_SYNC is written,
* and it reads as FALSE when the FIFO has been completely
* drained.
*
* Every read from this register causes us to synchronously
* process FIFO commands. There is no guarantee as to how many
* commands each read will process.
*
* CPU time spent processing FIFO commands will be billed to
* the guest.
*
* New drivers should avoid using this register unless they
* need to guarantee that the FIFO is completely drained. It
* is overkill for performing a sync-to-fence. Older drivers
* will use this register for any type of synchronization.
*
* SVGA_FIFO_BUSY --
*
* This register is a fast way for the guest driver to check
* whether the FIFO is already being processed. It reads and
* writes at normal RAM speeds, with no monitor intervention.
*
* If this register reads as TRUE, the host is guaranteeing that
* any new commands written into the FIFO will be noticed before
* the MKS goes back to sleep.
*
* If this register reads as FALSE, no such guarantee can be
* made.
*
* The guest should use this register to quickly determine
* whether or not it needs to wake up the host. If the guest
* just wrote a command or group of commands that it would like
* the host to begin processing, it should:
*
* 1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
* action is necessary.
*
* 2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
* code that we've already sent a SYNC to the host and we
* don't need to send a duplicate.
*
* 3. Write a reason to SVGA_REG_SYNC. This will send an
* asynchronous wakeup to the MKS thread.
*/
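
/*
 * A minimal sketch of the three-step wakeup sequence above, assuming a
 * hypothetical svgaWriteReg() helper for IOSpace access and the
 * svgaIsFifoRegValid() sketch defined earlier:
 */
extern void svgaWriteReg(uint32 reg, uint32 value);   /* hypothetical */

static inline void
svgaRingDoorbell(volatile uint32 *fifo)
{
   if (svgaIsFifoRegValid(fifo, SVGA_FIFO_BUSY) && !fifo[SVGA_FIFO_BUSY]) {
      fifo[SVGA_FIFO_BUSY] = 1;         /* Step 2: note that a SYNC is pending */
      svgaWriteReg(SVGA_REG_SYNC, 1);   /* Step 3: asynchronously wake the MKS */
   }
   /* Step 1: if SVGA_FIFO_BUSY already read as TRUE, nothing to do. */
}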
 
 
/*
* FIFO Capabilities
*
* Fence -- Fence register and command are supported
* Accel Front -- Front buffer only commands are supported
* Pitch Lock -- Pitch lock register is supported
* Video -- SVGA Video overlay units are supported
* Escape -- Escape command is supported
*
* XXX: Add longer descriptions for each capability, including a list
* of the new features that each capability provides.
*
* SVGA_FIFO_CAP_SCREEN_OBJECT --
*
* Provides dynamic multi-screen rendering, for improved Unity and
* multi-monitor modes. With Screen Object, the guest can
* dynamically create and destroy 'screens', which can represent
* Unity windows or virtual monitors. Screen Object also provides
* strong guarantees that DMA operations happen only when
* guest-initiated. Screen Object deprecates the BAR1 guest
* framebuffer (GFB) and all commands that work only with the GFB.
*
* New registers:
* FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID
*
* New 2D commands:
* DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN,
* BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY
*
* New 3D commands:
* BLIT_SURFACE_TO_SCREEN
*
* New guarantees:
*
* - The host will not read or write guest memory, including the GFB,
* except when explicitly initiated by a DMA command.
*
* - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK,
* is guaranteed to complete before any subsequent FENCEs.
*
* - All legacy commands which affect a Screen (UPDATE, PRESENT,
* PRESENT_READBACK) as well as new Screen blit commands will
* all behave consistently as blits, and memory will be read
* or written in FIFO order.
*
* For example, if you PRESENT from one SVGA3D surface to multiple
* places on the screen, the data copied will always be from the
* SVGA3D surface at the time the PRESENT was issued in the FIFO.
* This was not necessarily true on devices without Screen Object.
*
* This means that on devices that support Screen Object, the
* PRESENT_READBACK command should not be necessary unless you
* actually want to read back the results of 3D rendering into
* system memory. (And for that, the BLIT_SCREEN_TO_GMRFB
* command provides a strict superset of functionality.)
*
* - When a screen is resized, either using Screen Object commands or
* legacy multimon registers, its contents are preserved.
*
* SVGA_FIFO_CAP_GMR2 --
*
* Provides new commands to define and remap guest memory regions (GMR).
*
* New 2D commands:
* DEFINE_GMR2, REMAP_GMR2.
*
* SVGA_FIFO_CAP_3D_HWVERSION_REVISED --
*
* Indicates new register SVGA_FIFO_3D_HWVERSION_REVISED exists.
* This register may replace SVGA_FIFO_3D_HWVERSION on platforms
* that enforce graphics resource limits. This allows the platform
* to clear SVGA_FIFO_3D_HWVERSION and disable 3D in legacy guest
* drivers that do not limit their resources.
*
* Note this is an alias to SVGA_FIFO_CAP_GMR2 because these indicators
* are codependent (and thus we use a single capability bit).
*
* SVGA_FIFO_CAP_SCREEN_OBJECT_2 --
*
* Modifies the DEFINE_SCREEN command to include a guest provided
* backing store in GMR memory and the bytesPerLine for the backing
* store. This capability requires the use of a backing store when
* creating screen objects. However if SVGA_FIFO_CAP_SCREEN_OBJECT
* is present then backing stores are optional.
*
* SVGA_FIFO_CAP_DEAD --
*
* Drivers should not use this cap bit. It cannot be
* reused since some hosts already expose it.
*/
 
#define SVGA_FIFO_CAP_NONE 0
#define SVGA_FIFO_CAP_FENCE (1<<0)
#define SVGA_FIFO_CAP_ACCELFRONT (1<<1)
#define SVGA_FIFO_CAP_PITCHLOCK (1<<2)
#define SVGA_FIFO_CAP_VIDEO (1<<3)
#define SVGA_FIFO_CAP_CURSOR_BYPASS_3 (1<<4)
#define SVGA_FIFO_CAP_ESCAPE (1<<5)
#define SVGA_FIFO_CAP_RESERVE (1<<6)
#define SVGA_FIFO_CAP_SCREEN_OBJECT (1<<7)
#define SVGA_FIFO_CAP_GMR2 (1<<8)
#define SVGA_FIFO_CAP_3D_HWVERSION_REVISED SVGA_FIFO_CAP_GMR2
#define SVGA_FIFO_CAP_SCREEN_OBJECT_2 (1<<9)
#define SVGA_FIFO_CAP_DEAD (1<<10)
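
/*
 * A minimal sketch: the bits above are read from the
 * SVGA_FIFO_CAPABILITIES register (defined earlier in this header) and
 * tested with a mask before the corresponding feature is used.
 */
static inline Bool
svgaHasFifoCap(const volatile uint32 *fifo, uint32 cap)
{
   return (fifo[SVGA_FIFO_CAPABILITIES] & cap) != 0;
}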
 
 
/*
* FIFO Flags
*
* Accel Front -- Driver should use front buffer only commands
*/
 
#define SVGA_FIFO_FLAG_NONE 0
#define SVGA_FIFO_FLAG_ACCELFRONT (1<<0)
#define SVGA_FIFO_FLAG_RESERVED (1<<31) /* Internal use only */
 
/*
* FIFO reservation sentinel value
*/
 
#define SVGA_FIFO_RESERVED_UNKNOWN 0xffffffff
 
 
/*
* Video overlay support
*/
 
#define SVGA_NUM_OVERLAY_UNITS 32
 
 
/*
* Video capabilities that the guest is currently using
*/
 
#define SVGA_VIDEO_FLAG_COLORKEY 0x0001
 
 
/*
* Offsets for the video overlay registers
*/
 
enum {
SVGA_VIDEO_ENABLED = 0,
SVGA_VIDEO_FLAGS,
SVGA_VIDEO_DATA_OFFSET,
SVGA_VIDEO_FORMAT,
SVGA_VIDEO_COLORKEY,
SVGA_VIDEO_SIZE, /* Deprecated */
SVGA_VIDEO_WIDTH,
SVGA_VIDEO_HEIGHT,
SVGA_VIDEO_SRC_X,
SVGA_VIDEO_SRC_Y,
SVGA_VIDEO_SRC_WIDTH,
SVGA_VIDEO_SRC_HEIGHT,
SVGA_VIDEO_DST_X, /* Signed int32 */
SVGA_VIDEO_DST_Y, /* Signed int32 */
SVGA_VIDEO_DST_WIDTH,
SVGA_VIDEO_DST_HEIGHT,
SVGA_VIDEO_PITCH_1,
SVGA_VIDEO_PITCH_2,
SVGA_VIDEO_PITCH_3,
SVGA_VIDEO_DATA_GMRID, /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords (SVGA_ID_INVALID) */
SVGA_VIDEO_NUM_REGS
};
 
 
/*
* SVGA Overlay Units
*
* width and height relate to the entire source video frame.
* srcX, srcY, srcWidth and srcHeight represent subset of the source
* video frame to be displayed.
*/
 
typedef struct SVGAOverlayUnit {
uint32 enabled;
uint32 flags;
uint32 dataOffset;
uint32 format;
uint32 colorKey;
uint32 size;
uint32 width;
uint32 height;
uint32 srcX;
uint32 srcY;
uint32 srcWidth;
uint32 srcHeight;
int32 dstX;
int32 dstY;
uint32 dstWidth;
uint32 dstHeight;
uint32 pitches[3];
uint32 dataGMRId;
uint32 dstScreenId;
} SVGAOverlayUnit;
 
 
/*
* SVGAScreenObject --
*
* This is a new way to represent a guest's multi-monitor screen or
* Unity window. Screen objects are only supported if the
* SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set.
*
* If Screen Objects are supported, they can be used to fully
* replace the functionality provided by the framebuffer registers
* (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY.
*
* The screen object is a struct with guaranteed binary
* compatibility. New flags can be added, and the struct may grow,
* but existing fields must retain their meaning.
*
* SVGA_FIFO_CAP_SCREEN_OBJECT_2 adds required fields, including an
* SVGAGuestPtr that is used to back the screen contents. This
* memory must come from the GFB. The guest is not allowed to
* access the memory and doing so will have undefined results. The
* backing store is required to be page aligned and the size is
* padded to the next page boundary. The number of pages is:
* (bytesPerLine * size.height + PAGE_SIZE - 1) / PAGE_SIZE
*
* The pitch in the backingStore is required to be at least large
* enough to hold a 32bpp scanline. It is recommended that the
* driver pad bytesPerLine for a potential performance win.
*
* The cloneCount field is treated as a hint from the guest that
* the user wants this display to be cloned, cloneCount times. A
* value of zero means no cloning should happen.
*/
 
#define SVGA_SCREEN_MUST_BE_SET (1 << 0) /* Must be set or results undefined */
#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
#define SVGA_SCREEN_IS_PRIMARY (1 << 1) /* Guest considers this screen to be 'primary' */
#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* Guest is running a fullscreen app here */
 
/*
* Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When a screen is
* deactivated, its base layer is defined to lose all contents and
* become black. The backing store is optional for deactivated
* screens; when this flag is set, backingPtr and bytesPerLine are
* ignored.
*/
#define SVGA_SCREEN_DEACTIVATE (1 << 3)
 
/*
* Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When this flag is set
* the screen contents will be presented to the user as all black,
* though the base layer contents are preserved. The screen base layer
* can still be read and written as normal, though no visible effect
* will be seen by the user. When the flag is changed, the screen will
* be blanked or redrawn to the current contents as needed, without
* any extra commands from the driver. This flag only has an effect
* when the screen is not deactivated.
*/
#define SVGA_SCREEN_BLANKING (1 << 4)
 
typedef
struct SVGAScreenObject {
uint32 structSize; /* sizeof(SVGAScreenObject) */
uint32 id;
uint32 flags;
struct {
uint32 width;
uint32 height;
} size;
struct {
int32 x;
int32 y;
} root;
 
/*
* Added and required by SVGA_FIFO_CAP_SCREEN_OBJECT_2, optional
* with SVGA_FIFO_CAP_SCREEN_OBJECT.
*/
SVGAGuestImage backingStore;
uint32 cloneCount;
} SVGAScreenObject;
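
/*
 * A minimal sketch (hypothetical mode values): filling in a screen
 * object for screen #0 at the virtual-coordinate origin. structSize is
 * how the device tells old and new layouts of this struct apart, so it
 * must always be set.
 */
static inline void
svgaInitScreen(SVGAScreenObject *screen)
{
   screen->structSize = sizeof *screen;
   screen->id = 0;
   screen->flags = SVGA_SCREEN_MUST_BE_SET | SVGA_SCREEN_IS_PRIMARY;
   screen->size.width = 1024;
   screen->size.height = 768;
   screen->root.x = 0;
   screen->root.y = 0;
   /* backingStore and cloneCount are only required with SCREEN_OBJECT_2. */
}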
 
 
/*
* Commands in the command FIFO:
*
* Command IDs defined below are used for the traditional 2D FIFO
* communication (not all commands are available for all versions of the
* SVGA FIFO protocol).
*
* Note the holes in the command ID numbers: These commands have been
* deprecated, and the old IDs must not be reused.
*
* Command IDs from 1000 to 1999 are reserved for use by the SVGA3D
* protocol.
*
* Each command's parameters are described by the comments and
* structs below.
*/
 
typedef enum {
SVGA_CMD_INVALID_CMD = 0,
SVGA_CMD_UPDATE = 1,
SVGA_CMD_RECT_COPY = 3,
SVGA_CMD_DEFINE_CURSOR = 19,
SVGA_CMD_DEFINE_ALPHA_CURSOR = 22,
SVGA_CMD_UPDATE_VERBOSE = 25,
SVGA_CMD_FRONT_ROP_FILL = 29,
SVGA_CMD_FENCE = 30,
SVGA_CMD_ESCAPE = 33,
SVGA_CMD_DEFINE_SCREEN = 34,
SVGA_CMD_DESTROY_SCREEN = 35,
SVGA_CMD_DEFINE_GMRFB = 36,
SVGA_CMD_BLIT_GMRFB_TO_SCREEN = 37,
SVGA_CMD_BLIT_SCREEN_TO_GMRFB = 38,
SVGA_CMD_ANNOTATION_FILL = 39,
SVGA_CMD_ANNOTATION_COPY = 40,
SVGA_CMD_DEFINE_GMR2 = 41,
SVGA_CMD_REMAP_GMR2 = 42,
SVGA_CMD_MAX
} SVGAFifoCmdId;
 
#define SVGA_CMD_MAX_ARGS 64
 
 
/*
* SVGA_CMD_UPDATE --
*
* This is a DMA transfer which copies from the Guest Framebuffer
* (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which
* intersect with the provided virtual rectangle.
*
* This command does not support using arbitrary guest memory as a
* data source; it only works with the pre-defined GFB memory.
* This command also does not support signed virtual coordinates.
* If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with
* negative root x/y coordinates, the negative portion of those
* screens will not be reachable by this command.
*
* This command is not necessary when using framebuffer
* traces. Traces are automatically enabled if the SVGA FIFO is
* disabled, and you may explicitly enable/disable traces using
* SVGA_REG_TRACES. With traces enabled, any write to the GFB will
* automatically act as if a subsequent SVGA_CMD_UPDATE was issued.
*
* Traces and SVGA_CMD_UPDATE are the only supported ways to render
* pseudocolor screen updates. The newer Screen Object commands
* only support true color formats.
*
* Availability:
* Always available.
*/
 
typedef
struct SVGAFifoCmdUpdate {
uint32 x;
uint32 y;
uint32 width;
uint32 height;
} SVGAFifoCmdUpdate;
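
/*
 * A minimal sketch of emitting this command, borrowing the
 * vmw_fifo_reserve()/vmw_fifo_commit() pair used elsewhere in this
 * changeset: the FIFO stream is the 32-bit command id followed
 * immediately by the argument struct.
 */
static inline int
vmw_emit_update(struct vmw_private *dev_priv,
                uint32 x, uint32 y, uint32 width, uint32 height)
{
   struct {
      uint32 cmd;
      SVGAFifoCmdUpdate body;
   } *cmd;

   cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
   if (cmd == NULL)
      return -ENOMEM;

   cmd->cmd = SVGA_CMD_UPDATE;
   cmd->body.x = x;
   cmd->body.y = y;
   cmd->body.width = width;
   cmd->body.height = height;
   vmw_fifo_commit(dev_priv, sizeof(*cmd));
   return 0;
}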
 
 
/*
* SVGA_CMD_RECT_COPY --
*
* Perform a rectangular DMA transfer from one area of the GFB to
* another, and copy the result to any screens which intersect it.
*
* Availability:
* SVGA_CAP_RECT_COPY
*/
 
typedef
struct SVGAFifoCmdRectCopy {
uint32 srcX;
uint32 srcY;
uint32 destX;
uint32 destY;
uint32 width;
uint32 height;
} SVGAFifoCmdRectCopy;
 
 
/*
* SVGA_CMD_DEFINE_CURSOR --
*
* Provide a new cursor image, as an AND/XOR mask.
*
* The recommended way to position the cursor overlay is by using
* the SVGA_FIFO_CURSOR_* registers, supported by the
* SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
*
* Availability:
* SVGA_CAP_CURSOR
*/
 
typedef
struct SVGAFifoCmdDefineCursor {
uint32 id; /* Reserved, must be zero. */
uint32 hotspotX;
uint32 hotspotY;
uint32 width;
uint32 height;
uint32 andMaskDepth; /* Value must be 1 or equal to BITS_PER_PIXEL */
uint32 xorMaskDepth; /* Value must be 1 or equal to BITS_PER_PIXEL */
/*
* Followed by scanline data for AND mask, then XOR mask.
* Each scanline is padded to a 32-bit boundary.
*/
} SVGAFifoCmdDefineCursor;
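
/*
 * A minimal sketch: byte size of one of the masks that follow this
 * struct, given the 32-bit scanline padding described above.
 */
static inline uint32
svgaCursorMaskSize(uint32 width, uint32 height, uint32 maskDepth)
{
   uint32 scanlineBytes = ((width * maskDepth + 31) / 32) * 4;
   return scanlineBytes * height;
}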
 
 
/*
* SVGA_CMD_DEFINE_ALPHA_CURSOR --
*
* Provide a new cursor image, in 32-bit BGRA format.
*
* The recommended way to position the cursor overlay is by using
* the SVGA_FIFO_CURSOR_* registers, supported by the
* SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
*
* Availability:
* SVGA_CAP_ALPHA_CURSOR
*/
 
typedef
struct SVGAFifoCmdDefineAlphaCursor {
uint32 id; /* Reserved, must be zero. */
uint32 hotspotX;
uint32 hotspotY;
uint32 width;
uint32 height;
/* Followed by scanline data */
} SVGAFifoCmdDefineAlphaCursor;
 
 
/*
* SVGA_CMD_UPDATE_VERBOSE --
*
* Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
* 'reason' value, an opaque cookie which is used by internal
* debugging tools. Third party drivers should not use this
* command.
*
* Availability:
* SVGA_CAP_EXTENDED_FIFO
*/
 
typedef
struct SVGAFifoCmdUpdateVerbose {
uint32 x;
uint32 y;
uint32 width;
uint32 height;
uint32 reason;
} SVGAFifoCmdUpdateVerbose;
 
 
/*
* SVGA_CMD_FRONT_ROP_FILL --
*
* This is a hint which tells the SVGA device that the driver has
* just filled a rectangular region of the GFB with a solid
* color. Instead of reading these pixels from the GFB, the device
* can assume that they all equal 'color'. This is primarily used
* for remote desktop protocols.
*
* Availability:
* SVGA_FIFO_CAP_ACCELFRONT
*/
 
#define SVGA_ROP_COPY 0x03
 
typedef
struct SVGAFifoCmdFrontRopFill {
uint32 color; /* In the same format as the GFB */
uint32 x;
uint32 y;
uint32 width;
uint32 height;
uint32 rop; /* Must be SVGA_ROP_COPY */
} SVGAFifoCmdFrontRopFill;
 
 
/*
* SVGA_CMD_FENCE --
*
* Insert a synchronization fence. When the SVGA device reaches
* this command, it will copy the 'fence' value into the
* SVGA_FIFO_FENCE register. It will also compare the fence against
* SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the
* SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will
* raise this interrupt.
*
* Availability:
* SVGA_FIFO_FENCE for this command,
* SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL.
*/
 
typedef
struct {
uint32 fence;
} SVGAFifoCmdFence;
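
/*
 * A minimal sketch of testing fence completion: the device copies each
 * FENCE value into the SVGA_FIFO_FENCE register, and the signed
 * difference handles 32-bit wrap-around. A driver would poll this (or,
 * with SVGA_CAP_IRQMASK, sleep on the FENCE_GOAL interrupt) after
 * kicking the host.
 */
static inline Bool
svgaFencePassed(const volatile uint32 *fifo, uint32 fence)
{
   return (int32)(fifo[SVGA_FIFO_FENCE] - fence) >= 0;
}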
 
 
/*
* SVGA_CMD_ESCAPE --
*
* Send an extended or vendor-specific variable length command.
* This is used for video overlay, third party plugins, and
* internal debugging tools. See svga_escape.h
*
* Availability:
* SVGA_FIFO_CAP_ESCAPE
*/
 
typedef
struct SVGAFifoCmdEscape {
uint32 nsid;
uint32 size;
/* followed by 'size' bytes of data */
} SVGAFifoCmdEscape;
 
 
/*
* SVGA_CMD_DEFINE_SCREEN --
*
* Define or redefine an SVGAScreenObject. See the description of
* SVGAScreenObject above. The video driver is responsible for
* generating new screen IDs. They should be small non-negative
* integers. The virtual device will have an implementation
* specific upper limit on the number of screen IDs
* supported. Drivers are responsible for recycling IDs. The first
* valid ID is zero.
*
* - Interaction with other registers:
*
* For backwards compatibility, when the GFB mode registers (WIDTH,
* HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device
* deletes all screens other than screen #0, and redefines screen
* #0 according to the specified mode. Drivers that use
* SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0.
*
* If you use screen objects, do not use the legacy multi-mon
* registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
 
typedef
struct {
SVGAScreenObject screen; /* Variable-length according to version */
} SVGAFifoCmdDefineScreen;
 
 
/*
* SVGA_CMD_DESTROY_SCREEN --
*
* Destroy an SVGAScreenObject. Its ID is immediately available for
* re-use.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
 
typedef
struct {
uint32 screenId;
} SVGAFifoCmdDestroyScreen;
 
 
/*
* SVGA_CMD_DEFINE_GMRFB --
*
* This command sets a piece of SVGA device state called the
* Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a
* piece of light-weight state which identifies the location and
* format of an image in guest memory or in BAR1. The GMRFB has
* an arbitrary size, and it doesn't need to match the geometry
* of the GFB or any screen object.
*
* The GMRFB can be redefined as often as you like. You could
* always use the same GMRFB, you could redefine it before
* rendering from a different guest screen, or you could even
* redefine it before every blit.
*
* There are multiple ways to use this command. The simplest way is
* to use it to move the framebuffer either to elsewhere in the GFB
* (BAR1) memory region, or to a user-defined GMR. This lets a
* driver use a framebuffer allocated entirely out of normal system
* memory, which we encourage.
*
* Another way to use this command is to set up a ring buffer of
* updates in GFB memory. If a driver wants to ensure that no
* frames are skipped by the SVGA device, it is important that the
* driver not modify the source data for a blit until the device is
* done processing the command. One efficient way to accomplish
* this is to use a ring of small DMA buffers. Each buffer is used
* for one blit, then we move on to the next buffer in the
* ring. The FENCE mechanism is used to protect each buffer from
* re-use until the device is finished with that buffer's
* corresponding blit.
*
* This command does not affect the meaning of SVGA_CMD_UPDATE.
* UPDATEs always occur from the legacy GFB memory area. This
* command has no support for pseudocolor GMRFBs. Currently only
* true-color 15, 16, and 24-bit depths are supported. Future
* devices may expose capabilities for additional framebuffer
* formats.
*
* The default GMRFB value is undefined. Drivers must always send
* this command at least once before performing any blit from the
* GMRFB.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
 
typedef
struct {
SVGAGuestPtr ptr;
uint32 bytesPerLine;
SVGAGMRImageFormat format;
} SVGAFifoCmdDefineGMRFB;
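
/*
 * A minimal sketch of the DMA-buffer ring described above (hypothetical
 * driver-side bookkeeping): each slot records the fence issued after
 * its blit, and a slot is reused only once that fence has passed.
 */
#define SVGA_DMA_RING_SLOTS 4

struct svgaDmaRing {
   SVGAGuestPtr ptr[SVGA_DMA_RING_SLOTS];     /* one small buffer per slot */
   uint32 fence[SVGA_DMA_RING_SLOTS];         /* fence guarding each slot */
   uint32 next;
};

static inline SVGAGuestPtr *
svgaDmaRingNext(struct svgaDmaRing *ring, const volatile uint32 *fifo)
{
   uint32 i = ring->next++ % SVGA_DMA_RING_SLOTS;

   while (!svgaFencePassed(fifo, ring->fence[i]))
      ;   /* the device is still reading this slot's previous blit */
   return &ring->ptr[i];
}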
 
 
/*
* SVGA_CMD_BLIT_GMRFB_TO_SCREEN --
*
* This is a guest-to-host blit. It performs a DMA operation to
* copy a rectangular region of pixels from the current GMRFB to
* one or more Screen Objects.
*
* The destination coordinate may be specified relative to a
* screen's origin (if a screen ID is specified) or relative to the
* virtual coordinate system's origin (if the screen ID is
* SVGA_ID_INVALID). The actual destination may span zero or more
* screens, in the case of a virtual destination rect or a rect
* which extends off the edge of the specified screen.
*
* This command writes to the screen's "base layer": the underlying
* framebuffer which exists below any cursor or video overlays. No
* action is necessary to explicitly hide or update any overlays
* which exist on top of the updated region.
*
* The SVGA device is guaranteed to finish reading from the GMRFB
* by the time any subsequent FENCE commands are reached.
*
* This command consumes an annotation. See the
* SVGA_CMD_ANNOTATION_* commands for details.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
 
typedef
struct {
SVGASignedPoint srcOrigin;
SVGASignedRect destRect;
uint32 destScreenId;
} SVGAFifoCmdBlitGMRFBToScreen;
 
 
/*
* SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
*
* This is a host-to-guest blit. It performs a DMA operation to
* copy a rectangular region of pixels from a single Screen Object
* back to the current GMRFB.
*
* Usage note: This command should be used rarely. It will
* typically be inefficient, but it is necessary for some types of
* synchronization between 3D (GPU) and 2D (CPU) rendering into
* overlapping areas of a screen.
*
* The source coordinate is specified relative to a screen's
* origin. The provided screen ID must be valid. If any parameters
* are invalid, the resulting pixel values are undefined.
*
* This command reads the screen's "base layer". Overlays like
* video and cursor are not included, but any data which was sent
* using a blit-to-screen primitive will be available, no matter
* whether the data's original source was the GMRFB or the 3D
* acceleration hardware.
*
* Note that our guest-to-host blits and host-to-guest blits aren't
* symmetric in their current implementation. While the parameters
* are identical, host-to-guest blits are a lot less featureful.
* They do not support clipping: If the source parameters don't
* fully fit within a screen, the blit fails. They must originate
* from exactly one screen. Virtual coordinates are not directly
* supported.
*
* Host-to-guest blits do support the same set of GMRFB formats
* offered by guest-to-host blits.
*
* The SVGA device is guaranteed to finish writing to the GMRFB by
* the time any subsequent FENCE commands are reached.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
 
typedef
struct {
SVGASignedPoint destOrigin;
SVGASignedRect srcRect;
uint32 srcScreenId;
} SVGAFifoCmdBlitScreenToGMRFB;
 
 
/*
* SVGA_CMD_ANNOTATION_FILL --
*
* This is a blit annotation. This command stores a small piece of
* device state which is consumed by the next blit-to-screen
* command. The state is only cleared by commands which are
* specifically documented as consuming an annotation. Other
* commands (such as ESCAPEs for debugging) may intervene between
* the annotation and its associated blit.
*
* This annotation is a promise about the contents of the next
* blit: The video driver is guaranteeing that all pixels in that
* blit will have the same value, specified here as a color in
* SVGAColorBGRX format.
*
* The SVGA device can still render the blit correctly even if it
* ignores this annotation, but the annotation may allow it to
* perform the blit more efficiently, for example by ignoring the
* source data and performing a fill in hardware.
*
* This annotation is most important for performance when the
* user's display is being remoted over a network connection.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
 
typedef
struct {
SVGAColorBGRX color;
} SVGAFifoCmdAnnotationFill;
 
 
/*
* SVGA_CMD_ANNOTATION_COPY --
*
* This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more
* information about annotations.
*
* This annotation is a promise about the contents of the next
* blit: The video driver is guaranteeing that all pixels in that
* blit will have the same value as those which already exist at an
* identically-sized region on the same or a different screen.
*
* Note that the source pixels for the COPY in this annotation are
* sampled before applying the annotation's associated blit. They
* are allowed to overlap with the blit's destination pixels.
*
* The copy source rectangle is specified the same way as the blit
* destination: it can be a rectangle which spans zero or more
* screens, specified relative to either a screen or to the virtual
* coordinate system's origin. If the source rectangle includes
* pixels which are not from exactly one screen, the results are
* undefined.
*
* Availability:
* SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
*/
 
typedef
struct {
SVGASignedPoint srcOrigin;
uint32 srcScreenId;
} SVGAFifoCmdAnnotationCopy;
 
 
/*
* SVGA_CMD_DEFINE_GMR2 --
*
* Define guest memory region v2. See the description of GMRs above.
*
* Availability:
* SVGA_CAP_GMR2
*/
 
typedef
struct {
uint32 gmrId;
uint32 numPages;
} SVGAFifoCmdDefineGMR2;
 
 
/*
* SVGA_CMD_REMAP_GMR2 --
*
* Remap guest memory region v2. See the description of GMRs above.
*
* This command allows the guest to modify a portion of an existing GMR by
* invalidating it or reassigning it to different guest physical pages.
* The pages are identified by physical page number (PPN). The pages
* are assumed to be pinned and valid for DMA operations.
*
* Description of command flags:
*
* SVGA_REMAP_GMR2_VIA_GMR: If enabled, references a PPN list in a GMR.
* The PPN list must not overlap with the remap region (this can be
* handled trivially by referencing a separate GMR). If the flag is
* not set, the PPN list is appended to the SVGAFifoCmdRemapGMR2 command.
*
* SVGA_REMAP_GMR2_PPN64: If set, PPN list is in PPN64 format, otherwise
* it is in PPN32 format.
*
* SVGA_REMAP_GMR2_SINGLE_PPN: If set, PPN list contains a single entry.
* A single PPN can be used to invalidate a portion of a GMR or
* map it to a single guest scratch page.
*
* Availability:
* SVGA_CAP_GMR2
*/
 
typedef enum {
SVGA_REMAP_GMR2_PPN32 = 0,
SVGA_REMAP_GMR2_VIA_GMR = (1 << 0),
SVGA_REMAP_GMR2_PPN64 = (1 << 1),
SVGA_REMAP_GMR2_SINGLE_PPN = (1 << 2),
} SVGARemapGMR2Flags;
 
typedef
struct {
uint32 gmrId;
SVGARemapGMR2Flags flags;
uint32 offsetPages; /* offset in pages to begin remap */
uint32 numPages; /* number of pages to remap */
/*
* Followed by additional data depending on SVGARemapGMR2Flags.
*
* If flag SVGA_REMAP_GMR2_VIA_GMR is set, single SVGAGuestPtr follows.
* Otherwise an array of page descriptors in PPN32 or PPN64 format
* (according to flag SVGA_REMAP_GMR2_PPN64) follows. If flag
* SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
*/
} SVGAFifoCmdRemapGMR2;
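
/*
 * A minimal sketch of filling in this command for the single-PPN case
 * (SVGA_REMAP_GMR2_VIA_GMR not set, so the PPN list is appended inline
 * after the struct): every page in the range ends up aliasing one
 * guest scratch page.
 */
static inline void
svgaBuildRemapToScratch(void *out, uint32 gmrId,
                        uint32 numPages, uint32 scratchPPN)
{
   struct {
      uint32 cmd;
      SVGAFifoCmdRemapGMR2 body;
      uint32 ppn;                /* single PPN32 entry */
   } *remap = out;

   remap->cmd = SVGA_CMD_REMAP_GMR2;
   remap->body.gmrId = gmrId;
   remap->body.flags = SVGA_REMAP_GMR2_SINGLE_PPN;
   remap->body.offsetPages = 0;
   remap->body.numPages = numPages;
   remap->ppn = scratchPPN;
}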
 
#endif
/drivers/video/drm/vmwgfx/svga_types.h
0,0 → 1,45
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
/**
* Silly typedefs for the svga headers. Currently the headers are shared
* between all components that talk to svga, and as such they are in a
* completely different style and use weird defines.
*
* This file lets all the ugly be prefixed with svga*.
*/
 
#ifndef _SVGA_TYPES_H_
#define _SVGA_TYPES_H_
 
typedef uint16_t uint16;
typedef uint32_t uint32;
typedef uint8_t uint8;
typedef int32_t int32;
typedef bool Bool;
 
#endif
/drivers/video/drm/vmwgfx/vmw.lds
0,0 → 1,57
 
 
OUTPUT_FORMAT(pei-i386)
 
ENTRY("_drvEntry")
 
SECTIONS
{
. = SIZEOF_HEADERS;
. = ALIGN(__section_alignment__);
 
.text __image_base__ + ( __section_alignment__ < 0x1000 ? . : __section_alignment__ ) :
 
{
*(.text) *(.rdata)
}
 
.data ALIGN(__section_alignment__) :
{
*(.data)
}
 
.bss ALIGN(__section_alignment__):
{
*(.bss)
*(COMMON)
}
 
/DISCARD/ :
{
*(.debug$S)
*(.debug$T)
*(.debug$F)
*(.drectve)
*(.edata)
*(.eh_frame)
}
 
.idata ALIGN(__section_alignment__):
{
SORT(*)(.idata$2)
SORT(*)(.idata$3)
/* These zeroes mark the end of the import list. */
LONG (0); LONG (0); LONG (0); LONG (0); LONG (0);
SORT(*)(.idata$4)
SORT(*)(.idata$5)
SORT(*)(.idata$6)
SORT(*)(.idata$7)
}
 
.reloc ALIGN(__section_alignment__) :
{
*(.reloc)
}
 
}
 
/drivers/video/drm/vmwgfx/vmwgfx.asm
0,0 → 1,46
 
use32
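
; Loader stub: tries to obtain the DISPLAY service via sysfn 68.16; if
; that fails, it appends '.dll' to this program's own path and asks the
; kernel (sysfn 68.21) to load that file, forwarding the command line.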
 
db 'MENUET01'
dd 1
dd start
dd i_end
dd mem
dd mem
dd cmdline
dd path
 
start:
mov eax, 68
mov ebx, 16
mov ecx, sz_display
int 0x40
test eax, eax
jnz .done ; FIXME parse command line and
; call service
 
xor eax, eax
mov ecx, 1024
mov edi, path
cld
repne scasb
dec edi
mov [edi], dword '.dll'
mov [edi+4], al
mov eax, 68
mov ebx, 21
mov ecx, path
mov edx, cmdline
int 0x40
.done:
mov eax, -1
int 0x40
 
sz_display db 'DISPLAY',0
 
align 4
i_end:
cmdline rb 256
path rb 1024
rb 16 ; stack
mem:
/drivers/video/drm/vmwgfx/vmwgfx_buffer.c
0,0 → 1,352
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
 
static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_CACHED;
 
static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_NO_EVICT;
 
static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
TTM_PL_FLAG_CACHED;
 
static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
TTM_PL_FLAG_CACHED;
 
static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_NO_EVICT;
 
struct ttm_placement vmw_vram_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.placement = &vram_placement_flags,
.num_busy_placement = 1,
.busy_placement = &vram_placement_flags
};
 
static uint32_t vram_gmr_placement_flags[] = {
TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};
 
static uint32_t gmr_vram_placement_flags[] = {
VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};
 
struct ttm_placement vmw_vram_gmr_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 2,
.placement = vram_gmr_placement_flags,
.num_busy_placement = 1,
.busy_placement = &gmr_placement_flags
};
 
static uint32_t vram_gmr_ne_placement_flags[] = {
TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
 
struct ttm_placement vmw_vram_gmr_ne_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 2,
.placement = vram_gmr_ne_placement_flags,
.num_busy_placement = 1,
.busy_placement = &gmr_ne_placement_flags
};
 
struct ttm_placement vmw_vram_sys_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.placement = &vram_placement_flags,
.num_busy_placement = 1,
.busy_placement = &sys_placement_flags
};
 
struct ttm_placement vmw_vram_ne_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.placement = &vram_ne_placement_flags,
.num_busy_placement = 1,
.busy_placement = &vram_ne_placement_flags
};
 
struct ttm_placement vmw_sys_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.placement = &sys_placement_flags,
.num_busy_placement = 1,
.busy_placement = &sys_placement_flags
};
 
static uint32_t evictable_placement_flags[] = {
TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};
 
struct ttm_placement vmw_evictable_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 3,
.placement = evictable_placement_flags,
.num_busy_placement = 1,
.busy_placement = &sys_placement_flags
};
 
struct ttm_placement vmw_srf_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.num_busy_placement = 2,
.placement = &gmr_placement_flags,
.busy_placement = gmr_vram_placement_flags
};
 
struct vmw_ttm_tt {
struct ttm_tt ttm;
struct vmw_private *dev_priv;
int gmr_id;
};
 
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
 
vmw_be->gmr_id = bo_mem->start;
 
return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
ttm->num_pages, vmw_be->gmr_id);
}
 
static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
 
vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
return 0;
}
 
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
 
ttm_tt_fini(ttm);
kfree(vmw_be);
}
 
static struct ttm_backend_func vmw_ttm_func = {
.bind = vmw_ttm_bind,
.unbind = vmw_ttm_unbind,
.destroy = vmw_ttm_destroy,
};
 
struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
struct vmw_ttm_tt *vmw_be;
 
vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
return NULL;
 
vmw_be->ttm.func = &vmw_ttm_func;
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
 
if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
kfree(vmw_be);
return NULL;
}
 
return &vmw_be->ttm;
}
 
int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
}
 
int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
switch (type) {
case TTM_PL_SYSTEM:
/* System memory */
 
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_CACHED;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
man->func = &ttm_bo_manager_func;
man->gpu_offset = 0;
man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_CACHED;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case VMW_PL_GMR:
/*
* "Guest Memory Regions" is an aperture like feature with
* one slot per bo. There is an upper limit of the number of
* slots as well as the bo size.
*/
man->func = &vmw_gmrid_manager_func;
man->gpu_offset = 0;
man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_CACHED;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
return -EINVAL;
}
return 0;
}
 
void vmw_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
*placement = vmw_sys_placement;
}
 
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
// struct ttm_object_file *tfile =
// vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
 
return 0; //vmw_user_dmabuf_verify_access(bo, tfile);
}
 
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
 
mem->bus.addr = NULL;
mem->bus.is_iomem = false;
mem->bus.offset = 0;
mem->bus.size = mem->num_pages << PAGE_SHIFT;
mem->bus.base = 0;
if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
return -EINVAL;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
case VMW_PL_GMR:
return 0;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = dev_priv->vram_start;
mem->bus.is_iomem = true;
break;
default:
return -EINVAL;
}
return 0;
}
 
static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}
 
static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
return 0;
}
 
/**
* FIXME: We're using the old vmware polling method to sync.
* Do this with fences instead.
*/
 
static void *vmw_sync_obj_ref(void *sync_obj)
{
 
return (void *)
vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
}
 
static void vmw_sync_obj_unref(void **sync_obj)
{
vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}
 
static int vmw_sync_obj_flush(void *sync_obj)
{
vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
return 0;
}
 
static bool vmw_sync_obj_signaled(void *sync_obj)
{
return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
DRM_VMW_FENCE_FLAG_EXEC);
 
}
 
static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
DRM_VMW_FENCE_FLAG_EXEC,
lazy, interruptible,
VMW_FENCE_WAIT_TIMEOUT);
}
 
struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create,
.ttm_tt_populate = &ttm_pool_populate,
.ttm_tt_unpopulate = &ttm_pool_unpopulate,
.invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
.evict_flags = vmw_evict_flags,
.move = NULL,
.verify_access = vmw_verify_access,
.sync_obj_signaled = vmw_sync_obj_signaled,
.sync_obj_wait = vmw_sync_obj_wait,
.sync_obj_flush = vmw_sync_obj_flush,
.sync_obj_unref = vmw_sync_obj_unref,
.sync_obj_ref = vmw_sync_obj_ref,
.move_notify = NULL,
.swap_notify = NULL,
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
};
/drivers/video/drm/vmwgfx/vmwgfx_context.c
0,0 → 1,276
/**************************************************************************
*
* Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"
 
struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
};
 
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);
 
static uint64_t vmw_user_context_size;
 
static const struct vmw_user_resource_conv user_context_conv = {
.object_type = VMW_RES_CONTEXT,
.base_obj_to_res = vmw_user_context_base_to_res,
.res_free = vmw_user_context_free
};
 
const struct vmw_user_resource_conv *user_context_converter =
&user_context_conv;
 
 
static const struct vmw_res_func vmw_legacy_context_func = {
.res_type = vmw_res_context,
.needs_backup = false,
.may_evict = false,
.type_name = "legacy contexts",
.backup_placement = NULL,
.create = NULL,
.destroy = NULL,
.bind = NULL,
.unbind = NULL
};
 
/**
* Context management:
*/
 
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
 
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDestroyContext body;
} *cmd;
 
 
vmw_execbuf_release_pinned_bo(dev_priv);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
return;
}
 
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
cmd->header.size = cpu_to_le32(sizeof(cmd->body));
cmd->body.cid = cpu_to_le32(res->id);
 
vmw_fifo_commit(dev_priv, sizeof(*cmd));
vmw_3d_resource_dec(dev_priv, false);
}
 
static int vmw_context_init(struct vmw_private *dev_priv,
struct vmw_resource *res,
void (*res_free) (struct vmw_resource *res))
{
int ret;
 
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDefineContext body;
} *cmd;
 
ret = vmw_resource_init(dev_priv, res, false,
res_free, &vmw_legacy_context_func);
 
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate a resource id.\n");
goto out_early;
}
 
if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
DRM_ERROR("Out of hw context ids.\n");
vmw_resource_unreference(&res);
return -ENOMEM;
}
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
vmw_resource_unreference(&res);
return -ENOMEM;
}
 
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
cmd->header.size = cpu_to_le32(sizeof(cmd->body));
cmd->body.cid = cpu_to_le32(res->id);
 
vmw_fifo_commit(dev_priv, sizeof(*cmd));
(void) vmw_3d_resource_inc(dev_priv, false);
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
 
out_early:
if (res_free == NULL)
kfree(res);
else
res_free(res);
return ret;
}
 
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
int ret;
 
if (unlikely(res == NULL))
return NULL;
 
ret = vmw_context_init(dev_priv, res, NULL);
 
return (ret == 0) ? res : NULL;
}
 
/**
* User-space context management:
*/
 
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
return &(container_of(base, struct vmw_user_context, base)->res);
}
 
static void vmw_user_context_free(struct vmw_resource *res)
{
struct vmw_user_context *ctx =
container_of(res, struct vmw_user_context, res);
struct vmw_private *dev_priv = res->dev_priv;
 
// ttm_base_object_kfree(ctx, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_context_size);
}
 
/**
* This function is called when user space has no more references on the
* base object. It releases the base-object's reference on the resource object.
*/
 
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct vmw_user_context *ctx =
container_of(base, struct vmw_user_context, base);
struct vmw_resource *res = &ctx->res;
 
*p_base = NULL;
vmw_resource_unreference(&res);
}
 
#if 0
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 
return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}
 
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_context *ctx;
struct vmw_resource *res;
struct vmw_resource *tmp;
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
 
 
/*
* Approximate idr memory usage with 128 bytes. It will be limited
* by the maximum number of contexts anyway.
*/
 
if (unlikely(vmw_user_context_size == 0))
vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
 
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
 
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_context_size,
false, true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for context"
" creation.\n");
goto out_unlock;
}
 
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (unlikely(ctx == NULL)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_context_size);
ret = -ENOMEM;
goto out_unlock;
}
 
res = &ctx->res;
ctx->base.shareable = false;
ctx->base.tfile = NULL;
 
/*
* From here on, the destructor takes over resource freeing.
*/
 
ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
if (unlikely(ret != 0))
goto out_unlock;
 
tmp = vmw_resource_reference(&ctx->res);
ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
&vmw_user_context_base_release, NULL);
 
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
goto out_err;
}
 
arg->cid = ctx->base.hash.key;
out_err:
vmw_resource_unreference(&res);
out_unlock:
ttm_read_unlock(&vmaster->lock);
return ret;
 
}
#endif
/drivers/video/drm/vmwgfx/vmwgfx_dmabuf.c
0,0 → 1,320
/**************************************************************************
*
* Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#include <drm/ttm/ttm_placement.h>
 
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
 
 
/**
* vmw_dmabuf_to_placement - Validate a buffer to placement.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @placement: Placement to validate the buffer to.
* @interruptible: Use interruptible wait.
*
* May only be called by the current master since it assumes that the
* master lock is the current master's lock.
* This function takes the master's lock in write mode.
* Flushes and unpins the query bo to avoid failures.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
struct ttm_placement *placement,
bool interruptible)
{
// struct vmw_master *vmaster = dev_priv->active_master;
struct ttm_buffer_object *bo = &buf->base;
int ret;
 
// ret = ttm_write_lock(&vmaster->lock, interruptible);
// if (unlikely(ret != 0))
// return ret;
 
vmw_execbuf_release_pinned_bo(dev_priv);
 
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err;
 
ret = ttm_bo_validate(bo, placement, interruptible, false);
 
ttm_bo_unreserve(bo);
 
err:
// ttm_write_unlock(&vmaster->lock);
return ret;
}
 
/**
* vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
*
* May only be called by the current master since it assumes that the
* master lock is the current master's lock.
* This function takes the master's lock in write mode.
* Flushes and unpins the query bo if @pin == true to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @pin: Pin buffer if true.
* @interruptible: Use interruptible wait.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool pin, bool interruptible)
{
// struct vmw_master *vmaster = dev_priv->active_master;
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement *placement;
int ret;
 
// ret = ttm_write_lock(&vmaster->lock, interruptible);
// if (unlikely(ret != 0))
// return ret;
 
if (pin)
vmw_execbuf_release_pinned_bo(dev_priv);
 
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err;
 
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
* If there is no space in VRAM and GMR ids are all used up,
* start evicting GMRs to make room. If the DMA buffer can't be
* used as a GMR, this will return -ENOMEM.
*/
 
if (pin)
placement = &vmw_vram_gmr_ne_placement;
else
placement = &vmw_vram_gmr_placement;
 
ret = ttm_bo_validate(bo, placement, interruptible, false);
if (likely(ret == 0) || ret == -ERESTARTSYS)
goto err_unreserve;
 
 
/**
* If that failed, try VRAM again, this time evicting
* previous contents.
*/
 
if (pin)
placement = &vmw_vram_ne_placement;
else
placement = &vmw_vram_placement;
 
ret = ttm_bo_validate(bo, placement, interruptible, false);
 
err_unreserve:
ttm_bo_unreserve(bo);
err:
// ttm_write_unlock(&vmaster->lock);
return ret;
}
 
/**
* vmw_dmabuf_to_vram - Move a buffer to vram.
*
* May only be called by the current master since it assumes that the
* master lock is the current master's lock.
* This function takes the master's lock in write mode.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @pin: Pin buffer in vram if true.
* @interruptible: Use interruptible wait.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool pin, bool interruptible)
{
struct ttm_placement *placement;
 
if (pin)
placement = &vmw_vram_ne_placement;
else
placement = &vmw_vram_placement;
 
return vmw_dmabuf_to_placement(dev_priv, buf,
placement,
interruptible);
}
 
/**
* vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
*
* May only be called by the current master since it assumes that the
* master lock is the current master's lock.
* This function takes the master's lock in write mode.
* Flushes and unpins the query bo if @pin == true to avoid failures.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
* @pin: Pin buffer in vram if true.
* @interruptible: Use interruptible wait.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool pin, bool interruptible)
{
// struct vmw_master *vmaster = dev_priv->active_master;
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement placement;
int ret = 0;
 
if (pin)
placement = vmw_vram_ne_placement;
else
placement = vmw_vram_placement;
placement.lpfn = bo->num_pages;
 
// ret = ttm_write_lock(&vmaster->lock, interruptible);
// if (unlikely(ret != 0))
// return ret;
 
if (pin)
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err_unlock;
 
/* Is this buffer already in vram but not at the start of it? */
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
bo->mem.start > 0)
(void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
 
ret = ttm_bo_validate(bo, &placement, interruptible, false);
 
/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->offset != 0);
 
ttm_bo_unreserve(bo);
err_unlock:
// ttm_write_unlock(&vmaster->lock);
 
return ret;
}
 
 
/**
* vmw_dmabuf_unpin - Unpin the given buffer; does not move it.
*
* May only be called by the current master since it assumes that the
* master lock is the current master's lock.
* This function takes the master's lock in write mode.
*
* @dev_priv: Driver private.
* @buf: DMA buffer to unpin.
* @interruptible: Use interruptible wait.
*
* Returns
* -ERESTARTSYS if interrupted by a signal.
*/
int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool interruptible)
{
/*
* We could in theory early out if the buffer is
* unpinned, but we need to lock and reserve the buffer
* anyway, so we don't gain much by that.
*/
return vmw_dmabuf_to_placement(dev_priv, buf,
&vmw_evictable_placement,
interruptible);
}
 
 
/**
* vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
* of a buffer.
*
* @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
* @ptr: SVGAGuestPtr returning the result.
*/
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
SVGAGuestPtr *ptr)
{
if (bo->mem.mem_type == TTM_PL_VRAM) {
ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
ptr->offset = bo->offset;
} else {
ptr->gmrId = bo->mem.start;
ptr->offset = 0;
}
}
 
 
/**
* vmw_bo_pin - Pin or unpin a buffer object without moving it.
*
* @bo: The buffer object. Must be reserved, and present either in VRAM
* or GMR memory.
* @pin: Whether to pin or unpin.
*
*/
void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
{
uint32_t pl_flags;
struct ttm_placement placement;
uint32_t old_mem_type = bo->mem.mem_type;
int ret;
 
lockdep_assert_held(&bo->resv->lock.base);
BUG_ON(old_mem_type != TTM_PL_VRAM &&
old_mem_type != VMW_PL_GMR);
 
pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
if (pin)
pl_flags |= TTM_PL_FLAG_NO_EVICT;
 
memset(&placement, 0, sizeof(placement));
placement.num_placement = 1;
placement.placement = &pl_flags;
 
ret = ttm_bo_validate(bo, &placement, false, true);
 
BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
/drivers/video/drm/vmwgfx/vmwgfx_drv.c
0,0 → 1,1119
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <linux/module.h>
 
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
//#include <drm/ttm/ttm_module.h>
 
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0
 
#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600
 
struct drm_device *main_device;
 
struct drm_file *drm_file_handlers[256];
 
#if 0
/**
* Fully encoded drm commands. Might move to vmw_drm.h
*/
 
#define DRM_IOCTL_VMW_GET_PARAM \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
struct drm_vmw_cursor_bypass_arg)
 
#define DRM_IOCTL_VMW_CONTROL_STREAM \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
struct drm_vmw_stream_arg)
 
#define DRM_IOCTL_VMW_CREATE_CONTEXT \
DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
struct drm_vmw_update_layout_arg)
 
/**
* The core DRM version of this macro doesn't account for
* DRM_COMMAND_BASE.
*/
 
#define VMW_IOCTL_DEF(ioctl, func, flags) \
[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
 
/**
* Ioctl definitions.
*/
 
static struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
vmw_kms_cursor_bypass_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
 
VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
 
VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
vmw_fence_obj_signaled_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_FENCE_EVENT,
vmw_fence_event_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
DRM_AUTH | DRM_UNLOCKED),
 
/* These allow direct access to the framebuffer; mark as master only. */
VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
vmw_present_readback_ioctl,
DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
vmw_kms_update_layout_ioctl,
DRM_MASTER | DRM_UNLOCKED),
};
#endif
 
static struct pci_device_id vmw_pci_id_list[] = {
{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
{0, 0, 0}
};
 
static int enable_fbdev = 1;
 
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
 
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
 
static void vmw_print_capabilities(uint32_t capabilities)
{
DRM_INFO("Capabilities:\n");
if (capabilities & SVGA_CAP_RECT_COPY)
DRM_INFO(" Rect copy.\n");
if (capabilities & SVGA_CAP_CURSOR)
DRM_INFO(" Cursor.\n");
if (capabilities & SVGA_CAP_CURSOR_BYPASS)
DRM_INFO(" Cursor bypass.\n");
if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
DRM_INFO(" Cursor bypass 2.\n");
if (capabilities & SVGA_CAP_8BIT_EMULATION)
DRM_INFO(" 8bit emulation.\n");
if (capabilities & SVGA_CAP_ALPHA_CURSOR)
DRM_INFO(" Alpha cursor.\n");
if (capabilities & SVGA_CAP_3D)
DRM_INFO(" 3D.\n");
if (capabilities & SVGA_CAP_EXTENDED_FIFO)
DRM_INFO(" Extended Fifo.\n");
if (capabilities & SVGA_CAP_MULTIMON)
DRM_INFO(" Multimon.\n");
if (capabilities & SVGA_CAP_PITCHLOCK)
DRM_INFO(" Pitchlock.\n");
if (capabilities & SVGA_CAP_IRQMASK)
DRM_INFO(" Irq mask.\n");
if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
DRM_INFO(" Display Topology.\n");
if (capabilities & SVGA_CAP_GMR)
DRM_INFO(" GMR.\n");
if (capabilities & SVGA_CAP_TRACES)
DRM_INFO(" Traces.\n");
if (capabilities & SVGA_CAP_GMR2)
DRM_INFO(" GMR2.\n");
if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
DRM_INFO(" Screen Object 2.\n");
}
 
 
/**
* vmw_dummy_query_bo_prepare - Initialize a query result structure at
* the start of a buffer object.
*
* @dev_priv: The device private structure.
*
* This function will idle the buffer using an uninterruptible wait, then
* map the first page and initialize a pending occlusion query result
* structure, and finally unmap the buffer.
*
* TODO: Since we're only mapping a single page, we should optimize the map
* to use kmap_atomic / iomap_atomic.
*/
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result;
bool dummy;
int ret;
struct ttm_bo_device *bdev = &dev_priv->bdev;
struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
 
ttm_bo_reserve(bo, false, false, false, 0);
spin_lock(&bdev->fence_lock);
ret = 0; //ttm_bo_wait(bo, false, false, false);
spin_unlock(&bdev->fence_lock);
if (unlikely(ret != 0))
(void) vmw_fallback_wait(dev_priv, false, true, 0, false,
10*HZ);
/*
ret = ttm_bo_kmap(bo, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
result->totalSize = sizeof(*result);
result->state = SVGA3D_QUERYSTATE_PENDING;
result->result32 = 0xff;
ttm_bo_kunmap(&map);
} else
DRM_ERROR("Dummy query buffer map failed.\n");
*/
ttm_bo_unreserve(bo);
}
 
 
/**
* vmw_dummy_query_bo_create - create a bo to hold a dummy query result
*
* @dev_priv: A device private structure.
*
* This function creates a small buffer object that holds the query
* result for dummy queries emitted as query barriers.
* No interruptible waits are done within this function.
*
* Returns an error if bo creation fails.
*/
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
return ttm_bo_create(&dev_priv->bdev,
PAGE_SIZE,
ttm_bo_type_device,
&vmw_vram_sys_placement,
0, false, NULL,
&dev_priv->dummy_query_bo);
}
 
 
static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
ENTER();
 
ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
if (unlikely(ret != 0)) {
DRM_ERROR("Unable to initialize FIFO.\n");
return ret;
}
// vmw_fence_fifo_up(dev_priv->fman);
// ret = vmw_dummy_query_bo_create(dev_priv);
// if (unlikely(ret != 0))
// goto out_no_query_bo;
// vmw_dummy_query_bo_prepare(dev_priv);
 
LEAVE();
 
return 0;
 
out_no_query_bo:
vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
return ret;
}
 
static void vmw_release_device(struct vmw_private *dev_priv)
{
/*
* Previous destructions should've released
* the pinned bo.
*/
 
BUG_ON(dev_priv->pinned_bo != NULL);
 
ttm_bo_unref(&dev_priv->dummy_query_bo);
vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
 
/**
* Increase the 3d resource refcount.
* If the count was previously zero, initialize the fifo, switching to svga
* mode. Note that the master holds a ref as well, and may request an
* explicit switch to svga mode if fb is not running, using @unhide_svga.
*/
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
bool unhide_svga)
{
int ret = 0;
 
ENTER();
 
mutex_lock(&dev_priv->release_mutex);
if (unlikely(dev_priv->num_3d_resources++ == 0)) {
ret = vmw_request_device(dev_priv);
if (unlikely(ret != 0))
--dev_priv->num_3d_resources;
} else if (unhide_svga) {
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) &
~SVGA_REG_ENABLE_HIDE);
mutex_unlock(&dev_priv->hw_mutex);
}
 
mutex_unlock(&dev_priv->release_mutex);
LEAVE();
return ret;
}
 
/**
* Decrease the 3d resource refcount.
* If the count reaches zero, disable the fifo, switching to vga mode.
* Note that the master holds a refcount as well, and may request an
* explicit switch to vga mode when it releases its refcount to account
* for the situation of an X server vt switch to VGA with 3d resources
* active.
*/
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
bool hide_svga)
{
int32_t n3d;
 
mutex_lock(&dev_priv->release_mutex);
if (unlikely(--dev_priv->num_3d_resources == 0))
vmw_release_device(dev_priv);
else if (hide_svga) {
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) |
SVGA_REG_ENABLE_HIDE);
mutex_unlock(&dev_priv->hw_mutex);
}
 
n3d = (int32_t) dev_priv->num_3d_resources;
mutex_unlock(&dev_priv->release_mutex);
 
BUG_ON(n3d < 0);
}
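
/*
 * Usage sketch (illustrative): the inc/dec pair brackets any code that
 * needs the device in svga mode with a live fifo. The error handling
 * shown is hypothetical.
 *
 *	ret = vmw_3d_resource_inc(dev_priv, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... emit fifo commands ...
 *	vmw_3d_resource_dec(dev_priv, false);
 */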
 
/**
* Sets the initial_[width|height] fields on the given vmw_private.
*
* It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
* clamping the values against the fb_max_[width|height] fields and
* VMW_MIN_INITIAL_[WIDTH|HEIGHT]. If the values appear to be invalid,
* they are set to VMW_MIN_INITIAL_[WIDTH|HEIGHT].
*/
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
uint32_t width;
uint32_t height;
 
width = vmw_read(dev_priv, SVGA_REG_WIDTH);
height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
 
width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
 
if (width > dev_priv->fb_max_width ||
height > dev_priv->fb_max_height) {
 
/*
* This is a host error and shouldn't occur.
*/
 
width = VMW_MIN_INITIAL_WIDTH;
height = VMW_MIN_INITIAL_HEIGHT;
}
 
dev_priv->initial_width = width;
dev_priv->initial_height = height;
}
 
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
struct vmw_private *dev_priv;
int ret;
uint32_t svga_id;
enum vmw_res_type i;
 
ENTER();
 
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (unlikely(dev_priv == NULL)) {
DRM_ERROR("Failed allocating a device private struct.\n");
return -ENOMEM;
}
 
pci_set_master(dev->pdev);
 
dev_priv->dev = dev;
dev_priv->vmw_chipset = chipset;
dev_priv->last_read_seqno = (uint32_t) -100;
mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
rwlock_init(&dev_priv->resource_lock);
 
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
INIT_LIST_HEAD(&dev_priv->res_lru[i]);
}
 
mutex_init(&dev_priv->init_mutex);
init_waitqueue_head(&dev_priv->fence_queue);
init_waitqueue_head(&dev_priv->fifo_queue);
dev_priv->fence_queue_waiters = 0;
atomic_set(&dev_priv->fifo_queue_waiters, 0);
 
dev_priv->used_memory_size = 0;
 
dev_priv->io_start = pci_resource_start(dev->pdev, 0);
dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
 
printk("io: %x vram: %x mmio: %x\n",dev_priv->io_start,
dev_priv->vram_start,dev_priv->mmio_start);
 
dev_priv->enable_fb = enable_fbdev;
 
mutex_lock(&dev_priv->hw_mutex);
 
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
svga_id = vmw_read(dev_priv, SVGA_REG_ID);
if (svga_id != SVGA_ID_2) {
ret = -ENOSYS;
DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
mutex_unlock(&dev_priv->hw_mutex);
goto out_err0;
}
 
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
 
dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
 
vmw_get_initial_size(dev_priv);
 
if (dev_priv->capabilities & SVGA_CAP_GMR) {
dev_priv->max_gmr_descriptors =
vmw_read(dev_priv,
SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
dev_priv->max_gmr_ids =
vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
}
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
dev_priv->max_gmr_pages =
vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
dev_priv->memory_size =
vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
dev_priv->memory_size -= dev_priv->vram_size;
} else {
/*
* An arbitrary limit of 512MiB on surface
* memory. But all HWV8 hardware supports GMR2.
*/
dev_priv->memory_size = 512*1024*1024;
}
 
mutex_unlock(&dev_priv->hw_mutex);
 
vmw_print_capabilities(dev_priv->capabilities);
 
if (dev_priv->capabilities & SVGA_CAP_GMR) {
DRM_INFO("Max GMR ids is %u\n",
(unsigned)dev_priv->max_gmr_ids);
DRM_INFO("Max GMR descriptors is %u\n",
(unsigned)dev_priv->max_gmr_descriptors);
}
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max number of GMR pages is %u\n",
(unsigned)dev_priv->max_gmr_pages);
DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
(unsigned)dev_priv->memory_size / 1024);
}
DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
dev_priv->vram_start, dev_priv->vram_size / 1024);
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
dev_priv->mmio_start, dev_priv->mmio_size / 1024);
 
ret = vmw_ttm_global_init(dev_priv);
if (unlikely(ret != 0))
goto out_err0;
 
 
 
 
ret = ttm_bo_device_init(&dev_priv->bdev,
dev_priv->bo_global_ref.ref.object,
&vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
goto out_err1;
}
 
ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
(dev_priv->vram_size >> PAGE_SHIFT));
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing memory manager for VRAM.\n");
goto out_err2;
}
 
dev_priv->has_gmr = true;
if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
dev_priv->max_gmr_ids) != 0) {
DRM_INFO("No GMR memory available. "
"Graphics memory resources are very limited.\n");
dev_priv->has_gmr = false;
}
 
dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
dev_priv->mmio_size);
 
if (unlikely(dev_priv->mmio_virt == NULL)) {
ret = -ENOMEM;
DRM_ERROR("Failed mapping MMIO.\n");
goto out_err3;
}
 
/* Need mmio memory to check for fifo pitchlock cap. */
if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
!(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
!vmw_fifo_have_pitchlock(dev_priv)) {
ret = -ENOSYS;
DRM_ERROR("Hardware has no pitchlock\n");
goto out_err4;
}
 
dev_priv->tdev = ttm_object_device_init
(dev_priv->mem_global_ref.object, 12);
 
if (unlikely(dev_priv->tdev == NULL)) {
DRM_ERROR("Unable to initialize TTM object management.\n");
ret = -ENOMEM;
goto out_err4;
}
 
dev->dev_private = dev_priv;
 
#if 0
 
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
ret = drm_irq_install(dev);
if (ret != 0) {
DRM_ERROR("Failed installing irq: %d\n", ret);
goto out_no_irq;
}
}
 
dev_priv->fman = vmw_fence_manager_init(dev_priv);
if (unlikely(dev_priv->fman == NULL))
goto out_no_fman;
 
vmw_kms_save_vga(dev_priv);
#endif
 
/* Start kms and overlay systems, needs fifo. */
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
 
if (dev_priv->enable_fb) {
ret = vmw_3d_resource_inc(dev_priv, true);
if (unlikely(ret != 0))
goto out_no_fifo;
// vmw_fb_init(dev_priv);
}
 
return 0;
 
out_no_fifo:
// vmw_overlay_close(dev_priv);
// vmw_kms_close(dev_priv);
out_no_kms:
// vmw_kms_restore_vga(dev_priv);
// vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
// if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
// drm_irq_uninstall(dev_priv->dev);
out_no_irq:
// if (dev_priv->stealth)
// pci_release_region(dev->pdev, 2);
// else
// pci_release_regions(dev->pdev);
out_no_device:
// ttm_object_device_release(&dev_priv->tdev);
out_err4:
// iounmap(dev_priv->mmio_virt);
out_err3:
// arch_phys_wc_del(dev_priv->mmio_mtrr);
// if (dev_priv->has_gmr)
// (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
// (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
// (void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
// vmw_ttm_global_release(dev_priv);
out_err0:
// for (i = vmw_res_context; i < vmw_res_max; ++i)
// idr_destroy(&dev_priv->res_idr[i]);
 
kfree(dev_priv);
return ret;
}
 
#if 0
static int vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
enum vmw_res_type i;
 
unregister_pm_notifier(&dev_priv->pm_nb);
 
if (dev_priv->ctx.res_ht_initialized)
drm_ht_remove(&dev_priv->ctx.res_ht);
if (dev_priv->ctx.cmd_bounce)
vfree(dev_priv->ctx.cmd_bounce);
if (dev_priv->enable_fb) {
vmw_fb_close(dev_priv);
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, false);
}
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
drm_irq_uninstall(dev_priv->dev);
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
pci_release_regions(dev->pdev);
 
ttm_object_device_release(&dev_priv->tdev);
iounmap(dev_priv->mmio_virt);
arch_phys_wc_del(dev_priv->mmio_mtrr);
if (dev_priv->has_gmr)
(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
(void)ttm_bo_device_release(&dev_priv->bdev);
vmw_ttm_global_release(dev_priv);
 
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
 
kfree(dev_priv);
 
return 0;
}
 
static void vmw_preclose(struct drm_device *dev,
struct drm_file *file_priv)
{
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
struct vmw_private *dev_priv = vmw_priv(dev);
 
vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}
 
static void vmw_postclose(struct drm_device *dev,
struct drm_file *file_priv)
{
struct vmw_fpriv *vmw_fp;
 
vmw_fp = vmw_fpriv(file_priv);
ttm_object_file_release(&vmw_fp->tfile);
if (vmw_fp->locked_master)
drm_master_put(&vmw_fp->locked_master);
kfree(vmw_fp);
}
#endif
 
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_fpriv *vmw_fp;
int ret = -ENOMEM;
 
vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
if (unlikely(vmw_fp == NULL))
return ret;
 
INIT_LIST_HEAD(&vmw_fp->fence_events);
// vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
// if (unlikely(vmw_fp->tfile == NULL))
// goto out_no_tfile;
 
file_priv->driver_priv = vmw_fp;
// dev_priv->bdev.dev_mapping = dev->dev_mapping;
 
return 0;
 
out_no_tfile:
kfree(vmw_fp);
return ret;
}
 
#if 0
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
unsigned int nr = DRM_IOCTL_NR(cmd);
 
/*
* Do extra checking on driver private ioctls.
*/
 
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
&& (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
struct drm_ioctl_desc *ioctl =
&vmw_ioctls[nr - DRM_COMMAND_BASE];
 
if (unlikely(ioctl->cmd_drv != cmd)) {
DRM_ERROR("Invalid command format, ioctl %d\n",
nr - DRM_COMMAND_BASE);
return -EINVAL;
}
}
 
return drm_ioctl(filp, cmd, arg);
}
 
static int vmw_firstopen(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
dev_priv->is_opened = true;
 
return 0;
}
 
static void vmw_lastclose(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_crtc *crtc;
struct drm_mode_set set;
int ret;
 
/**
* Do nothing on the lastclose call from drm_unload.
*/
 
if (!dev_priv->is_opened)
return;
 
dev_priv->is_opened = false;
set.x = 0;
set.y = 0;
set.fb = NULL;
set.mode = NULL;
set.connectors = NULL;
set.num_connectors = 0;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
set.crtc = crtc;
ret = drm_mode_set_config_internal(&set);
WARN_ON(ret != 0);
}
 
}
 
static void vmw_master_init(struct vmw_master *vmaster)
{
ttm_lock_init(&vmaster->lock);
INIT_LIST_HEAD(&vmaster->fb_surf);
mutex_init(&vmaster->fb_surf_mutex);
}
 
static int vmw_master_create(struct drm_device *dev,
struct drm_master *master)
{
struct vmw_master *vmaster;
 
vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
if (unlikely(vmaster == NULL))
return -ENOMEM;
 
vmw_master_init(vmaster);
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
master->driver_priv = vmaster;
 
return 0;
}
 
static void vmw_master_destroy(struct drm_device *dev,
struct drm_master *master)
{
struct vmw_master *vmaster = vmw_master(master);
 
master->driver_priv = NULL;
kfree(vmaster);
}
 
 
static int vmw_master_set(struct drm_device *dev,
struct drm_file *file_priv,
bool from_open)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
struct vmw_master *active = dev_priv->active_master;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret = 0;
 
if (!dev_priv->enable_fb) {
ret = vmw_3d_resource_inc(dev_priv, true);
if (unlikely(ret != 0))
return ret;
vmw_kms_save_vga(dev_priv);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 0);
mutex_unlock(&dev_priv->hw_mutex);
}
 
if (active) {
BUG_ON(active != &dev_priv->fbdev_master);
ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
if (unlikely(ret != 0))
goto out_no_active_lock;
 
ttm_lock_set_kill(&active->lock, true, SIGTERM);
ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
if (unlikely(ret != 0)) {
DRM_ERROR("Unable to clean VRAM on "
"master drop.\n");
}
 
dev_priv->active_master = NULL;
}
 
ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
if (!from_open) {
ttm_vt_unlock(&vmaster->lock);
BUG_ON(vmw_fp->locked_master != file_priv->master);
drm_master_put(&vmw_fp->locked_master);
}
 
dev_priv->active_master = vmaster;
 
return 0;
 
out_no_active_lock:
if (!dev_priv->enable_fb) {
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
}
return ret;
}
 
static void vmw_master_drop(struct drm_device *dev,
struct drm_file *file_priv,
bool from_release)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
 
/**
* Make sure the master doesn't disappear while we have
* it locked.
*/
 
vmw_fp->locked_master = drm_master_get(file_priv->master);
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
vmw_execbuf_release_pinned_bo(dev_priv);
 
if (unlikely((ret != 0))) {
DRM_ERROR("Unable to lock TTM at VT switch.\n");
drm_master_put(&vmw_fp->locked_master);
}
 
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 
if (!dev_priv->enable_fb) {
ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
if (unlikely(ret != 0))
DRM_ERROR("Unable to clean VRAM on master drop.\n");
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
}
 
dev_priv->active_master = &dev_priv->fbdev_master;
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 
if (dev_priv->enable_fb)
vmw_fb_on(dev_priv);
}
 
#endif
 
 
 
 
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_MODESET,
// .load = vmw_driver_load,
// .unload = vmw_driver_unload,
// .firstopen = vmw_firstopen,
// .lastclose = vmw_lastclose,
.irq_preinstall = vmw_irq_preinstall,
.irq_postinstall = vmw_irq_postinstall,
// .irq_uninstall = vmw_irq_uninstall,
.irq_handler = vmw_irq_handler,
// .get_vblank_counter = vmw_get_vblank_counter,
// .enable_vblank = vmw_enable_vblank,
// .disable_vblank = vmw_disable_vblank,
// .ioctls = vmw_ioctls,
// .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
// .dma_quiescent = NULL, /*vmw_dma_quiescent, */
// .master_create = vmw_master_create,
// .master_destroy = vmw_master_destroy,
// .master_set = vmw_master_set,
// .master_drop = vmw_master_drop,
.open = vmw_driver_open,
// .preclose = vmw_preclose,
// .postclose = vmw_postclose,
 
// .dumb_create = vmw_dumb_create,
// .dumb_map_offset = vmw_dumb_map_offset,
// .dumb_destroy = vmw_dumb_destroy,
 
// .fops = &vmwgfx_driver_fops,
// .name = VMWGFX_DRIVER_NAME,
// .desc = VMWGFX_DRIVER_DESC,
// .date = VMWGFX_DRIVER_DATE,
// .major = VMWGFX_DRIVER_MAJOR,
// .minor = VMWGFX_DRIVER_MINOR,
// .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};
 
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static struct drm_device drm_dev;
static struct drm_file drm_file;
 
struct drm_device *dev;
struct drm_file *priv;
 
int ret;
 
dev = &drm_dev;
priv = &drm_file;
 
drm_file_handlers[0] = priv;
 
// ret = pci_enable_device(pdev);
// if (ret)
// goto err_g1;
 
pci_set_master(pdev);
 
// if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
// printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
// goto err_g2;
// }
 
dev->pdev = pdev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
 
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
 
spin_lock_init(&dev->count_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
 
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->fbs);
INIT_LIST_HEAD(&priv->event_list);
init_waitqueue_head(&priv->event_wait);
priv->event_space = 4096; /* set aside 4k for event buffer */
 
idr_init(&priv->object_idr);
spin_lock_init(&priv->table_lock);
 
dev->driver = &driver;
 
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
goto err_g4;
}
 
ret = vmw_driver_load(dev, ent->driver_data );
 
if (ret)
goto err_g4;
 
// ret = init_display_kms(dev);
 
if (ret)
goto err_g4;
 
return 0;
 
err_g4:
//err_g3:
// if (drm_core_check_feature(dev, DRIVER_MODESET))
// drm_put_minor(&dev->control);
//err_g2:
// pci_disable_device(pdev);
//err_g1:
 
return ret;
}
 
int vmw_init(void)
{
static pci_dev_t device;
const struct pci_device_id *ent;
int err;
 
ent = find_pci_device(&device, vmw_pci_id_list);
if (unlikely(ent == NULL)) {
dbgprintf("device not found\n");
return -ENODEV;
}
 
 
DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
device.pci_dev.device);
 
drm_global_init();
 
err = drm_get_dev(&device.pci_dev, ent);
 
return err;
}
 
 
 
//module_init(vmwgfx_init);
//module_exit(vmwgfx_exit);
 
MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
 
/drivers/video/drm/vmwgfx/vmwgfx_drv.h
0,0 → 1,787
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_
 
#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
//#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
//#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
//#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
 
#define VMWGFX_DRIVER_DATE "20120209"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 4
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
 
#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
 
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
 
#define ioread32(addr) readl(addr)
 
static inline void outl(u32 v, u16 port)
{
asm volatile("outl %0,%1" : : "a" (v), "dN" (port));
}
static inline u32 inl(u16 port)
{
u32 v;
asm volatile("inl %1,%0" : "=a" (v) : "dN" (port));
return v;
}
 
struct ttm_lock{};
struct ww_acquire_ctx{};
 
struct vmw_fpriv {
// struct drm_master *locked_master;
struct ttm_object_file *tfile;
struct list_head fence_events;
};
 
struct vmw_dma_buffer {
struct ttm_buffer_object base;
struct list_head res_list;
};
 
/**
* struct vmw_validate_buffer - Carries validation info about buffers.
*
* @base: Validation info for TTM.
* @hash: Hash entry for quick lookup of the TTM buffer object.
*
* This structure contains also driver private validation info
* on top of the info needed by TTM.
*/
struct vmw_validate_buffer {
struct ttm_validate_buffer base;
struct drm_hash_item hash;
};
 
struct vmw_res_func;
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
int id;
bool avail;
unsigned long backup_size;
bool res_dirty; /* Protected by backup buffer reserved */
bool backup_dirty; /* Protected by backup buffer reserved */
struct vmw_dma_buffer *backup;
unsigned long backup_offset;
const struct vmw_res_func *func;
struct list_head lru_head; /* Protected by the resource lock */
struct list_head mob_head; /* Protected by @backup reserved */
void (*res_free) (struct vmw_resource *res);
void (*hw_destroy) (struct vmw_resource *res);
};
 
enum vmw_res_type {
vmw_res_context,
vmw_res_surface,
vmw_res_stream,
vmw_res_max
};
 
struct vmw_cursor_snooper {
struct drm_crtc *crtc;
size_t age;
uint32_t *image;
};
 
struct vmw_framebuffer;
struct vmw_surface_offset;
 
struct vmw_surface {
struct vmw_resource res;
uint32_t flags;
uint32_t format;
uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
struct drm_vmw_size base_size;
struct drm_vmw_size *sizes;
uint32_t num_sizes;
bool scanout;
/* TODO: so far just an extra pointer */
struct vmw_cursor_snooper snooper;
struct vmw_surface_offset *offsets;
SVGA3dTextureFilter autogen_filter;
uint32_t multisample_count;
};
 
struct vmw_marker_queue {
struct list_head head;
struct timespec lag;
struct timespec lag_time;
spinlock_t lock;
};
 
struct vmw_fifo_state {
unsigned long reserved_size;
__le32 *dynamic_buffer;
__le32 *static_buffer;
unsigned long static_buffer_size;
bool using_bounce_buffer;
uint32_t capabilities;
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
struct vmw_marker_queue marker_queue;
};
 
struct vmw_relocation {
SVGAGuestPtr *location;
uint32_t index;
};
 
/**
* struct vmw_res_cache_entry - resource information cache entry
*
* @valid: Whether the entry is valid, which also implies that the execbuf
* code holds a reference to the resource, and it's placed on the
* validation list.
* @handle: User-space handle of a resource.
* @res: Non-ref-counted pointer to the resource.
*
* Used to avoid frequent repeated user-space handle lookups of the
* same resource.
*/
struct vmw_res_cache_entry {
bool valid;
uint32_t handle;
struct vmw_resource *res;
struct vmw_resource_val_node *node;
};
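
/*
 * Illustrative lookup flow (a sketch, not the actual execbuf code):
 * during command parsing the cache is consulted before the expensive
 * user-space handle lookup, roughly:
 *
 *	if (cache->valid && cache->handle == handle)
 *		res = cache->res;
 *	else
 *		look up the handle, then fill @handle, @res and @valid.
 */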
 
struct vmw_sw_context{
struct drm_open_hash res_ht;
bool res_ht_initialized;
bool kernel; /**< is the call made from the kernel */
struct ttm_object_file *tfile;
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
uint32_t cur_val_buf;
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
struct list_head resource_list;
uint32_t fence_flags;
struct ttm_buffer_object *cur_query_bo;
struct list_head res_relocations;
uint32_t *buf_start;
struct vmw_res_cache_entry res_cache[vmw_res_max];
struct vmw_resource *last_query_ctx;
bool needs_post_query_barrier;
struct vmw_resource *error_resource;
};
 
struct vmw_legacy_display;
struct vmw_overlay;
 
struct vmw_master {
struct ttm_lock lock;
struct mutex fb_surf_mutex;
struct list_head fb_surf;
};
 
struct vmw_vga_topology_state {
uint32_t width;
uint32_t height;
uint32_t primary;
uint32_t pos_x;
uint32_t pos_y;
};
 
struct vmw_private {
struct ttm_bo_device bdev;
struct ttm_bo_global_ref bo_global_ref;
struct drm_global_reference mem_global_ref;
 
struct vmw_fifo_state fifo;
 
struct drm_device *dev;
unsigned long vmw_chipset;
unsigned int io_start;
uint32_t vram_start;
uint32_t vram_size;
uint32_t mmio_start;
uint32_t mmio_size;
uint32_t fb_max_width;
uint32_t fb_max_height;
uint32_t initial_width;
uint32_t initial_height;
__le32 __iomem *mmio_virt;
int mmio_mtrr;
uint32_t capabilities;
uint32_t max_gmr_descriptors;
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
uint32_t memory_size;
bool has_gmr;
struct mutex hw_mutex;
 
/*
* VGA registers.
*/
 
struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
uint32_t vga_width;
uint32_t vga_height;
uint32_t vga_bpp;
uint32_t vga_bpl;
uint32_t vga_pitchlock;
 
uint32_t num_displays;
 
/*
* Framebuffer info.
*/
 
void *fb_info;
struct vmw_legacy_display *ldu_priv;
struct vmw_screen_object_display *sou_priv;
struct vmw_overlay *overlay_priv;
 
/*
* Context and surface management.
*/
 
rwlock_t resource_lock;
struct idr res_idr[vmw_res_max];
/*
* Block lastclose from racing with firstopen.
*/
 
struct mutex init_mutex;
 
/*
* A resource manager for kernel-only surfaces and
* contexts.
*/
 
struct ttm_object_device *tdev;
 
/*
* Fencing and IRQs.
*/
 
atomic_t marker_seq;
wait_queue_head_t fence_queue;
wait_queue_head_t fifo_queue;
int fence_queue_waiters; /* Protected by hw_mutex */
int goal_queue_waiters; /* Protected by hw_mutex */
atomic_t fifo_queue_waiters;
uint32_t last_read_seqno;
spinlock_t irq_lock;
struct vmw_fence_manager *fman;
uint32_t irq_mask;
 
/*
* Device state
*/
 
uint32_t traces_state;
uint32_t enable_state;
uint32_t config_done_state;
 
/**
* Execbuf
*/
/**
* Protected by the cmdbuf mutex.
*/
 
struct vmw_sw_context ctx;
struct mutex cmdbuf_mutex;
 
/**
* Operating mode.
*/
 
bool stealth;
bool is_opened;
bool enable_fb;
 
/**
* Master management.
*/
 
// struct vmw_master *active_master;
// struct vmw_master fbdev_master;
// struct notifier_block pm_nb;
bool suspended;
 
struct mutex release_mutex;
uint32_t num_3d_resources;
 
/*
* Query processing. These members
* are protected by the cmdbuf mutex.
*/
 
struct ttm_buffer_object *dummy_query_bo;
struct ttm_buffer_object *pinned_bo;
uint32_t query_cid;
uint32_t query_cid_valid;
bool dummy_query_bo_pinned;
 
/*
* Surface swapping. The "surface_lru" list is protected by the
* resource lock in order to be able to destroy a surface and take
* it off the lru atomically. "used_memory_size" is currently
* protected by the cmdbuf mutex for simplicity.
*/
 
struct list_head res_lru[vmw_res_max];
uint32_t used_memory_size;
};
 
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
return container_of(res, struct vmw_surface, res);
}
 
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
return (struct vmw_private *)dev->dev_private;
}
 
static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
return (struct vmw_fpriv *)file_priv->driver_priv;
}
 
static inline struct vmw_master *vmw_master(struct drm_master *master)
{
return (struct vmw_master *) master->driver_priv;
}
 
static inline void vmw_write(struct vmw_private *dev_priv,
unsigned int offset, uint32_t value)
{
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
}
 
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
unsigned int offset)
{
uint32_t val;
 
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
return val;
}
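
/*
 * Example (illustrative only): the index/value port pair implements the
 * SVGA register interface, so a read-modify-write of a device register
 * looks like:
 *
 *	uint32_t enable = vmw_read(dev_priv, SVGA_REG_ENABLE);
 *	vmw_write(dev_priv, SVGA_REG_ENABLE, enable | SVGA_REG_ENABLE_HIDE);
 *
 * Callers elsewhere in the driver hold dev_priv->hw_mutex around such
 * sequences to keep the two port accesses atomic.
 */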
 
 
int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
 
/**
* GMR utilities - vmwgfx_gmr.c
*/
 
extern int vmw_gmr_bind(struct vmw_private *dev_priv,
struct page *pages[],
unsigned long num_pages,
int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
 
/**
* Resource utilities - vmwgfx_resource.c
*/
struct vmw_user_resource_conv;
extern const struct vmw_user_resource_conv *user_surface_converter;
extern const struct vmw_user_resource_conv *user_context_converter;
 
extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
int id,
struct vmw_resource **p_res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
const struct vmw_user_resource_conv *converter,
struct vmw_resource **p_res);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
struct vmw_surface *srf);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct vmw_dma_buffer *vmw_bo,
size_t size, struct ttm_placement *placement,
bool interuptable,
void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
struct ttm_object_file *tfile);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t id, struct vmw_dma_buffer **out);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t *inout_id,
struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
struct vmw_dma_buffer *new_backup,
unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
 
/**
* DMA buffer helper routines - vmwgfx_dmabuf.c
*/
extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo,
struct ttm_placement *placement,
bool interruptible);
extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool pin, bool interruptible);
extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool pin, bool interruptible);
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo,
bool pin, bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo,
bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr);
extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
 
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
*/
 
extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
//extern unsigned int vmw_fops_poll(struct file *filp,
// struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
 
/**
* Fifo utilities - vmwgfx_fifo.c
*/
 
extern int vmw_fifo_init(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
uint32_t *seqno);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
uint32_t cid);
 
/**
* TTM glue - vmwgfx_ttm_glue.c
*/
 
extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 
/**
* TTM buffer object driver - vmwgfx_buffer.c
*/
 
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
 
/**
* Command submission - vmwgfx_execbuf.c
*/
 
extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands,
void *kernel_commands,
uint32_t command_size,
uint64_t throttle_us,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
 
extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
struct vmw_private *dev_priv,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct vmw_fpriv *vmw_fp,
int ret,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj *fence,
uint32_t fence_handle);
 
/**
* IRQs and waiting - vmwgfx_irq.c
*/
 
extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
uint32_t seqno, bool interruptible,
unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
bool lazy,
bool fifo_idle,
uint32_t seqno,
bool interruptible,
unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
 
/**
* Rudimentary fence-like objects currently used only for throttling -
* vmwgfx_marker.c
*/
 
extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
struct vmw_marker_queue *queue, uint32_t us);
 
/**
* Kernel framebuffer - vmwgfx_fb.c
*/
 
int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);
 
/**
* Kernel modesetting - vmwgfx_kms.c
*/
 
int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
int vmw_enable_vblank(struct drm_device *dev, int crtc);
void vmw_disable_vblank(struct drm_device *dev, int crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct vmw_surface *surface,
uint32_t sid, int32_t destX, int32_t destY,
struct drm_vmw_rect *clips,
uint32_t num_clips);
int vmw_kms_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct drm_vmw_rect *clips,
uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
 
int vmw_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle);
/**
* Overlay control - vmwgfx_overlay.c
*/
 
int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
 
/**
* GMR Id manager
*/
 
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
 
/**
* Inline helper functions
*/
 
static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
struct vmw_surface *tmp_srf = *srf;
struct vmw_resource *res = &tmp_srf->res;
*srf = NULL;
 
vmw_resource_unreference(&res);
}
 
static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
(void) vmw_resource_reference(&srf->res);
return srf;
}
 
static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
struct vmw_dma_buffer *tmp_buf = *buf;
 
*buf = NULL;
if (tmp_buf != NULL) {
struct ttm_buffer_object *bo = &tmp_buf->base;
 
ttm_bo_unref(&bo);
}
}
 
static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
if (ttm_bo_reference(&buf->base))
return buf;
return NULL;
}
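
/*
 * Usage sketch (hypothetical): references taken with
 * vmw_dmabuf_reference() are balanced with vmw_dmabuf_unreference(),
 * which also clears the caller's pointer:
 *
 *	struct vmw_dma_buffer *ref = vmw_dmabuf_reference(buf);
 *	...
 *	vmw_dmabuf_unreference(&ref);	// ref is NULL afterwards
 */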
 
static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}
 
 
#endif
/drivers/video/drm/vmwgfx/vmwgfx_execbuf.c
0,0 → 1,1779
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
 
#define VMW_RES_HT_ORDER 12
 
/**
* struct vmw_resource_relocation - Relocation info for resources
*
* @head: List head for the software context's relocation list.
* @res: Non-ref-counted pointer to the resource.
* @offset: Offset, in 4-byte units, into the command buffer where the
* id that needs fixup is located.
*/
struct vmw_resource_relocation {
struct list_head head;
const struct vmw_resource *res;
unsigned long offset;
};
 
/**
* struct vmw_resource_val_node - Validation info for resources
*
* @head: List head for the software context's resource list.
* @hash: Hash entry for quick resource to val_node lookup.
* @res: Ref-counted pointer to the resource.
* @new_backup: Refcounted pointer to the new backup buffer.
* @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
* @first_usage: Set to true the first time the resource is referenced in
* the command stream.
* @no_buffer_needed: Resources do not need to allocate buffer backup on
* reservation. The command stream will provide one.
*/
struct vmw_resource_val_node {
struct list_head head;
struct drm_hash_item hash;
struct vmw_resource *res;
struct vmw_dma_buffer *new_backup;
unsigned long new_backup_offset;
bool first_usage;
bool no_buffer_needed;
};
 
/**
* vmw_resource_list_unreserve - unreserve resources previously reserved for
* command submission.
*
* @list: list of resources to unreserve.
* @backoff: Whether command submission failed.
*/
static void vmw_resource_list_unreserve(struct list_head *list,
bool backoff)
{
struct vmw_resource_val_node *val;
 
list_for_each_entry(val, list, head) {
struct vmw_resource *res = val->res;
struct vmw_dma_buffer *new_backup =
backoff ? NULL : val->new_backup;
 
vmw_resource_unreserve(res, new_backup,
val->new_backup_offset);
vmw_dmabuf_unreference(&val->new_backup);
}
}
 
 
/**
* vmw_resource_val_add - Add a resource to the software context's
* resource list if it's not already on it.
*
* @sw_context: Pointer to the software context.
* @res: Pointer to the resource.
* @p_node: On successful return, points to a valid pointer to a
* struct vmw_resource_val_node, if non-NULL on entry.
*/
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
struct vmw_resource *res,
struct vmw_resource_val_node **p_node)
{
struct vmw_resource_val_node *node;
struct drm_hash_item *hash;
int ret;
 
if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
&hash) == 0)) {
node = container_of(hash, struct vmw_resource_val_node, hash);
node->first_usage = false;
if (unlikely(p_node != NULL))
*p_node = node;
return 0;
}
 
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (unlikely(node == NULL)) {
DRM_ERROR("Failed to allocate a resource validation "
"entry.\n");
return -ENOMEM;
}
 
node->hash.key = (unsigned long) res;
ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to initialize a resource validation "
"entry.\n");
kfree(node);
return ret;
}
list_add_tail(&node->head, &sw_context->resource_list);
node->res = vmw_resource_reference(res);
node->first_usage = true;
 
if (unlikely(p_node != NULL))
*p_node = node;
 
return 0;
}
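 
/*
* Usage sketch (illustrative only, not driver code): adding the same
* resource twice yields a single validation node, and only the first
* call marks a first usage:
*
* struct vmw_resource_val_node *node;
* int ret;
*
* ret = vmw_resource_val_add(sw_context, res, &node);
* ret = vmw_resource_val_add(sw_context, res, &node);
*
* After the second call, node->first_usage is false and the resource's
* refcount has been raised exactly once, so the single unreference in
* vmw_resource_list_unreference() balances it.
*/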
 
/**
* vmw_resource_relocation_add - Add a relocation to the relocation list
*
* @list: Pointer to head of relocation list.
* @res: The resource.
* @offset: Offset into the command buffer currently being parsed where the
* id that needs fixup is located. Granularity is 4 bytes.
*/
static int vmw_resource_relocation_add(struct list_head *list,
const struct vmw_resource *res,
unsigned long offset)
{
struct vmw_resource_relocation *rel;
 
rel = kmalloc(sizeof(*rel), GFP_KERNEL);
if (unlikely(rel == NULL)) {
DRM_ERROR("Failed to allocate a resource relocation.\n");
return -ENOMEM;
}
 
rel->res = res;
rel->offset = offset;
list_add_tail(&rel->head, list);
 
return 0;
}
 
/**
* vmw_resource_relocations_free - Free all relocations on a list
*
* @list: Pointer to the head of the relocation list.
*/
static void vmw_resource_relocations_free(struct list_head *list)
{
struct vmw_resource_relocation *rel, *n;
 
list_for_each_entry_safe(rel, n, list, head) {
list_del(&rel->head);
kfree(rel);
}
}
 
/**
* vmw_resource_relocations_apply - Apply all relocations on a list
*
* @cb: Pointer to the start of the command buffer being patched. This need
* not be the same buffer as the one being parsed when the relocation
* list was built, but the contents must be the same modulo the
* resource ids.
* @list: Pointer to the head of the relocation list.
*/
static void vmw_resource_relocations_apply(uint32_t *cb,
struct list_head *list)
{
struct vmw_resource_relocation *rel;
 
list_for_each_entry(rel, list, head)
cb[rel->offset] = rel->res->id;
}
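 
/*
* Worked example (clarifying note, not driver code): a relocation
* recorded with offset 5 patches the sixth 32-bit word of the copied
* command stream, i.e. cb[5] = rel->res->id, which is byte offset 20.
* The offsets recorded by vmw_cmd_res_check() are computed as
* "id - sw_context->buf_start", a pointer difference in uint32_t
* units, which matches this granularity.
*/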
 
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
return 0; //capable(CAP_SYS_ADMIN) ? : -EINVAL;
}
 
static int vmw_cmd_ok(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
return 0;
}
 
/**
* vmw_bo_to_validate_list - add a bo to a validate list
*
* @sw_context: The software context used for this command submission batch.
* @bo: The buffer object to add.
* @p_val_node: If non-NULL, will be updated with the validate node number
* on return.
*
* Returns -EINVAL if the limit on the number of buffer objects per command
* submission is reached.
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct ttm_buffer_object *bo,
uint32_t *p_val_node)
{
uint32_t val_node;
struct vmw_validate_buffer *vval_buf;
struct ttm_validate_buffer *val_buf;
struct drm_hash_item *hash;
int ret;
 
if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
&hash) == 0)) {
vval_buf = container_of(hash, struct vmw_validate_buffer,
hash);
val_buf = &vval_buf->base;
val_node = vval_buf - sw_context->val_bufs;
} else {
val_node = sw_context->cur_val_buf;
if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
DRM_ERROR("Max number of DMA buffers per submission "
"exceeded.\n");
return -EINVAL;
}
vval_buf = &sw_context->val_bufs[val_node];
vval_buf->hash.key = (unsigned long) bo;
ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to initialize a buffer validation "
"entry.\n");
return ret;
}
++sw_context->cur_val_buf;
val_buf = &vval_buf->base;
val_buf->bo = ttm_bo_reference(bo);
val_buf->reserved = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
}
 
sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
 
if (p_val_node)
*p_val_node = val_node;
 
return 0;
}
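 
/*
* Note (clarifying, not driver code): since val_bufs is a flat array,
* the validate node number of a cached entry above is recovered by
* pointer arithmetic:
*
* val_node = vval_buf - sw_context->val_bufs;
*
* i.e. an element index rather than a byte offset. Relocations later
* use this index to find the validated buffer as
* sw_context->val_bufs[reloc->index] (see vmw_apply_relocations()).
*/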
 
/**
* vmw_resources_reserve - Reserve all resources on the sw_context's
* resource list.
*
* @sw_context: Pointer to the software context.
*
* Note that since VMware's command submission is currently protected by the
* cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
* only a single thread at a time will attempt this.
*/
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
int ret;
 
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
 
ret = vmw_resource_reserve(res, val->no_buffer_needed);
if (unlikely(ret != 0))
return ret;
 
if (res->backup) {
struct ttm_buffer_object *bo = &res->backup->base;
 
ret = vmw_bo_to_validate_list
(sw_context, bo, NULL);
 
if (unlikely(ret != 0))
return ret;
}
}
return 0;
}
 
/**
* vmw_resources_validate - Validate all resources on the sw_context's
* resource list.
*
* @sw_context: Pointer to the software context.
*
* Before this function is called, all resource backup buffers must have
* been validated.
*/
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
int ret;
 
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
 
ret = vmw_resource_validate(res);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to validate resource.\n");
return ret;
}
}
return 0;
}
 
/**
* vmw_cmd_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @converter: User-space visible type-specific information.
* @id: Pointer to the location in the command buffer currently being
* parsed, where the user-space resource id handle is located.
* @p_val: If non-NULL, points on successful return to the validation node
* of the looked-up resource.
*/
static int vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
enum vmw_res_type res_type,
const struct vmw_user_resource_conv *converter,
uint32_t *id,
struct vmw_resource_val_node **p_val)
{
struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[res_type];
struct vmw_resource *res;
struct vmw_resource_val_node *node;
int ret;
 
if (*id == SVGA3D_INVALID_ID)
return 0;
 
/*
* Fastpath in case of repeated commands referencing the same
* resource
*/
 
if (likely(rcache->valid && *id == rcache->handle)) {
const struct vmw_resource *res = rcache->res;
 
rcache->node->first_usage = false;
if (p_val)
*p_val = rcache->node;
 
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
id - sw_context->buf_start);
}
 
ret = vmw_user_resource_lookup_handle(dev_priv,
sw_context->tfile,
*id,
converter,
&res);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n",
(unsigned) *id);
// dump_stack();
return ret;
}
 
rcache->valid = true;
rcache->res = res;
rcache->handle = *id;
 
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
id - sw_context->buf_start);
if (unlikely(ret != 0))
goto out_no_reloc;
 
ret = vmw_resource_val_add(sw_context, res, &node);
if (unlikely(ret != 0))
goto out_no_reloc;
 
rcache->node = node;
if (p_val)
*p_val = node;
vmw_resource_unreference(&res);
return 0;
 
out_no_reloc:
BUG_ON(sw_context->error_resource != NULL);
sw_context->error_resource = res;
 
return ret;
}
 
/**
* vmw_cmd_cid_check - Check a command header for valid context information.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @header: A command header with an embedded user-space context handle.
*
* Convenience function: Call vmw_cmd_res_check with the user-space context
* handle embedded in @header.
*/
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_cid_cmd {
SVGA3dCmdHeader header;
__le32 cid;
} *cmd;
 
cmd = container_of(header, struct vmw_cid_cmd, header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->cid, NULL);
}
 
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSetRenderTarget body;
} *cmd;
int ret;
 
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
 
cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.target.sid, NULL);
return ret;
}
 
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceCopy body;
} *cmd;
int ret;
 
cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.dest.sid, NULL);
}
 
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceStretchBlt body;
} *cmd;
int ret;
 
cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.dest.sid, NULL);
}
 
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdBlitSurfaceToScreen body;
} *cmd;
 
cmd = container_of(header, struct vmw_sid_cmd, header);
 
if (unlikely(!sw_context->kernel)) {
DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
return -EPERM;
}
 
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.srcImage.sid, NULL);
}
 
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdPresent body;
} *cmd;
 
 
cmd = container_of(header, struct vmw_sid_cmd, header);
 
if (unlikely(!sw_context->kernel)) {
DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
return -EPERM;
}
 
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, &cmd->body.sid,
NULL);
}
 
/**
* vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
*
* @dev_priv: The device private structure.
* @new_query_bo: The new buffer holding query results.
* @sw_context: The software context used for this command submission.
*
* This function checks whether @new_query_bo is suitable for holding
* query results, and if another buffer currently is pinned for query
* results. If so, the function prepares the state of @sw_context for
* switching pinned buffers after successful submission of the current
* command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
struct ttm_buffer_object *new_query_bo,
struct vmw_sw_context *sw_context)
{
struct vmw_res_cache_entry *ctx_entry =
&sw_context->res_cache[vmw_res_context];
int ret;
 
BUG_ON(!ctx_entry->valid);
sw_context->last_query_ctx = ctx_entry->res;
 
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
if (unlikely(new_query_bo->num_pages > 4)) {
DRM_ERROR("Query buffer too large.\n");
return -EINVAL;
}
 
if (unlikely(sw_context->cur_query_bo != NULL)) {
sw_context->needs_post_query_barrier = true;
ret = vmw_bo_to_validate_list(sw_context,
sw_context->cur_query_bo,
NULL);
if (unlikely(ret != 0))
return ret;
}
sw_context->cur_query_bo = new_query_bo;
 
ret = vmw_bo_to_validate_list(sw_context,
dev_priv->dummy_query_bo,
NULL);
if (unlikely(ret != 0))
return ret;
 
}
 
return 0;
}
 
 
/**
* vmw_query_bo_switch_commit - Finalize switching pinned query buffer
*
* @dev_priv: The device private structure.
* @sw_context: The software context used for this command submission batch.
*
* This function will check if we're switching query buffers, and will then,
* issue a dummy occlusion query wait used as a query barrier. When the fence
* object following that query wait has signaled, we are sure that all
* preceding queries have finished, and the old query buffer can be unpinned.
* However, since both the new query buffer and the old one are fenced with
* that fence, we can do an asynchronous unpin now, and be sure that the
* old query buffer won't be moved until the fence has signaled.
*
* As mentioned above, both the new and the old query buffers need to be fenced
* using a sequence emitted *after* calling this function.
*/
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
/*
* The validate list should still hold references to all
* contexts here.
*/
 
if (sw_context->needs_post_query_barrier) {
struct vmw_res_cache_entry *ctx_entry =
&sw_context->res_cache[vmw_res_context];
struct vmw_resource *ctx;
int ret;
 
BUG_ON(!ctx_entry->valid);
ctx = ctx_entry->res;
 
ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
 
if (unlikely(ret != 0))
DRM_ERROR("Out of fifo space for dummy query.\n");
}
 
if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
if (dev_priv->pinned_bo) {
vmw_bo_pin(dev_priv->pinned_bo, false);
ttm_bo_unref(&dev_priv->pinned_bo);
}
 
if (!sw_context->needs_post_query_barrier) {
vmw_bo_pin(sw_context->cur_query_bo, true);
 
/*
* We pin also the dummy_query_bo buffer so that we
* don't need to validate it when emitting
* dummy queries in context destroy paths.
*/
 
vmw_bo_pin(dev_priv->dummy_query_bo, true);
dev_priv->dummy_query_bo_pinned = true;
 
BUG_ON(sw_context->last_query_ctx == NULL);
dev_priv->query_cid = sw_context->last_query_ctx->id;
dev_priv->query_cid_valid = true;
dev_priv->pinned_bo =
ttm_bo_reference(sw_context->cur_query_bo);
}
}
}
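 
/*
* Pin-switch lifecycle sketch (illustrative, not driver code). For a
* submission that introduces a new query buffer while an old one is
* still pinned, the expected call order is:
*
* vmw_query_bo_switch_prepare(dev_priv, new_bo, sw_context);
* (commands are committed to the fifo)
* vmw_query_bo_switch_commit(dev_priv, sw_context);
* vmw_execbuf_fence_commands(...);
*
* The commit step emits the dummy query barrier, unpins the old buffer
* and pins the new one; the fence emitted last covers both buffers, so
* the old buffer cannot be moved before the barrier has executed.
*/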
 
/**
* vmw_translate_guest_pointer - Prepare to translate a user-space buffer
* handle to a valid SVGAGuestPtr
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: The software context used for this command batch validation.
* @ptr: Pointer to the user-space handle to be translated.
* @vmw_bo_p: Points to a location that, on successful return, will carry
* a reference-counted pointer to the DMA buffer identified by the
* user-space handle in @ptr.
*
* This function saves information needed to translate a user-space buffer
* handle to a valid SVGAGuestPtr. The translation does not take place
* immediately, but during a call to vmw_apply_relocations().
* This function builds a relocation list and a list of buffers to validate.
* The former needs to be freed using either vmw_apply_relocations() or
* vmw_free_relocations(). The latter needs to be freed using
* vmw_clear_validations().
*/
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
struct vmw_dma_buffer **vmw_bo_p)
{
struct vmw_dma_buffer *vmw_bo = NULL;
struct ttm_buffer_object *bo;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
 
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
return -EINVAL;
}
bo = &vmw_bo->base;
 
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
DRM_ERROR("Max number relocations per submission"
" exceeded\n");
ret = -EINVAL;
goto out_no_reloc;
}
 
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
 
ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
 
*vmw_bo_p = vmw_bo;
return 0;
 
out_no_reloc:
vmw_dmabuf_unreference(&vmw_bo);
*vmw_bo_p = NULL;
return ret;
}
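 
/*
* Deferred-translation sketch (clarifying, not driver code): the
* SVGAGuestPtr is not rewritten here. vmw_apply_relocations() later
* fills it in from the validated placement, roughly:
*
* if placed in a GMR:
* reloc->location->gmrId = bo->mem.start;
* if placed in VRAM:
* reloc->location->offset += bo->offset;
* reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
*/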
 
/**
* vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_begin_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdBeginQuery q;
} *cmd;
 
cmd = container_of(header, struct vmw_begin_query_cmd,
header);
 
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->q.cid,
NULL);
}
 
/**
* vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_dma_buffer *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdEndQuery q;
} *cmd;
int ret;
 
cmd = container_of(header, struct vmw_query_cmd, header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
 
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->q.guestResult,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
 
ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
 
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
 
/**
* vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_dma_buffer *vmw_bo;
struct vmw_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery q;
} *cmd;
int ret;
 
cmd = container_of(header, struct vmw_query_cmd, header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
 
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->q.guestResult,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
 
vmw_dmabuf_unreference(&vmw_bo);
return 0;
}
 
static int vmw_cmd_dma(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_dma_buffer *vmw_bo = NULL;
struct vmw_surface *srf = NULL;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
 
cmd = container_of(header, struct vmw_dma_cmd, header);
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->dma.guest.ptr,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
 
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, &cmd->dma.host.sid,
NULL);
if (unlikely(ret != 0)) {
if (unlikely(ret != -ERESTARTSYS))
DRM_ERROR("could not find surface for DMA.\n");
goto out_no_surface;
}
 
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
 
// vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
 
out_no_surface:
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
 
static int vmw_cmd_draw(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_draw_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdDrawPrimitives body;
} *cmd;
SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
(unsigned long)header + sizeof(*cmd));
SVGA3dPrimitiveRange *range;
uint32_t i;
uint32_t maxnum;
int ret;
 
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
 
cmd = container_of(header, struct vmw_draw_cmd, header);
maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
 
if (unlikely(cmd->body.numVertexDecls > maxnum)) {
DRM_ERROR("Illegal number of vertex declarations.\n");
return -EINVAL;
}
 
for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&decl->array.surfaceId, NULL);
if (unlikely(ret != 0))
return ret;
}
 
maxnum = (header->size - sizeof(cmd->body) -
cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
if (unlikely(cmd->body.numRanges > maxnum)) {
DRM_ERROR("Illegal number of index ranges.\n");
return -EINVAL;
}
 
range = (SVGA3dPrimitiveRange *) decl;
for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&range->indexArray.surfaceId, NULL);
if (unlikely(ret != 0))
return ret;
}
return 0;
}
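 
/*
* On-the-wire layout assumed by the parser above (clarifying note):
*
* SVGA3dCmdHeader header
* SVGA3dCmdDrawPrimitives body
* SVGA3dVertexDecl decl[body.numVertexDecls]
* SVGA3dPrimitiveRange range[body.numRanges]
*
* header->size covers everything after the header, which is why both
* maxnum computations subtract the fixed-size parts before dividing by
* the element size.
*/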
 
 
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_tex_state_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSetTextureState state;
};
 
SVGA3dTextureState *last_state = (SVGA3dTextureState *)
((unsigned long) header + header->size + sizeof(header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
int ret;
 
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
 
for (; cur_state < last_state; ++cur_state) {
if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
continue;
 
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cur_state->value, NULL);
if (unlikely(ret != 0))
return ret;
}
 
return 0;
}
 
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf)
{
struct vmw_dma_buffer *vmw_bo;
int ret;
 
struct {
uint32_t header;
SVGAFifoCmdDefineGMRFB body;
} *cmd = buf;
 
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->body.ptr,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
 
vmw_dmabuf_unreference(&vmw_bo);
 
return ret;
}
 
/**
* vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
* command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_set_shader_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSetShader body;
} *cmd;
int ret;
 
cmd = container_of(header, struct vmw_set_shader_cmd,
header);
 
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
 
return 0;
}
 
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
{
uint32_t size_remaining = *size;
uint32_t cmd_id;
 
cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
switch (cmd_id) {
case SVGA_CMD_UPDATE:
*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
break;
case SVGA_CMD_DEFINE_GMRFB:
*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
break;
case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
break;
case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
break;
default:
DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
return -EINVAL;
}
 
if (*size > size_remaining) {
DRM_ERROR("Invalid SVGA command (size mismatch):"
" %u.\n", cmd_id);
return -EINVAL;
}
 
if (unlikely(!sw_context->kernel)) {
DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
return -EPERM;
}
 
if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
 
return 0;
}
 
typedef int (*vmw_cmd_func) (struct vmw_private *,
struct vmw_sw_context *,
SVGA3dCmdHeader *);
 
#define VMW_CMD_DEF(cmd, func) \
[cmd - SVGA_3D_CMD_BASE] = func
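 
/*
* Expansion sketch (clarifying note): VMW_CMD_DEF is a designated
* array initializer, so for example
*
* VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma)
*
* expands to
*
* [SVGA_3D_CMD_SURFACE_DMA - SVGA_3D_CMD_BASE] = &vmw_cmd_dma,
*
* Any opcode not listed would leave a NULL slot, so the table below
* must cover every command id that the range check in vmw_cmd_check()
* lets through.
*/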
 
static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
&vmw_cmd_set_render_target_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
&vmw_cmd_blt_surf_screen_check),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
};
 
static int vmw_cmd_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
{
uint32_t cmd_id;
uint32_t size_remaining = *size;
SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
int ret;
 
cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
/* Handle any non-3D commands */
if (unlikely(cmd_id < SVGA_CMD_MAX))
return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
 
 
cmd_id = le32_to_cpu(header->id);
*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
 
cmd_id -= SVGA_3D_CMD_BASE;
if (unlikely(*size > size_remaining))
goto out_err;
 
if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
goto out_err;
 
ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
if (unlikely(ret != 0))
goto out_err;
 
return 0;
out_err:
DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
cmd_id + SVGA_3D_CMD_BASE);
return -EINVAL;
}
 
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf,
uint32_t size)
{
int32_t cur_size = size;
int ret;
 
sw_context->buf_start = buf;
 
while (cur_size > 0) {
size = cur_size;
ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
if (unlikely(ret != 0))
return ret;
buf = (void *)((unsigned long) buf + size);
cur_size -= size;
}
 
if (unlikely(cur_size != 0)) {
DRM_ERROR("Command verifier out of sync.\n");
return -EINVAL;
}
 
return 0;
}
 
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
sw_context->cur_reloc = 0;
}
 
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
uint32_t i;
struct vmw_relocation *reloc;
struct ttm_validate_buffer *validate;
struct ttm_buffer_object *bo;
 
for (i = 0; i < sw_context->cur_reloc; ++i) {
reloc = &sw_context->relocs[i];
validate = &sw_context->val_bufs[reloc->index].base;
bo = validate->bo;
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
reloc->location->offset += bo->offset;
reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
break;
case VMW_PL_GMR:
reloc->location->gmrId = bo->mem.start;
break;
default:
BUG();
}
}
vmw_free_relocations(sw_context);
}
 
/**
* vmw_resource_list_unreference - Free up a resource list and unreference
* all resources referenced by it.
*
* @list: The resource list.
*/
static void vmw_resource_list_unreference(struct list_head *list)
{
struct vmw_resource_val_node *val, *val_next;
 
/*
* Drop references to resources held during command submission.
*/
 
list_for_each_entry_safe(val, val_next, list, head) {
list_del_init(&val->head);
vmw_resource_unreference(&val->res);
kfree(val);
}
}
 
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
struct vmw_validate_buffer *entry, *next;
struct vmw_resource_val_node *val;
 
/*
* Drop references to DMA buffers held during command submission.
*/
list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
base.head) {
list_del(&entry->base.head);
ttm_bo_unref(&entry->base.bo);
(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
sw_context->cur_val_buf--;
}
BUG_ON(sw_context->cur_val_buf != 0);
 
list_for_each_entry(val, &sw_context->resource_list, head)
(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
 
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo)
{
int ret;
 
 
/*
* Don't validate pinned buffers.
*/
 
if (bo == dev_priv->pinned_bo ||
(bo == dev_priv->dummy_query_bo &&
dev_priv->dummy_query_bo_pinned))
return 0;
 
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
* If there is no space in VRAM and GMR ids are all used up,
* start evicting GMRs to make room. If the DMA buffer can't be
* used as a GMR, this will return -ENOMEM.
*/
 
ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
 
/**
* If that failed, try VRAM again, this time evicting
* previous contents.
*/
 
DRM_INFO("Falling through to VRAM.\n");
ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
return ret;
}
 
 
static int vmw_validate_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
struct vmw_validate_buffer *entry;
int ret;
 
list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
if (unlikely(ret != 0))
return ret;
}
return 0;
}
 
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
uint32_t size)
{
if (likely(sw_context->cmd_bounce_size >= size))
return 0;
 
if (sw_context->cmd_bounce_size == 0)
sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
 
while (sw_context->cmd_bounce_size < size) {
sw_context->cmd_bounce_size =
PAGE_ALIGN(sw_context->cmd_bounce_size +
(sw_context->cmd_bounce_size >> 1));
}
 
if (sw_context->cmd_bounce != NULL)
vfree(sw_context->cmd_bounce);
 
sw_context->cmd_bounce = KernelAlloc(sw_context->cmd_bounce_size);
 
if (sw_context->cmd_bounce == NULL) {
DRM_ERROR("Failed to allocate command bounce buffer.\n");
sw_context->cmd_bounce_size = 0;
return -ENOMEM;
}
 
return 0;
}
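 
/*
* Growth example (illustrative; assumes VMWGFX_CMD_BOUNCE_INIT_SIZE is
* 32768 and PAGE_SIZE is 4096): a request for 100000 bytes grows the
* bounce size 32768 -> 49152 -> 73728 -> 110592, i.e. roughly 1.5x per
* step with page alignment, before a single reallocation is performed.
*/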
 
/**
* vmw_execbuf_fence_commands - create and submit a command stream fence
*
* Creates a fence object and submits a command stream marker.
* If this fails for some reason, we sync the fifo and return NULL.
* It is then safe to fence buffers with a NULL pointer.
*
* If @p_handle is non-NULL, @file_priv must also be non-NULL, and a
* user-space handle is created for the fence; otherwise no handle is
* created.
*/
 
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
struct vmw_private *dev_priv,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle)
{
uint32_t sequence;
int ret;
bool synced = false;
 
/* p_handle implies file_priv. */
BUG_ON(p_handle != NULL && file_priv == NULL);
 
ret = vmw_fifo_send_fence(dev_priv, &sequence);
if (unlikely(ret != 0)) {
DRM_ERROR("Fence submission error. Syncing.\n");
synced = true;
}
 
if (p_handle != NULL)
ret = vmw_user_fence_create(file_priv, dev_priv->fman,
sequence,
DRM_VMW_FENCE_FLAG_EXEC,
p_fence, p_handle);
else
ret = vmw_fence_create(dev_priv->fman, sequence,
DRM_VMW_FENCE_FLAG_EXEC,
p_fence);
 
if (unlikely(ret != 0 && !synced)) {
(void) vmw_fallback_wait(dev_priv, false, false,
sequence, false,
VMW_FENCE_WAIT_TIMEOUT);
*p_fence = NULL;
}
 
return 0;
}
 
 
/**
* vmw_execbuf_copy_fence_user - copy fence object information to
* user-space.
*
* @dev_priv: Pointer to a vmw_private struct.
* @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
* @ret: Return value from fence object creation.
* @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
* which the information should be copied.
* @fence: Pointer to the fence object.
* @fence_handle: User-space fence handle.
*
* This function copies fence information to user-space. If copying fails,
* the user-space struct drm_vmw_fence_rep::error member is hopefully
* left untouched, and if it's preloaded with an -EFAULT by user-space,
* the error will hopefully be detected.
* Also if copying fails, user-space will be unable to signal the fence
* object, so we wait for it immediately and then unreference the
* user-space reference.
*/
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct vmw_fpriv *vmw_fp,
int ret,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj *fence,
uint32_t fence_handle)
{
struct drm_vmw_fence_rep fence_rep;
 
if (user_fence_rep == NULL)
return;
 
memset(&fence_rep, 0, sizeof(fence_rep));
 
fence_rep.error = ret;
if (ret == 0) {
BUG_ON(fence == NULL);
 
fence_rep.handle = fence_handle;
fence_rep.seqno = fence->seqno;
vmw_update_seqno(dev_priv, &dev_priv->fifo);
fence_rep.passed_seqno = dev_priv->last_read_seqno;
}
 
/*
* copy_to_user errors will be detected by user space not
* seeing fence_rep::error filled in. Typically
* user-space would have pre-set that member to -EFAULT.
*/
// ret = copy_to_user(user_fence_rep, &fence_rep,
// sizeof(fence_rep));
 
/*
* User-space lost the fence object. We need to sync
* and unreference the handle.
*/
if (unlikely(ret != 0) && (fence_rep.error == 0)) {
ttm_ref_object_base_unref(vmw_fp->tfile,
fence_handle, TTM_REF_USAGE);
DRM_ERROR("Fence copy error. Syncing.\n");
(void) vmw_fence_obj_wait(fence, fence->signal_mask,
false, false,
VMW_FENCE_WAIT_TIMEOUT);
}
}
 
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands,
void *kernel_commands,
uint32_t command_size,
uint64_t throttle_us,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj **out_fence)
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
struct vmw_resource *error_resource;
struct list_head resource_list;
struct ww_acquire_ctx ticket;
uint32_t handle;
void *cmd;
int ret;
 
ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
if (unlikely(ret != 0))
return -ERESTARTSYS;
 
/*
if (kernel_commands == NULL) {
sw_context->kernel = false;
 
ret = vmw_resize_cmd_bounce(sw_context, command_size);
if (unlikely(ret != 0))
goto out_unlock;
 
 
ret = copy_from_user(sw_context->cmd_bounce,
user_commands, command_size);
 
if (unlikely(ret != 0)) {
ret = -EFAULT;
DRM_ERROR("Failed copying commands.\n");
goto out_unlock;
}
kernel_commands = sw_context->cmd_bounce;
} else */
sw_context->kernel = true;
 
sw_context->tfile = vmw_fpriv(file_priv)->tfile;
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0;
INIT_LIST_HEAD(&sw_context->resource_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
sw_context->last_query_ctx = NULL;
sw_context->needs_post_query_barrier = false;
memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->validate_nodes);
INIT_LIST_HEAD(&sw_context->res_relocations);
if (!sw_context->res_ht_initialized) {
ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
if (unlikely(ret != 0))
goto out_unlock;
sw_context->res_ht_initialized = true;
}
 
INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
if (unlikely(ret != 0))
goto out_err;
 
ret = vmw_resources_reserve(sw_context);
if (unlikely(ret != 0))
goto out_err;
 
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
if (unlikely(ret != 0))
goto out_err;
 
ret = vmw_validate_buffers(dev_priv, sw_context);
if (unlikely(ret != 0))
goto out_err;
 
ret = vmw_resources_validate(sw_context);
if (unlikely(ret != 0))
goto out_err;
 
if (throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
throttle_us);
 
if (unlikely(ret != 0))
goto out_err;
}
 
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
ret = -ENOMEM;
goto out_err;
}
 
vmw_apply_relocations(sw_context);
memcpy(cmd, kernel_commands, command_size);
 
vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
vmw_resource_relocations_free(&sw_context->res_relocations);
 
vmw_fifo_commit(dev_priv, command_size);
 
vmw_query_bo_switch_commit(dev_priv, sw_context);
ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
&fence,
(user_fence_rep) ? &handle : NULL);
/*
* This error is harmless, because if fence submission fails,
* vmw_fifo_send_fence() will sync. The error will be propagated to
* user-space in @user_fence_rep.
*/
 
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
 
vmw_resource_list_unreserve(&sw_context->resource_list, false);
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
 
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, fence);
 
vmw_clear_validations(sw_context);
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
user_fence_rep, fence, handle);
 
/* Don't unreference when handing fence out */
if (unlikely(out_fence != NULL)) {
*out_fence = fence;
fence = NULL;
} else if (likely(fence != NULL)) {
vmw_fence_obj_unreference(&fence);
}
 
list_splice_init(&sw_context->resource_list, &resource_list);
mutex_unlock(&dev_priv->cmdbuf_mutex);
 
/*
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
vmw_resource_list_unreference(&resource_list);
 
return 0;
 
out_err:
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
vmw_resource_list_unreserve(&sw_context->resource_list, true);
vmw_clear_validations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource;
sw_context->error_resource = NULL;
mutex_unlock(&dev_priv->cmdbuf_mutex);
 
/*
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
vmw_resource_list_unreference(&resource_list);
if (unlikely(error_resource != NULL))
vmw_resource_unreference(&error_resource);
 
return ret;
}
 
 
 
/**
* vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
*
* @dev_priv: The device private structure.
*
* This function is called to idle the fifo and unpin the query buffer
* if the normal way to do this hits an error, which should typically be
* extremely rare.
*/
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
 
(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
vmw_bo_pin(dev_priv->pinned_bo, false);
vmw_bo_pin(dev_priv->dummy_query_bo, false);
dev_priv->dummy_query_bo_pinned = false;
}
 
 
/**
* __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
* query bo.
*
* @dev_priv: The device private structure.
* @fence: If non-NULL should point to a struct vmw_fence_obj issued
* _after_ a query barrier that flushes all queries touching the current
* buffer pointed to by @dev_priv->pinned_bo
*
* This function should be used to unpin the pinned query bo, or
* as a query barrier when we need to make sure that all queries have
* finished before the next fifo command. (For example on hardware
* context destructions where the hardware may otherwise leak unfinished
* queries).
*
* This function does not return any failure codes, but makes attempts
* to do safe unpinning in case of errors.
*
* The function will synchronize on the previous query barrier, and will
* thus not finish until that barrier has executed.
*
* the @dev_priv->cmdbuf_mutex needs to be held by the current thread
* before calling this function.
*/
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
struct vmw_fence_obj *fence)
{
int ret = 0;
struct list_head validate_list;
struct ttm_validate_buffer pinned_val, query_val;
struct vmw_fence_obj *lfence = NULL;
struct ww_acquire_ctx ticket;
 
if (dev_priv->pinned_bo == NULL)
goto out_unlock;
 
INIT_LIST_HEAD(&validate_list);
 
pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
list_add_tail(&pinned_val.head, &validate_list);
 
query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
list_add_tail(&query_val.head, &validate_list);
 
do {
ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
} while (ret == -ERESTARTSYS);
 
if (unlikely(ret != 0)) {
vmw_execbuf_unpin_panic(dev_priv);
goto out_no_reserve;
}
 
if (dev_priv->query_cid_valid) {
BUG_ON(fence != NULL);
ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
if (unlikely(ret != 0)) {
vmw_execbuf_unpin_panic(dev_priv);
goto out_no_emit;
}
dev_priv->query_cid_valid = false;
}
 
vmw_bo_pin(dev_priv->pinned_bo, false);
vmw_bo_pin(dev_priv->dummy_query_bo, false);
dev_priv->dummy_query_bo_pinned = false;
 
if (fence == NULL) {
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
NULL);
fence = lfence;
}
ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
if (lfence != NULL)
vmw_fence_obj_unreference(&lfence);
 
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
ttm_bo_unref(&dev_priv->pinned_bo);
 
out_unlock:
return;
 
out_no_emit:
ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
ttm_bo_unref(&dev_priv->pinned_bo);
}
 
/**
* vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
* query bo.
*
* @dev_priv: The device private structure.
*
* This function should be used to unpin the pinned query bo, or
* as a query barrier when we need to make sure that all queries have
* finished before the next fifo command. (For example on hardware
* context destructions where the hardware may otherwise leak unfinished
* queries).
*
* This function does not return any failure codes, but makes attempts
* to do safe unpinning in case of errors.
*
* The function will synchronize on the previous query barrier, and will
* thus not finish until that barrier has executed.
*/
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
mutex_lock(&dev_priv->cmdbuf_mutex);
if (dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
 
 
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
// struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
 
/*
* This will allow us to extend the ioctl argument while
* maintaining backwards compatibility:
* We take different code paths depending on the value of
* arg->version.
*/
 
if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
DRM_ERROR("Incorrect execbuf version.\n");
DRM_ERROR("You're running outdated experimental "
"vmwgfx user-space drivers.");
return -EINVAL;
}
 
// ret = ttm_read_lock(&vmaster->lock, true);
// if (unlikely(ret != 0))
// return ret;
 
ret = vmw_execbuf_process(file_priv, dev_priv,
(void __user *)(unsigned long)arg->commands,
NULL, arg->command_size, arg->throttle_us,
(void __user *)(unsigned long)arg->fence_rep,
NULL);
 
if (unlikely(ret != 0))
goto out_unlock;
 
// vmw_kms_cursor_post_execbuf(dev_priv);
 
out_unlock:
// ttm_read_unlock(&vmaster->lock);
return ret;
}
/drivers/video/drm/vmwgfx/vmwgfx_fence.c
0,0 → 1,1157
/**************************************************************************
*
* Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
 
#define VMW_FENCE_WRAP (1 << 31)
 
struct vmw_fence_manager {
int num_fence_objects;
struct vmw_private *dev_priv;
spinlock_t lock;
struct list_head fence_list;
struct work_struct work;
u32 user_fence_size;
u32 fence_size;
u32 event_fence_action_size;
bool fifo_down;
struct list_head cleanup_list;
uint32_t pending_actions[VMW_ACTION_MAX];
struct mutex goal_irq_mutex;
bool goal_irq_on; /* Protected by @goal_irq_mutex */
bool seqno_valid; /* Protected by @lock, and may not be set to true
without the @goal_irq_mutex held. */
};
 
struct vmw_user_fence {
struct ttm_base_object base;
struct vmw_fence_obj fence;
};
 
/**
* struct vmw_event_fence_action - fence action that delivers a drm event.
*
* @action: A struct vmw_fence_action to hook up to a fence.
* @fpriv_head: List head used to link this action into the owning file
* private's list of pending event actions.
* @event: A pointer to the pending drm event that controls the event
* delivery.
* @fence: A referenced pointer to the fence to keep it alive while @action
* hangs on it.
* @dev: Pointer to a struct drm_device so we can access the event stuff.
* @tv_sec: If non-NULL, the variable pointed to will be assigned the
* current time's tv_sec value when the fence signals.
* @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
* be assigned the current time's tv_usec value when the fence signals.
*/
struct vmw_event_fence_action {
struct vmw_fence_action action;
struct list_head fpriv_head;
 
struct drm_pending_event *event;
struct vmw_fence_obj *fence;
struct drm_device *dev;
 
uint32_t *tv_sec;
uint32_t *tv_usec;
};
 
/**
* Note on fencing subsystem usage of irqs:
* Typically the vmw_fences_update function is called
*
* a) When a new fence seqno has been submitted by the fifo code.
* b) On-demand when we have waiters. Sleeping waiters will switch on the
* ANY_FENCE irq and call vmw_fences_update() each time an ANY_FENCE irq
* is received. When the last fence waiter is gone, that irq is masked
* away.
*
* In situations where there are no waiters and we don't submit any new fences,
* fence objects may not be signaled. This is perfectly OK, since there are
* no consumers of the signaled data, but that is NOT ok when there are fence
* actions attached to a fence. The fencing subsystem then makes use of the
* FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
* which has an action attached, and each time vmw_fences_update is called,
* the subsystem makes sure the fence goal seqno is updated.
*
* The fence goal seqno irq is on as long as there are unsignaled fence
* objects with actions attached to them.
*/
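 
/*
* Wrap-safe seqno comparison (clarifying note): throughout this file,
* 32-bit unsigned arithmetic such as
*
* seqno - fence->seqno < VMW_FENCE_WRAP
*
* is true iff fence->seqno lags seqno by less than 2^31, even across a
* counter wrap. E.g. seqno == 0x00000002 and fence->seqno == 0xfffffffe
* gives a difference of 4, so the fence still counts as passed.
*/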
 
static void vmw_fence_obj_destroy_locked(struct kref *kref)
{
struct vmw_fence_obj *fence =
container_of(kref, struct vmw_fence_obj, kref);
 
struct vmw_fence_manager *fman = fence->fman;
unsigned int num_fences;
 
list_del_init(&fence->head);
num_fences = --fman->num_fence_objects;
spin_unlock_irq(&fman->lock);
if (fence->destroy)
fence->destroy(fence);
else
kfree(fence);
 
spin_lock_irq(&fman->lock);
}
 
 
/**
* Execute signal actions on fences recently signaled.
* This is done from a workqueue so we don't have to execute
* signal actions from atomic context.
*/
 
static void vmw_fence_work_func(struct work_struct *work)
{
struct vmw_fence_manager *fman =
container_of(work, struct vmw_fence_manager, work);
struct list_head list;
struct vmw_fence_action *action, *next_action;
bool seqno_valid;
 
do {
INIT_LIST_HEAD(&list);
mutex_lock(&fman->goal_irq_mutex);
 
spin_lock_irq(&fman->lock);
list_splice_init(&fman->cleanup_list, &list);
seqno_valid = fman->seqno_valid;
spin_unlock_irq(&fman->lock);
 
if (!seqno_valid && fman->goal_irq_on) {
fman->goal_irq_on = false;
vmw_goal_waiter_remove(fman->dev_priv);
}
mutex_unlock(&fman->goal_irq_mutex);
 
if (list_empty(&list))
return;
 
/*
* At this point, only we should be able to manipulate the
* list heads of the actions we have on the private list,
* hence fman::lock is not held.
*/
 
list_for_each_entry_safe(action, next_action, &list, head) {
list_del_init(&action->head);
if (action->cleanup)
action->cleanup(action);
}
} while (1);
}
 
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
 
if (unlikely(fman == NULL))
return NULL;
 
fman->dev_priv = dev_priv;
spin_lock_init(&fman->lock);
INIT_LIST_HEAD(&fman->fence_list);
INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func);
fman->fifo_down = true;
fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
fman->event_fence_action_size =
ttm_round_pot(sizeof(struct vmw_event_fence_action));
mutex_init(&fman->goal_irq_mutex);
 
return fman;
}
 
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
unsigned long irq_flags;
bool lists_empty;
 
// (void) cancel_work_sync(&fman->work);
 
spin_lock_irqsave(&fman->lock, irq_flags);
lists_empty = list_empty(&fman->fence_list) &&
list_empty(&fman->cleanup_list);
spin_unlock_irqrestore(&fman->lock, irq_flags);
 
BUG_ON(!lists_empty);
kfree(fman);
}
 
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
struct vmw_fence_obj *fence,
u32 seqno,
uint32_t mask,
void (*destroy) (struct vmw_fence_obj *fence))
{
unsigned long irq_flags;
unsigned int num_fences;
int ret = 0;
 
fence->seqno = seqno;
INIT_LIST_HEAD(&fence->seq_passed_actions);
fence->fman = fman;
fence->signaled = 0;
fence->signal_mask = mask;
kref_init(&fence->kref);
fence->destroy = destroy;
init_waitqueue_head(&fence->queue);
 
spin_lock_irqsave(&fman->lock, irq_flags);
if (unlikely(fman->fifo_down)) {
ret = -EBUSY;
goto out_unlock;
}
list_add_tail(&fence->head, &fman->fence_list);
num_fences = ++fman->num_fence_objects;
 
out_unlock:
spin_unlock_irqrestore(&fman->lock, irq_flags);
return ret;
 
}
 
struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
{
if (unlikely(fence == NULL))
return NULL;
 
kref_get(&fence->kref);
return fence;
}
 
/**
* vmw_fence_obj_unreference
*
* Note that this function may not be entered with disabled irqs since
* it may re-enable them in the destroy function.
*
*/
void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
{
struct vmw_fence_obj *fence = *fence_p;
struct vmw_fence_manager *fman;
 
if (unlikely(fence == NULL))
return;
 
fman = fence->fman;
*fence_p = NULL;
spin_lock_irq(&fman->lock);
BUG_ON(atomic_read(&fence->kref.refcount) == 0);
kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
spin_unlock_irq(&fman->lock);
}
 
void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
struct list_head *list)
{
struct vmw_fence_action *action, *next_action;
 
list_for_each_entry_safe(action, next_action, list, head) {
list_del_init(&action->head);
fman->pending_actions[action->type]--;
if (action->seq_passed != NULL)
action->seq_passed(action);
 
/*
* Add the cleanup action to the cleanup list so that
* it will be performed by a worker task.
*/
 
list_add_tail(&action->head, &fman->cleanup_list);
}
}
 
/**
* vmw_fence_goal_new_locked - Figure out a new device fence goal
* seqno if needed.
*
* @fman: Pointer to a fence manager.
* @passed_seqno: The seqno the device currently signals as passed.
*
* This function should be called with the fence manager lock held.
* It is typically called when we have a new passed_seqno, and
* we might need to update the fence goal. It checks to see whether
* the current fence goal has already passed, and, in that case,
* scans through all unsignaled fences to get the next fence object with an
* action attached, and sets the seqno of that fence as a new fence goal.
*
* Returns true if the device goal seqno was updated, false otherwise.
*/
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
u32 passed_seqno)
{
u32 goal_seqno;
__le32 __iomem *fifo_mem;
struct vmw_fence_obj *fence;
 
if (likely(!fman->seqno_valid))
return false;
 
fifo_mem = fman->dev_priv->mmio_virt;
goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
return false;
 
fman->seqno_valid = false;
list_for_each_entry(fence, &fman->fence_list, head) {
if (!list_empty(&fence->seq_passed_actions)) {
fman->seqno_valid = true;
iowrite32(fence->seqno,
fifo_mem + SVGA_FIFO_FENCE_GOAL);
break;
}
}
 
return true;
}
 
 
/**
* vmw_fence_goal_check_locked - Replace the device fence goal seqno if
* needed.
*
* @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
* considered as a device fence goal.
*
* This function should be called with the fence manager lock held.
* It is typically called when an action has been attached to a fence to
* check whether the seqno of that fence should be used for a fence
* goal interrupt. This is typically needed if the current fence goal is
* invalid, or has a higher seqno than that of the current fence object.
*
* Returns true if the device goal seqno was updated, false otherwise.
*/
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
u32 goal_seqno;
__le32 __iomem *fifo_mem;
 
if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC)
return false;
 
fifo_mem = fence->fman->dev_priv->mmio_virt;
goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
if (likely(fence->fman->seqno_valid &&
goal_seqno - fence->seqno < VMW_FENCE_WRAP))
return false;
 
iowrite32(fence->seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
fence->fman->seqno_valid = true;
 
return true;
}
 
void vmw_fences_update(struct vmw_fence_manager *fman)
{
unsigned long flags;
struct vmw_fence_obj *fence, *next_fence;
struct list_head action_list;
bool needs_rerun;
uint32_t seqno, new_seqno;
__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
 
seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
rerun:
spin_lock_irqsave(&fman->lock, flags);
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->seqno < VMW_FENCE_WRAP) {
list_del_init(&fence->head);
fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
vmw_fences_perform_actions(fman, &action_list);
wake_up_all(&fence->queue);
} else
break;
}
 
needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
 
// if (!list_empty(&fman->cleanup_list))
// (void) schedule_work(&fman->work);
spin_unlock_irqrestore(&fman->lock, flags);
 
/*
* Rerun if the fence goal seqno was updated, and the
* hardware might have raced with that update, so that
* we missed a fence_goal irq.
*/
 
if (unlikely(needs_rerun)) {
new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
if (new_seqno != seqno) {
seqno = new_seqno;
goto rerun;
}
}
}
 
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
uint32_t flags)
{
struct vmw_fence_manager *fman = fence->fman;
unsigned long irq_flags;
uint32_t signaled;
 
spin_lock_irqsave(&fman->lock, irq_flags);
signaled = fence->signaled;
spin_unlock_irqrestore(&fman->lock, irq_flags);
 
flags &= fence->signal_mask;
if ((signaled & flags) == flags)
return 1;
 
if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0)
vmw_fences_update(fman);
 
spin_lock_irqsave(&fman->lock, irq_flags);
signaled = fence->signaled;
spin_unlock_irqrestore(&fman->lock, irq_flags);
 
return ((signaled & flags) == flags);
}
 
int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
uint32_t flags, bool lazy,
bool interruptible, unsigned long timeout)
{
struct vmw_private *dev_priv = fence->fman->dev_priv;
long ret;
 
if (likely(vmw_fence_obj_signaled(fence, flags)))
return 0;
 
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
vmw_seqno_waiter_add(dev_priv);
 
if (interruptible)
ret = wait_event_interruptible_timeout
(fence->queue,
vmw_fence_obj_signaled(fence, flags),
timeout);
else
ret = wait_event_timeout
(fence->queue,
vmw_fence_obj_signaled(fence, flags),
timeout);
 
vmw_seqno_waiter_remove(dev_priv);
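	/*
	 * wait_event_timeout() and friends return 0 on timeout, the
	 * remaining jiffies on success, and (for the interruptible
	 * variant) a negative error when interrupted by a signal; map
	 * that onto the 0 / -EBUSY / negative-error convention callers
	 * of this function expect.
	 */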
 
if (unlikely(ret == 0))
ret = -EBUSY;
else if (likely(ret > 0))
ret = 0;
 
return ret;
}
 
void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
struct vmw_private *dev_priv = fence->fman->dev_priv;
 
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}
 
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fence->fman;
 
kfree(fence);
/*
* Free kernel space accounting.
*/
ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
fman->fence_size);
}
 
int vmw_fence_create(struct vmw_fence_manager *fman,
uint32_t seqno,
uint32_t mask,
struct vmw_fence_obj **p_fence)
{
struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
struct vmw_fence_obj *fence;
int ret;
 
ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
false, false);
if (unlikely(ret != 0))
return ret;
 
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(fence == NULL)) {
ret = -ENOMEM;
goto out_no_object;
}
 
ret = vmw_fence_obj_init(fman, fence, seqno, mask,
vmw_fence_destroy);
if (unlikely(ret != 0))
goto out_err_init;
 
*p_fence = fence;
return 0;
 
out_err_init:
kfree(fence);
out_no_object:
ttm_mem_global_free(mem_glob, fman->fence_size);
return ret;
}
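/*
 * A minimal create/wait pattern, as a sketch only (it assumes the caller
 * has already obtained a seqno, e.g. from vmw_fifo_send_fence()):
 *
 *	struct vmw_fence_obj *fence;
 *	int ret = vmw_fence_create(fman, seqno,
 *				   DRM_VMW_FENCE_FLAG_EXEC, &fence);
 *	if (ret == 0) {
 *		ret = vmw_fence_obj_wait(fence, DRM_VMW_FENCE_FLAG_EXEC,
 *					 false, true,
 *					 VMW_FENCE_WAIT_TIMEOUT);
 *		vmw_fence_obj_unreference(&fence);
 *	}
 */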
 
 
static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
struct vmw_user_fence *ufence =
container_of(fence, struct vmw_user_fence, fence);
struct vmw_fence_manager *fman = fence->fman;
 
// ttm_base_object_kfree(ufence, base);
/*
* Free kernel space accounting.
*/
ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
fman->user_fence_size);
}
 
static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct vmw_user_fence *ufence =
container_of(base, struct vmw_user_fence, base);
struct vmw_fence_obj *fence = &ufence->fence;
 
*p_base = NULL;
vmw_fence_obj_unreference(&fence);
}
 
int vmw_user_fence_create(struct drm_file *file_priv,
struct vmw_fence_manager *fman,
uint32_t seqno,
uint32_t mask,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_user_fence *ufence;
struct vmw_fence_obj *tmp;
struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
int ret;
 
/*
* Kernel memory space accounting, since this object may
* be created by a user-space request.
*/
 
ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
false, false);
if (unlikely(ret != 0))
return ret;
 
ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
if (unlikely(ufence == NULL)) {
ret = -ENOMEM;
goto out_no_object;
}
 
ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
mask, vmw_user_fence_destroy);
if (unlikely(ret != 0)) {
kfree(ufence);
goto out_no_object;
}
 
/*
* The base object holds a reference which is freed in
* vmw_user_fence_base_release.
*/
tmp = vmw_fence_obj_reference(&ufence->fence);
ret = ttm_base_object_init(tfile, &ufence->base, false,
VMW_RES_FENCE,
&vmw_user_fence_base_release, NULL);
 
 
if (unlikely(ret != 0)) {
/*
* Free the base object's reference
*/
vmw_fence_obj_unreference(&tmp);
goto out_err;
}
 
*p_fence = &ufence->fence;
*p_handle = ufence->base.hash.key;
 
return 0;
out_err:
tmp = &ufence->fence;
vmw_fence_obj_unreference(&tmp);
out_no_object:
ttm_mem_global_free(mem_glob, fman->user_fence_size);
return ret;
}
 
 
/**
* vmw_fence_fifo_down - signal all unsignaled fence objects.
*/
 
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
unsigned long irq_flags;
struct list_head action_list;
int ret;
 
/*
* The list may be altered while we traverse it, so always
* restart when we've released the fman->lock.
*/
 
spin_lock_irqsave(&fman->lock, irq_flags);
fman->fifo_down = true;
while (!list_empty(&fman->fence_list)) {
struct vmw_fence_obj *fence =
list_entry(fman->fence_list.prev, struct vmw_fence_obj,
head);
kref_get(&fence->kref);
spin_unlock_irq(&fman->lock);
 
ret = vmw_fence_obj_wait(fence, fence->signal_mask,
false, false,
VMW_FENCE_WAIT_TIMEOUT);
 
if (unlikely(ret != 0)) {
list_del_init(&fence->head);
fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
vmw_fences_perform_actions(fman, &action_list);
wake_up_all(&fence->queue);
}
 
spin_lock_irq(&fman->lock);
 
BUG_ON(!list_empty(&fence->head));
kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
}
spin_unlock_irqrestore(&fman->lock, irq_flags);
}
 
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
unsigned long irq_flags;
 
spin_lock_irqsave(&fman->lock, irq_flags);
fman->fifo_down = false;
spin_unlock_irqrestore(&fman->lock, irq_flags);
}
 
 
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_fence_wait_arg *arg =
(struct drm_vmw_fence_wait_arg *)data;
unsigned long timeout;
struct ttm_base_object *base;
struct vmw_fence_obj *fence;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);
 
/*
* 64-bit division not present on 32-bit systems, so do an
* approximation. (Divide by 1000000).
*/
 
wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
(wait_timeout >> 26);
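	/*
	 * The three shifts evaluate to x * (2^-20 + 2^-24 - 2^-26),
	 * i.e. roughly x * 0.99838e-6, which is within about 0.2% of an
	 * exact division by 1000000, close enough for a wait timeout.
	 */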
 
if (!arg->cookie_valid) {
arg->cookie_valid = 1;
arg->kernel_cookie = GetTimerTicks() + wait_timeout;
}
 
base = ttm_base_object_lookup(tfile, arg->handle);
if (unlikely(base == NULL)) {
printk(KERN_ERR "Wait invalid fence object handle "
"0x%08lx.\n",
(unsigned long)arg->handle);
return -EINVAL;
}
 
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
 
timeout = GetTimerTicks();
if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
0 : -EBUSY);
goto out;
}
 
timeout = (unsigned long)arg->kernel_cookie - timeout;
 
ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout);
 
out:
ttm_base_object_unref(&base);
 
/*
* Optionally unref the fence object.
*/
 
if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
return ttm_ref_object_base_unref(tfile, arg->handle,
TTM_REF_USAGE);
return ret;
}
 
int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_fence_signaled_arg *arg =
(struct drm_vmw_fence_signaled_arg *) data;
struct ttm_base_object *base;
struct vmw_fence_obj *fence;
struct vmw_fence_manager *fman;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
 
base = ttm_base_object_lookup(tfile, arg->handle);
if (unlikely(base == NULL)) {
printk(KERN_ERR "Fence signaled invalid fence object handle "
"0x%08lx.\n",
(unsigned long)arg->handle);
return -EINVAL;
}
 
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
fman = fence->fman;
 
arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
spin_lock_irq(&fman->lock);
 
arg->signaled_flags = fence->signaled;
arg->passed_seqno = dev_priv->last_read_seqno;
spin_unlock_irq(&fman->lock);
 
ttm_base_object_unref(&base);
 
return 0;
}
 
 
int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_fence_arg *arg =
(struct drm_vmw_fence_arg *) data;
 
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
arg->handle,
TTM_REF_USAGE);
}
 
/**
* vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
*
* @fman: Pointer to a struct vmw_fence_manager
* @event_list: Pointer to linked list of struct vmw_event_fence_action objects
* with pointers to a struct drm_file object about to be closed.
*
* This function removes all pending fence events with references to a
* specific struct drm_file object about to be closed. The caller is required
* to pass a list of all struct vmw_event_fence_action objects with such
* events attached. This function is typically called before the
* struct drm_file object's event management is taken down.
*/
void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
struct list_head *event_list)
{
struct vmw_event_fence_action *eaction;
struct drm_pending_event *event;
unsigned long irq_flags;
 
while (1) {
spin_lock_irqsave(&fman->lock, irq_flags);
if (list_empty(event_list))
goto out_unlock;
eaction = list_first_entry(event_list,
struct vmw_event_fence_action,
fpriv_head);
list_del_init(&eaction->fpriv_head);
event = eaction->event;
eaction->event = NULL;
spin_unlock_irqrestore(&fman->lock, irq_flags);
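		/*
		 * The lock is dropped before the destroy callback runs,
		 * since the callback may sleep or take locks of its own.
		 */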
event->destroy(event);
}
out_unlock:
spin_unlock_irqrestore(&fman->lock, irq_flags);
}
 
 
/**
* vmw_event_fence_action_seq_passed
*
* @action: The struct vmw_fence_action embedded in a struct
* vmw_event_fence_action.
*
* This function is called when the seqno of the fence where @action is
* attached has passed. It queues the event on the submitter's event list.
* This function is always called from atomic context, and may be called
* from irq context.
*/
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
struct vmw_event_fence_action *eaction =
container_of(action, struct vmw_event_fence_action, action);
struct drm_device *dev = eaction->dev;
struct drm_pending_event *event = eaction->event;
struct drm_file *file_priv;
unsigned long irq_flags;
 
if (unlikely(event == NULL))
return;
 
file_priv = event->file_priv;
spin_lock_irqsave(&dev->event_lock, irq_flags);
/*
if (likely(eaction->tv_sec != NULL)) {
struct timeval tv;
 
do_gettimeofday(&tv);
*eaction->tv_sec = tv.tv_sec;
*eaction->tv_usec = tv.tv_usec;
}
*/
list_del_init(&eaction->fpriv_head);
list_add_tail(&eaction->event->link, &file_priv->event_list);
eaction->event = NULL;
wake_up_all(&file_priv->event_wait);
spin_unlock_irqrestore(&dev->event_lock, irq_flags);
}
 
/**
* vmw_event_fence_action_cleanup
*
* @action: The struct vmw_fence_action embedded in a struct
* vmw_event_fence_action.
*
* This function is the struct vmw_fence_action destructor. It's typically
* called from a workqueue.
*/
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
struct vmw_event_fence_action *eaction =
container_of(action, struct vmw_event_fence_action, action);
struct vmw_fence_manager *fman = eaction->fence->fman;
unsigned long irq_flags;
 
spin_lock_irqsave(&fman->lock, irq_flags);
list_del(&eaction->fpriv_head);
spin_unlock_irqrestore(&fman->lock, irq_flags);
 
vmw_fence_obj_unreference(&eaction->fence);
kfree(eaction);
}
 
 
/**
* vmw_fence_obj_add_action - Add an action to a fence object.
*
 * @fence: The fence object.
 * @action: The action to add.
*
* Note that the action callbacks may be executed before this function
* returns.
*/
void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
struct vmw_fence_action *action)
{
struct vmw_fence_manager *fman = fence->fman;
unsigned long irq_flags;
bool run_update = false;
 
mutex_lock(&fman->goal_irq_mutex);
spin_lock_irqsave(&fman->lock, irq_flags);
 
fman->pending_actions[action->type]++;
if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) {
struct list_head action_list;
 
INIT_LIST_HEAD(&action_list);
list_add_tail(&action->head, &action_list);
vmw_fences_perform_actions(fman, &action_list);
} else {
list_add_tail(&action->head, &fence->seq_passed_actions);
 
/*
* This function may set fman::seqno_valid, so it must
* be run with the goal_irq_mutex held.
*/
run_update = vmw_fence_goal_check_locked(fence);
}
 
spin_unlock_irqrestore(&fman->lock, irq_flags);
 
if (run_update) {
if (!fman->goal_irq_on) {
fman->goal_irq_on = true;
vmw_goal_waiter_add(fman->dev_priv);
}
vmw_fences_update(fman);
}
mutex_unlock(&fman->goal_irq_mutex);
 
}
 
/**
 * vmw_event_fence_action_queue - Queue an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been allocated
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-NULL, the location where the event timestamp seconds
 * are to be written once the fence signals; NULL if no timestamp is wanted.
 * @tv_usec: Likewise for the timestamp microseconds.
 * @interruptible: Interruptible waits if possible.
*
* As a side effect, the object pointed to by @event may have been
* freed when this function returns. If this function returns with
* an error code, the caller needs to free that object.
*/
 
int vmw_event_fence_action_queue(struct drm_file *file_priv,
struct vmw_fence_obj *fence,
struct drm_pending_event *event,
uint32_t *tv_sec,
uint32_t *tv_usec,
bool interruptible)
{
struct vmw_event_fence_action *eaction;
struct vmw_fence_manager *fman = fence->fman;
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
unsigned long irq_flags;
 
eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
if (unlikely(eaction == NULL))
return -ENOMEM;
 
eaction->event = event;
 
eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
eaction->action.cleanup = vmw_event_fence_action_cleanup;
eaction->action.type = VMW_ACTION_EVENT;
 
eaction->fence = vmw_fence_obj_reference(fence);
eaction->dev = fman->dev_priv->dev;
eaction->tv_sec = tv_sec;
eaction->tv_usec = tv_usec;
 
spin_lock_irqsave(&fman->lock, irq_flags);
list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
spin_unlock_irqrestore(&fman->lock, irq_flags);
 
vmw_fence_obj_add_action(fence, &eaction->action);
 
return 0;
}
 
struct vmw_event_fence_pending {
struct drm_pending_event base;
struct drm_vmw_event_fence event;
};
 
int vmw_event_fence_action_create(struct drm_file *file_priv,
struct vmw_fence_obj *fence,
uint32_t flags,
uint64_t user_data,
bool interruptible)
{
struct vmw_event_fence_pending *event;
struct drm_device *dev = fence->fman->dev_priv->dev;
unsigned long irq_flags;
int ret;
 
spin_lock_irqsave(&dev->event_lock, irq_flags);
 
ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
if (likely(ret == 0))
file_priv->event_space -= sizeof(event->event);
 
spin_unlock_irqrestore(&dev->event_lock, irq_flags);
 
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate event space for this file.\n");
goto out_no_space;
}
 
 
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (unlikely(event == NULL)) {
DRM_ERROR("Failed to allocate an event.\n");
ret = -ENOMEM;
goto out_no_event;
}
 
event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(event->event);
event->event.user_data = user_data;
 
event->base.event = &event->event.base;
event->base.file_priv = file_priv;
event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
 
 
if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
ret = vmw_event_fence_action_queue(file_priv, fence,
&event->base,
&event->event.tv_sec,
&event->event.tv_usec,
interruptible);
else
ret = vmw_event_fence_action_queue(file_priv, fence,
&event->base,
NULL,
NULL,
interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
event->base.destroy(&event->base);
out_no_event:
spin_lock_irqsave(&dev->event_lock, irq_flags);
	file_priv->event_space += sizeof(event->event);
spin_unlock_irqrestore(&dev->event_lock, irq_flags);
out_no_space:
return ret;
}
 
#if 0
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_fence_event_arg *arg =
(struct drm_vmw_fence_event_arg *) data;
struct vmw_fence_obj *fence = NULL;
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
struct drm_vmw_fence_rep __user *user_fence_rep =
(struct drm_vmw_fence_rep __user *)(unsigned long)
arg->fence_rep;
uint32_t handle;
int ret;
 
/*
* Look up an existing fence object,
* and if user-space wants a new reference,
* add one.
*/
if (arg->handle) {
struct ttm_base_object *base =
ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
 
if (unlikely(base == NULL)) {
DRM_ERROR("Fence event invalid fence object handle "
"0x%08lx.\n",
(unsigned long)arg->handle);
return -EINVAL;
}
fence = &(container_of(base, struct vmw_user_fence,
base)->fence);
(void) vmw_fence_obj_reference(fence);
 
if (user_fence_rep != NULL) {
bool existed;
 
ret = ttm_ref_object_add(vmw_fp->tfile, base,
TTM_REF_USAGE, &existed);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to reference a fence "
"object.\n");
goto out_no_ref_obj;
}
handle = base->hash.key;
}
ttm_base_object_unref(&base);
}
 
/*
* Create a new fence object.
*/
if (!fence) {
ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
&fence,
(user_fence_rep) ?
&handle : NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Fence event failed to create fence.\n");
return ret;
}
}
 
BUG_ON(fence == NULL);
 
	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
 
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to attach event to fence.\n");
goto out_no_create;
}
 
vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
handle);
vmw_fence_obj_unreference(&fence);
return 0;
out_no_create:
if (user_fence_rep != NULL)
ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
handle, TTM_REF_USAGE);
out_no_ref_obj:
vmw_fence_obj_unreference(&fence);
return ret;
}
 
#endif
/drivers/video/drm/vmwgfx/vmwgfx_fence.h
0,0 → 1,123
/**************************************************************************
*
* Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#ifndef _VMWGFX_FENCE_H_
#define _VMWGFX_FENCE_H_
 
#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
 
struct vmw_private;
 
struct vmw_fence_manager;
 
/**
 * enum vmw_action_type - Types of actions that can be attached to a
 * fence object; VMW_ACTION_MAX sizes the per-type pending-action counters.
 */
enum vmw_action_type {
VMW_ACTION_EVENT = 0,
VMW_ACTION_MAX
};
 
struct vmw_fence_action {
struct list_head head;
enum vmw_action_type type;
void (*seq_passed) (struct vmw_fence_action *action);
void (*cleanup) (struct vmw_fence_action *action);
};
 
struct vmw_fence_obj {
struct kref kref;
u32 seqno;
 
struct vmw_fence_manager *fman;
struct list_head head;
uint32_t signaled;
uint32_t signal_mask;
struct list_head seq_passed_actions;
void (*destroy)(struct vmw_fence_obj *fence);
wait_queue_head_t queue;
};
 
extern struct vmw_fence_manager *
vmw_fence_manager_init(struct vmw_private *dev_priv);
 
extern void vmw_fence_manager_takedown(struct vmw_fence_manager *fman);
 
extern void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p);
 
extern struct vmw_fence_obj *
vmw_fence_obj_reference(struct vmw_fence_obj *fence);
 
extern void vmw_fences_update(struct vmw_fence_manager *fman);
 
extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
uint32_t flags);
 
extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence, uint32_t flags,
bool lazy,
bool interruptible, unsigned long timeout);
 
extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence);
 
extern int vmw_fence_create(struct vmw_fence_manager *fman,
uint32_t seqno,
uint32_t mask,
struct vmw_fence_obj **p_fence);
 
extern int vmw_user_fence_create(struct drm_file *file_priv,
struct vmw_fence_manager *fman,
uint32_t sequence,
uint32_t mask,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle);
 
extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);
 
extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);
 
extern int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
extern int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
struct list_head *event_list);
/*
extern int vmw_event_fence_action_queue(struct drm_file *file_priv,
struct vmw_fence_obj *fence,
struct drm_pending_event *event,
uint32_t *tv_sec,
uint32_t *tv_usec,
bool interruptible);
*/
 
#endif /* _VMWGFX_FENCE_H_ */
/drivers/video/drm/vmwgfx/vmwgfx_fifo.c
0,0 → 1,575
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#define mb() asm volatile("mfence" : : : "memory")
#define rmb() asm volatile("lfence" : : : "memory")
#define wmb() asm volatile("sfence" : : : "memory")
 
#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>
 
#define TASK_INTERRUPTIBLE 1
#define TASK_UNINTERRUPTIBLE 2
 
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
 
fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
 
hwversion = ioread32(fifo_mem +
((fifo->capabilities &
SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
SVGA_FIFO_3D_HWVERSION_REVISED :
SVGA_FIFO_3D_HWVERSION));
 
if (hwversion == 0)
return false;
 
if (hwversion < SVGA3D_HWVERSION_WS8_B1)
return false;
 
/* Non-Screen Object path does not support surfaces */
if (!dev_priv->sou_priv)
return false;
 
return true;
}
 
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t caps;
 
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
 
caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
if (caps & SVGA_FIFO_CAP_PITCHLOCK)
return true;
 
return false;
}
 
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
	uint32_t dummy;
	int ret;
 
ENTER();
 
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = KernelAlloc(fifo->static_buffer_size);
if (unlikely(fifo->static_buffer == NULL))
return -ENOMEM;
 
fifo->dynamic_buffer = NULL;
fifo->reserved_size = 0;
fifo->using_bounce_buffer = false;
 
mutex_init(&fifo->fifo_mutex);
// init_rwsem(&fifo->rwsem);
 
/*
* Allow mapping the first page read-only to user-space.
*/
 
DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
 
mutex_lock(&dev_priv->hw_mutex);
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
 
min = 4;
if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
min <<= 2;
 
if (min < PAGE_SIZE)
min = PAGE_SIZE;
 
iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
wmb();
iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
mb();
 
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
mutex_unlock(&dev_priv->hw_mutex);
 
max = ioread32(fifo_mem + SVGA_FIFO_MAX);
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
 
DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
(unsigned int) max,
(unsigned int) min,
(unsigned int) fifo->capabilities);
 
atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
vmw_marker_queue_init(&fifo->marker_queue);
 
	ret = 0; /* vmw_fifo_send_fence(dev_priv, &dummy); */
LEAVE();
return ret;
}
 
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 
mutex_lock(&dev_priv->hw_mutex);
 
if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}
 
mutex_unlock(&dev_priv->hw_mutex);
}
 
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 
mutex_lock(&dev_priv->hw_mutex);
 
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
 
dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
dev_priv->config_done_state);
vmw_write(dev_priv, SVGA_REG_ENABLE,
dev_priv->enable_state);
vmw_write(dev_priv, SVGA_REG_TRACES,
dev_priv->traces_state);
 
mutex_unlock(&dev_priv->hw_mutex);
vmw_marker_queue_takedown(&fifo->marker_queue);
 
if (likely(fifo->static_buffer != NULL)) {
vfree(fifo->static_buffer);
fifo->static_buffer = NULL;
}
 
if (likely(fifo->dynamic_buffer != NULL)) {
vfree(fifo->dynamic_buffer);
fifo->dynamic_buffer = NULL;
}
}
 
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
 
return ((max - next_cmd) + (stop - min) <= bytes);
}
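/*
 * Worked example for the fullness test above, with hypothetical register
 * values: min = 0x1000, max = 0x2000, next_cmd = 0x1f00, stop = 0x1100.
 * The writer may fill from next_cmd to the wrap point and then from min
 * up to stop, so the free span is
 * (0x2000 - 0x1f00) + (0x1100 - 0x1000) = 0x200 bytes, and a request for
 * 0x200 bytes or more has to wait.
 */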
 
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
uint32_t bytes, bool interruptible,
unsigned long timeout)
{
int ret = 0;
unsigned long end_jiffies = GetTimerTicks() + timeout;
DEFINE_WAIT(__wait);
 
DRM_INFO("Fifo wait noirq.\n");
 
for (;;) {
// prepare_to_wait(&dev_priv->fifo_queue, &__wait,
// (interruptible) ?
// TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (!vmw_fifo_is_full(dev_priv, bytes))
break;
if (time_after_eq(GetTimerTicks(), end_jiffies)) {
ret = -EBUSY;
DRM_ERROR("SVGA device lockup.\n");
break;
}
delay(1);
}
// finish_wait(&dev_priv->fifo_queue, &__wait);
wake_up_all(&dev_priv->fifo_queue);
DRM_INFO("Fifo noirq exit.\n");
return ret;
}
 
static int vmw_fifo_wait(struct vmw_private *dev_priv,
uint32_t bytes, bool interruptible,
unsigned long timeout)
{
long ret = 1L;
unsigned long irq_flags;
 
if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
return 0;
 
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return vmw_fifo_wait_noirq(dev_priv, bytes,
interruptible, timeout);
 
mutex_lock(&dev_priv->hw_mutex);
if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FIFO_PROGRESS,
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
 
if (interruptible)
ret = wait_event_interruptible_timeout
(dev_priv->fifo_queue,
!vmw_fifo_is_full(dev_priv, bytes), timeout);
else
ret = wait_event_timeout
(dev_priv->fifo_queue,
!vmw_fifo_is_full(dev_priv, bytes), timeout);
 
if (unlikely(ret == 0))
ret = -EBUSY;
else if (likely(ret > 0))
ret = 0;
 
mutex_lock(&dev_priv->hw_mutex);
if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
 
return ret;
}
 
/**
* Reserve @bytes number of bytes in the fifo.
*
* This function will return NULL (error) on two conditions:
 * if it times out waiting for fifo space, or if @bytes is larger than the
 * available fifo space.
 *
 * Returns:
 * Pointer to the fifo, or NULL on error (possible hardware hang).
*/
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
uint32_t next_cmd;
uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
int ret;
 
mutex_lock(&fifo_state->fifo_mutex);
max = ioread32(fifo_mem + SVGA_FIFO_MAX);
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
 
if (unlikely(bytes >= (max - min)))
goto out_err;
 
BUG_ON(fifo_state->reserved_size != 0);
BUG_ON(fifo_state->dynamic_buffer != NULL);
 
fifo_state->reserved_size = bytes;
 
while (1) {
uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
bool need_bounce = false;
bool reserve_in_place = false;
 
if (next_cmd >= stop) {
if (likely((next_cmd + bytes < max ||
(next_cmd + bytes == max && stop > min))))
reserve_in_place = true;
 
else if (vmw_fifo_is_full(dev_priv, bytes)) {
ret = vmw_fifo_wait(dev_priv, bytes,
false, 3 * HZ);
if (unlikely(ret != 0))
goto out_err;
} else
need_bounce = true;
 
} else {
 
if (likely((next_cmd + bytes < stop)))
reserve_in_place = true;
else {
ret = vmw_fifo_wait(dev_priv, bytes,
false, 3 * HZ);
if (unlikely(ret != 0))
goto out_err;
}
}
 
if (reserve_in_place) {
if (reserveable || bytes <= sizeof(uint32_t)) {
fifo_state->using_bounce_buffer = false;
 
if (reserveable)
iowrite32(bytes, fifo_mem +
SVGA_FIFO_RESERVED);
return fifo_mem + (next_cmd >> 2);
} else {
need_bounce = true;
}
}
 
if (need_bounce) {
fifo_state->using_bounce_buffer = true;
if (bytes < fifo_state->static_buffer_size)
return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = kmalloc(bytes, 0);
				if (unlikely(fifo_state->dynamic_buffer == NULL))
					goto out_err;
				return fifo_state->dynamic_buffer;
}
}
}
out_err:
fifo_state->reserved_size = 0;
mutex_unlock(&fifo_state->fifo_mutex);
return NULL;
}
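/*
 * Typical reserve/commit pairing, as a sketch (SVGA_CMD_UPDATE and
 * SVGAFifoCmdUpdate come from the SVGA headers; the rectangle values are
 * made up):
 *
 *	struct {
 *		uint32_t cmd;
 *		SVGAFifoCmdUpdate body;
 *	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 *
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *
 *	cmd->cmd = SVGA_CMD_UPDATE;
 *	cmd->body.x = 0;
 *	cmd->body.y = 0;
 *	cmd->body.width = 640;
 *	cmd->body.height = 480;
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */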
 
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
__le32 __iomem *fifo_mem,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
uint32_t chunk_size = max - next_cmd;
uint32_t rest;
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
fifo_state->dynamic_buffer : fifo_state->static_buffer;
 
if (bytes < chunk_size)
chunk_size = bytes;
 
iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
// mb();
memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
rest = bytes - chunk_size;
if (rest)
memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
rest);
}
 
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
__le32 __iomem *fifo_mem,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
fifo_state->dynamic_buffer : fifo_state->static_buffer;
 
while (bytes > 0) {
iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
next_cmd += sizeof(uint32_t);
if (unlikely(next_cmd == max))
next_cmd = min;
mb();
iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
mb();
bytes -= sizeof(uint32_t);
}
}
 
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 
BUG_ON((bytes & 3) != 0);
BUG_ON(bytes > fifo_state->reserved_size);
 
fifo_state->reserved_size = 0;
 
if (fifo_state->using_bounce_buffer) {
if (reserveable)
vmw_fifo_res_copy(fifo_state, fifo_mem,
next_cmd, max, min, bytes);
else
vmw_fifo_slow_copy(fifo_state, fifo_mem,
next_cmd, max, min, bytes);
 
if (fifo_state->dynamic_buffer) {
vfree(fifo_state->dynamic_buffer);
fifo_state->dynamic_buffer = NULL;
}
 
}
 
// down_write(&fifo_state->rwsem);
if (fifo_state->using_bounce_buffer || reserveable) {
next_cmd += bytes;
if (next_cmd >= max)
next_cmd -= max - min;
mb();
iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
}
 
if (reserveable)
iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
// mb();
// up_write(&fifo_state->rwsem);
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
mutex_unlock(&fifo_state->fifo_mutex);
}
 
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct svga_fifo_cmd_fence *cmd_fence;
void *fm;
int ret = 0;
uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
 
fm = vmw_fifo_reserve(dev_priv, bytes);
if (unlikely(fm == NULL)) {
*seqno = atomic_read(&dev_priv->marker_seq);
ret = -ENOMEM;
(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
false, 3*HZ);
goto out_err;
}
 
do {
*seqno = atomic_add_return(1, &dev_priv->marker_seq);
} while (*seqno == 0);
 
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
 
/*
* Don't request hardware to send a fence. The
* waiting code in vmwgfx_irq.c will emulate this.
*/
 
vmw_fifo_commit(dev_priv, 0);
return 0;
}
 
*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
cmd_fence = (struct svga_fifo_cmd_fence *)
((unsigned long)fm + sizeof(__le32));
 
iowrite32(*seqno, &cmd_fence->fence);
vmw_fifo_commit(dev_priv, bytes);
(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
vmw_update_seqno(dev_priv, fifo_state);
 
out_err:
return ret;
}
 
/**
* vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
*
* @dev_priv: The device private structure.
* @cid: The hardware context id used for the query.
*
* This function is used to emit a dummy occlusion query with
* no primitives rendered between query begin and query end.
* It's used to provide a query barrier, in order to know that when
* this query is finished, all preceding queries are also finished.
*
 * A query result structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must be either reserved or pinned when this function is called.
*
* Returns -ENOMEM on failure to reserve fifo space.
*/
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
uint32_t cid)
{
/*
* A query wait without a preceding query end will
* actually finish all queries for this cid
* without writing to the query result structure.
*/
 
struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery body;
} *cmd;
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 
if (unlikely(cmd == NULL)) {
DRM_ERROR("Out of fifo space for dummy query.\n");
return -ENOMEM;
}
 
cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = cid;
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
 
if (bo->mem.mem_type == TTM_PL_VRAM) {
cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
cmd->body.guestResult.offset = bo->offset;
} else {
cmd->body.guestResult.gmrId = bo->mem.start;
cmd->body.guestResult.offset = 0;
}
 
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
}
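/*
 * Sketch of a hypothetical caller: emit the dummy query as a barrier and
 * then fence it, so that the fence signaling implies that all earlier
 * queries for the context have completed:
 *
 *	ret = vmw_fifo_emit_dummy_query(dev_priv, cid);
 *	if (ret == 0)
 *		ret = vmw_fifo_send_fence(dev_priv, &seqno);
 */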
/drivers/video/drm/vmwgfx/vmwgfx_gmr.c
0,0 → 1,137
/**************************************************************************
*
* Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#define mb() asm volatile("mfence" : : : "memory")
#define rmb() asm volatile("lfence" : : : "memory")
#define wmb() asm volatile("sfence" : : : "memory")
 
#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_driver.h>
#include "vmwgfx_drv.h"
 
 
#define VMW_PPN_SIZE sizeof(unsigned long)
 
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
struct page *pages[],
unsigned long num_pages,
int gmr_id)
{
SVGAFifoCmdDefineGMR2 define_cmd;
SVGAFifoCmdRemapGMR2 remap_cmd;
uint32_t define_size = sizeof(define_cmd) + 4;
uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
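	/*
	 * Each FIFO command is a 32-bit command id followed by its body,
	 * hence the "+ 4" in both sizes above; the remap command also
	 * carries one page number (VMW_PPN_SIZE bytes) per page.
	 */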
uint32_t *cmd;
uint32_t *cmd_orig;
uint32_t i;
 
cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
 
define_cmd.gmrId = gmr_id;
define_cmd.numPages = num_pages;
 
remap_cmd.gmrId = gmr_id;
remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
remap_cmd.offsetPages = 0;
remap_cmd.numPages = num_pages;
 
*cmd++ = SVGA_CMD_DEFINE_GMR2;
memcpy(cmd, &define_cmd, sizeof(define_cmd));
cmd += sizeof(define_cmd) / sizeof(uint32);
 
*cmd++ = SVGA_CMD_REMAP_GMR2;
memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
cmd += sizeof(remap_cmd) / sizeof(uint32);
 
for (i = 0; i < num_pages; ++i) {
if (VMW_PPN_SIZE <= 4)
*cmd = page_to_pfn(*pages++);
else
*((uint64_t *)cmd) = page_to_pfn(*pages++);
 
cmd += VMW_PPN_SIZE / sizeof(*cmd);
}
 
vmw_fifo_commit(dev_priv, define_size + remap_size);
 
return 0;
}
 
static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
int gmr_id)
{
SVGAFifoCmdDefineGMR2 define_cmd;
uint32_t define_size = sizeof(define_cmd) + 4;
uint32_t *cmd;
 
cmd = vmw_fifo_reserve(dev_priv, define_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("GMR2 unbind failed.\n");
return;
}
define_cmd.gmrId = gmr_id;
define_cmd.numPages = 0;
 
*cmd++ = SVGA_CMD_DEFINE_GMR2;
memcpy(cmd, &define_cmd, sizeof(define_cmd));
 
vmw_fifo_commit(dev_priv, define_size);
}
 
 
int vmw_gmr_bind(struct vmw_private *dev_priv,
struct page *pages[],
unsigned long num_pages,
int gmr_id)
{
	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
		return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id);

	printf("%s: only GMR2-capable devices are supported\n", __FUNCTION__);
return -EINVAL;
}
 
 
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
vmw_gmr2_unbind(dev_priv, gmr_id);
return;
}
 
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
wmb();
vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
mb();
mutex_unlock(&dev_priv->hw_mutex);
}
/drivers/video/drm/vmwgfx/vmwgfx_gmrid_manager.c
0,0 → 1,161
/**************************************************************************
*
* Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
 
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
 
struct vmwgfx_gmrid_man {
spinlock_t lock;
struct ida gmr_ida;
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
uint32_t used_gmr_pages;
};
 
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
struct vmwgfx_gmrid_man *gman =
(struct vmwgfx_gmrid_man *)man->priv;
int ret = 0;
int id;
 
mem->mm_node = NULL;
 
spin_lock(&gman->lock);
 
if (gman->max_gmr_pages > 0) {
gman->used_gmr_pages += bo->num_pages;
if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
goto out_err_locked;
}
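	/*
	 * Old-style ida allocation: ida_get_new() fails with -EAGAIN
	 * when its preallocated cache is empty, in which case
	 * ida_pre_get() (which may sleep, hence the unlock) refills the
	 * cache and the allocation is retried.
	 */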
 
do {
spin_unlock(&gman->lock);
if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
ret = -ENOMEM;
goto out_err;
}
spin_lock(&gman->lock);
 
ret = ida_get_new(&gman->gmr_ida, &id);
if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
ida_remove(&gman->gmr_ida, id);
ret = 0;
goto out_err_locked;
}
} while (ret == -EAGAIN);
 
if (likely(ret == 0)) {
mem->mm_node = gman;
mem->start = id;
mem->num_pages = bo->num_pages;
} else
goto out_err_locked;
 
spin_unlock(&gman->lock);
return 0;
 
out_err:
spin_lock(&gman->lock);
out_err_locked:
gman->used_gmr_pages -= bo->num_pages;
spin_unlock(&gman->lock);
return ret;
}
 
static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct vmwgfx_gmrid_man *gman =
(struct vmwgfx_gmrid_man *)man->priv;
 
if (mem->mm_node) {
spin_lock(&gman->lock);
ida_remove(&gman->gmr_ida, mem->start);
gman->used_gmr_pages -= mem->num_pages;
spin_unlock(&gman->lock);
mem->mm_node = NULL;
}
}
 
static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
struct vmw_private *dev_priv =
container_of(man->bdev, struct vmw_private, bdev);
struct vmwgfx_gmrid_man *gman =
kzalloc(sizeof(*gman), GFP_KERNEL);
 
if (unlikely(gman == NULL))
return -ENOMEM;
 
spin_lock_init(&gman->lock);
gman->max_gmr_pages = dev_priv->max_gmr_pages;
gman->used_gmr_pages = 0;
ida_init(&gman->gmr_ida);
gman->max_gmr_ids = p_size;
man->priv = (void *) gman;
return 0;
}
 
static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
{
struct vmwgfx_gmrid_man *gman =
(struct vmwgfx_gmrid_man *)man->priv;
 
if (gman) {
ida_destroy(&gman->gmr_ida);
kfree(gman);
}
return 0;
}
 
static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
const char *prefix)
{
printk(KERN_INFO "%s: No debug info available for the GMR "
"id manager.\n", prefix);
}
 
const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
vmw_gmrid_man_init,
vmw_gmrid_man_takedown,
vmw_gmrid_man_get_node,
vmw_gmrid_man_put_node,
vmw_gmrid_man_debug
};
/drivers/video/drm/vmwgfx/vmwgfx_irq.c
0,0 → 1,326
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
 
#define TASK_INTERRUPTIBLE 1
#define TASK_UNINTERRUPTIBLE 2
 
#define VMW_FENCE_WRAP (1 << 24)
 
irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *)arg;
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status, masked_status;
 
spin_lock(&dev_priv->irq_lock);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
masked_status = status & dev_priv->irq_mask;
spin_unlock(&dev_priv->irq_lock);
 
if (likely(status))
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 
if (!masked_status)
return IRQ_NONE;
 
if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
SVGA_IRQFLAG_FENCE_GOAL)) {
vmw_fences_update(dev_priv->fman);
wake_up_all(&dev_priv->fence_queue);
}
 
if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
wake_up_all(&dev_priv->fifo_queue);
 
 
return IRQ_HANDLED;
}
 
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
uint32_t busy;
 
mutex_lock(&dev_priv->hw_mutex);
busy = vmw_read(dev_priv, SVGA_REG_BUSY);
mutex_unlock(&dev_priv->hw_mutex);
 
return (busy == 0);
}
 
void vmw_update_seqno(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
if (dev_priv->last_read_seqno != seqno) {
dev_priv->last_read_seqno = seqno;
vmw_marker_pull(&fifo_state->marker_queue, seqno);
vmw_fences_update(dev_priv->fman);
}
}
 
bool vmw_seqno_passed(struct vmw_private *dev_priv,
uint32_t seqno)
{
struct vmw_fifo_state *fifo_state;
bool ret;
 
if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return true;
 
fifo_state = &dev_priv->fifo;
vmw_update_seqno(dev_priv, fifo_state);
if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return true;
 
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
vmw_fifo_idle(dev_priv, seqno))
return true;
 
	/*
	 * Last, check whether the seqno is ahead of what has actually
	 * been emitted; in that case the fence is stale and must be
	 * considered signaled.
	 */
 
ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
> VMW_FENCE_WRAP);
 
return ret;
}
 
int vmw_fallback_wait(struct vmw_private *dev_priv,
bool lazy,
bool fifo_idle,
uint32_t seqno,
bool interruptible,
unsigned long timeout)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
 
uint32_t count = 0;
uint32_t signal_seq;
int ret;
unsigned long end_jiffies = GetTimerTicks() + timeout;
bool (*wait_condition)(struct vmw_private *, uint32_t);
DEFINE_WAIT(__wait);
 
wait_condition = (fifo_idle) ? &vmw_fifo_idle :
&vmw_seqno_passed;
 
	/*
	 * Block command submission while waiting for idle.
	 */
 
// if (fifo_idle)
// down_read(&fifo_state->rwsem);
signal_seq = atomic_read(&dev_priv->marker_seq);
ret = 0;
 
for (;;) {
// prepare_to_wait(&dev_priv->fence_queue, &__wait,
// (interruptible) ?
// TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (wait_condition(dev_priv, seqno))
break;
if (time_after_eq(GetTimerTicks(), end_jiffies)) {
DRM_ERROR("SVGA device lockup.\n");
break;
}
if (lazy)
delay(1);
else if ((++count & 0x0F) == 0) {
			/*
			 * FIXME: Use schedule_hrtimeout() here for
			 * newer kernels and lower CPU utilization.
			 */
 
delay(1);
}
}
// finish_wait(&dev_priv->fence_queue, &__wait);
if (ret == 0 && fifo_idle) {
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
}
wake_up_all(&dev_priv->fence_queue);
// if (fifo_idle)
// up_read(&fifo_state->rwsem);
 
return ret;
}
 
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
mutex_lock(&dev_priv->hw_mutex);
if (dev_priv->fence_queue_waiters++ == 0) {
unsigned long irq_flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_ANY_FENCE,
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
}
 
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
mutex_lock(&dev_priv->hw_mutex);
if (--dev_priv->fence_queue_waiters == 0) {
unsigned long irq_flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
}
 
 
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
mutex_lock(&dev_priv->hw_mutex);
if (dev_priv->goal_queue_waiters++ == 0) {
unsigned long irq_flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FENCE_GOAL,
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
}
 
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
mutex_lock(&dev_priv->hw_mutex);
if (--dev_priv->goal_queue_waiters == 0) {
unsigned long irq_flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
}
 
int vmw_wait_seqno(struct vmw_private *dev_priv,
bool lazy, uint32_t seqno,
bool interruptible, unsigned long timeout)
{
long ret;
struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return 0;
 
if (likely(vmw_seqno_passed(dev_priv, seqno)))
return 0;
 
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 
if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
return vmw_fallback_wait(dev_priv, lazy, true, seqno,
interruptible, timeout);
 
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return vmw_fallback_wait(dev_priv, lazy, false, seqno,
interruptible, timeout);
 
vmw_seqno_waiter_add(dev_priv);
 
if (interruptible)
ret = wait_event_interruptible_timeout
(dev_priv->fence_queue,
vmw_seqno_passed(dev_priv, seqno),
timeout);
else
ret = wait_event_timeout
(dev_priv->fence_queue,
vmw_seqno_passed(dev_priv, seqno),
timeout);
 
vmw_seqno_waiter_remove(dev_priv);
 
if (unlikely(ret == 0))
ret = -EBUSY;
else if (likely(ret > 0))
ret = 0;
 
return ret;
}
 
void vmw_irq_preinstall(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status;
 
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return;
 
spin_lock_init(&dev_priv->irq_lock);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
 
int vmw_irq_postinstall(struct drm_device *dev)
{
return 0;
}
 
void vmw_irq_uninstall(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status;
 
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return;
 
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
mutex_unlock(&dev_priv->hw_mutex);
 
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
 
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
list_del_init(&wait->task_list);
return 1;
}
 
/drivers/video/drm/vmwgfx/vmwgfx_kms.c
0,0 → 1,2073
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#include "vmwgfx_kms.h"
 
 
/* Might need an hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
 
struct vmw_clip_rect {
int x1, x2, y1, y2;
};
 
/**
 * Clip @num_rects number of @rects against @clip, storing the
 * results in @out_rects and the number of surviving rects in @out_num.
*/
void vmw_clip_cliprects(struct drm_clip_rect *rects,
int num_rects,
struct vmw_clip_rect clip,
SVGASignedRect *out_rects,
int *out_num)
{
int i, k;
 
for (i = 0, k = 0; i < num_rects; i++) {
int x1 = max_t(int, clip.x1, rects[i].x1);
int y1 = max_t(int, clip.y1, rects[i].y1);
int x2 = min_t(int, clip.x2, rects[i].x2);
int y2 = min_t(int, clip.y2, rects[i].y2);
 
if (x1 >= x2)
continue;
if (y1 >= y2)
continue;
 
out_rects[k].left = x1;
out_rects[k].top = y1;
out_rects[k].right = x2;
out_rects[k].bottom = y2;
k++;
}
 
*out_num = k;
}
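/*
 * Example with made-up numbers: clipping the rect {x1 50, y1 50, x2 150,
 * y2 150} against clip = {x1 0, x2 80, y1 0, y2 80} yields the single
 * output rect {left 50, top 50, right 80, bottom 80}; rects falling
 * entirely outside the clip are dropped, and *out_num counts only the
 * survivors.
 */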
 
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
// if (du->cursor_surface)
// vmw_surface_unreference(&du->cursor_surface);
// if (du->cursor_dmabuf)
// vmw_dmabuf_unreference(&du->cursor_dmabuf);
drm_crtc_cleanup(&du->crtc);
drm_encoder_cleanup(&du->encoder);
drm_connector_cleanup(&du->connector);
}
 
#if 0
/*
* Display Unit Cursor functions
*/
 
int vmw_cursor_update_image(struct vmw_private *dev_priv,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY)
{
struct {
u32 cmd;
SVGAFifoCmdDefineAlphaCursor cursor;
} *cmd;
u32 image_size = width * height * 4;
u32 cmd_size = sizeof(*cmd) + image_size;
 
if (!image)
return -EINVAL;
 
cmd = vmw_fifo_reserve(dev_priv, cmd_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
return -ENOMEM;
}
 
memset(cmd, 0, sizeof(*cmd));
 
memcpy(&cmd[1], image, image_size);
 
cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
cmd->cursor.id = cpu_to_le32(0);
cmd->cursor.width = cpu_to_le32(width);
cmd->cursor.height = cpu_to_le32(height);
cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
 
vmw_fifo_commit(dev_priv, cmd_size);
 
return 0;
}
 
int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
u32 width, u32 height,
u32 hotspotX, u32 hotspotY)
{
struct ttm_bo_kmap_obj map;
unsigned long kmap_offset;
unsigned long kmap_num;
void *virtual;
bool dummy;
int ret;
 
kmap_offset = 0;
kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return -EINVAL;
}
 
ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
if (unlikely(ret != 0))
goto err_unreserve;
 
virtual = ttm_kmap_obj_virtual(&map, &dummy);
ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
hotspotX, hotspotY);
 
ttm_bo_kunmap(&map);
err_unreserve:
ttm_bo_unreserve(&dmabuf->base);
 
return ret;
}
 
 
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t count;
 
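/* The host latches the new cursor state when SVGA_FIFO_CURSOR_COUNT
 * changes, so the count is bumped after writing ON/X/Y.
 */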
iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}
 
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *dmabuf = NULL;
int ret;
 
/*
* FIXME: Unclear whether there's any global state touched by the
* cursor_set function, especially vmw_cursor_update_position looks
* suspicious. For now take the easy route and reacquire all locks. We
* can do this since the caller in the drm core doesn't check anything
* which is protected by any locks.
*/
mutex_unlock(&crtc->mutex);
drm_modeset_lock_all(dev_priv->dev);
 
/* A lot of the code assumes this */
if (handle && (width != 64 || height != 64)) {
ret = -EINVAL;
goto out;
}
 
if (handle) {
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 
ret = vmw_user_lookup_handle(dev_priv, tfile,
handle, &surface, &dmabuf);
if (ret) {
DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
ret = -EINVAL;
goto out;
}
}
 
/* need to do this before taking down old image */
if (surface && !surface->snooper.image) {
DRM_ERROR("surface not suitable for cursor\n");
vmw_surface_unreference(&surface);
ret = -EINVAL;
goto out;
}
 
/* takedown old cursor */
if (du->cursor_surface) {
du->cursor_surface->snooper.crtc = NULL;
vmw_surface_unreference(&du->cursor_surface);
}
if (du->cursor_dmabuf)
vmw_dmabuf_unreference(&du->cursor_dmabuf);
 
/* setup new image */
if (surface) {
/* vmw_user_lookup_handle takes one reference */
du->cursor_surface = surface;
 
du->cursor_surface->snooper.crtc = crtc;
du->cursor_age = du->cursor_surface->snooper.age;
vmw_cursor_update_image(dev_priv, surface->snooper.image,
64, 64, du->hotspot_x, du->hotspot_y);
} else if (dmabuf) {
/* vmw_user_lookup_handle takes one reference */
du->cursor_dmabuf = dmabuf;
 
ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
du->hotspot_x, du->hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
ret = 0;
goto out;
}
 
vmw_cursor_update_position(dev_priv, true,
du->cursor_x + du->hotspot_x,
du->cursor_y + du->hotspot_y);
 
ret = 0;
out:
drm_modeset_unlock_all(dev_priv->dev);
mutex_lock(&crtc->mutex);
 
return ret;
}
 
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;
 
du->cursor_x = x + crtc->x;
du->cursor_y = y + crtc->y;
 
/*
* FIXME: Unclear whether there's any global state touched by the
* cursor_set function, especially vmw_cursor_update_position looks
* suspicious. For now take the easy route and reacquire all locks. We
* can do this since the caller in the drm core doesn't check anything
* which is protected by any locks.
*/
mutex_unlock(&crtc->mutex);
drm_modeset_lock_all(dev_priv->dev);
 
vmw_cursor_update_position(dev_priv, shown,
du->cursor_x + du->hotspot_x,
du->cursor_y + du->hotspot_y);
 
drm_modeset_unlock_all(dev_priv->dev);
mutex_lock(&crtc->mutex);
 
return 0;
}
 
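/* Snoop the cursor image out of a surface DMA command, keeping a CPU
 * copy in snooper.image so the host cursor can be refreshed without a
 * surface readback.
 */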
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
SVGA3dCmdHeader *header)
{
struct ttm_bo_kmap_obj map;
unsigned long kmap_offset;
unsigned long kmap_num;
SVGA3dCopyBox *box;
unsigned box_count;
void *virtual;
bool dummy;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int i, ret;
 
cmd = container_of(header, struct vmw_dma_cmd, header);
 
/* No snooper installed */
if (!srf->snooper.image)
return;
 
if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
DRM_ERROR("face and mipmap for cursors should never != 0\n");
return;
}
 
if (cmd->header.size < 64) {
DRM_ERROR("at least one full copy box must be given\n");
return;
}
 
box = (SVGA3dCopyBox *)&cmd[1];
box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
sizeof(SVGA3dCopyBox);
 
if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
box->x != 0 || box->y != 0 || box->z != 0 ||
box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
box->d != 1 || box_count != 1) {
/* TODO handle non-page-aligned offsets */
/* TODO handle nonzero dst & src offsets */
/* TODO handle more than one copy box */
DRM_ERROR("Cant snoop dma request for cursor!\n");
DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
box->srcx, box->srcy, box->srcz,
box->x, box->y, box->z,
box->w, box->h, box->d, box_count,
cmd->dma.guest.ptr.offset);
return;
}
 
kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
kmap_num = (64*64*4) >> PAGE_SHIFT;
 
ret = ttm_bo_reserve(bo, true, false, false, 0);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return;
}
 
ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
if (unlikely(ret != 0))
goto err_unreserve;
 
virtual = ttm_kmap_obj_virtual(&map, &dummy);
 
if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
memcpy(srf->snooper.image, virtual, 64*64*4);
} else {
/* snooper.image is a u32 pointer, so i * 64 steps one 64-pixel row. */
for (i = 0; i < box->h; i++)
memcpy(srf->snooper.image + i * 64,
virtual + i * cmd->dma.guest.pitch,
box->w * 4);
}
 
srf->snooper.age++;
 
/* we can't call vmw_cursor_update_image() from here, since
 * execbuf has already reserved fifo space.
*
* if (srf->snooper.crtc)
* vmw_ldu_crtc_cursor_update_image(dev_priv,
* srf->snooper.image, 64, 64,
* du->hotspot_x, du->hotspot_y);
*/
 
ttm_bo_kunmap(&map);
err_unreserve:
ttm_bo_unreserve(bo);
}
 
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_display_unit *du;
struct drm_crtc *crtc;
 
mutex_lock(&dev->mode_config.mutex);
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
du = vmw_crtc_to_du(crtc);
if (!du->cursor_surface ||
du->cursor_age == du->cursor_surface->snooper.age)
continue;
 
du->cursor_age = du->cursor_surface->snooper.age;
vmw_cursor_update_image(dev_priv,
du->cursor_surface->snooper.image,
64, 64, du->hotspot_x, du->hotspot_y);
}
 
mutex_unlock(&dev->mode_config.mutex);
}
#endif
 
/*
* Generic framebuffer code
*/
 
/*
* Surface framebuffer code
*/
 
#define vmw_framebuffer_to_vfbs(x) \
container_of(x, struct vmw_framebuffer_surface, base.base)
 
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
struct vmw_dma_buffer *buffer;
struct list_head head;
struct drm_master *master;
};
 
void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
// struct vmw_master *vmaster = vmw_master(vfbs->master);
 
 
// mutex_lock(&vmaster->fb_surf_mutex);
// list_del(&vfbs->head);
// mutex_unlock(&vmaster->fb_surf_mutex);
 
// drm_master_put(&vfbs->master);
// drm_framebuffer_cleanup(framebuffer);
// vmw_surface_unreference(&vfbs->surface);
// ttm_base_object_unref(&vfbs->base.user_obj);
 
kfree(vfbs);
}
 
static int do_surface_dirty_sou(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *framebuffer,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips, int inc,
struct vmw_fence_obj **out_fence)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_clip_rect *clips_ptr;
struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, num_units;
int ret = 0; /* silence warning */
int left, right, top, bottom;
 
struct {
SVGA3dCmdHeader header;
SVGA3dCmdBlitSurfaceToScreen body;
} *cmd;
SVGASignedRect *blits;
 
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
head) {
if (crtc->fb != &framebuffer->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
 
BUG_ON(!clips || !num_clips);
 
tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
if (unlikely(tmp == NULL)) {
DRM_ERROR("Temporary cliprect memory alloc failed.\n");
return -ENOMEM;
}
 
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kzalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Temporary fifo memory alloc failed.\n");
ret = -ENOMEM;
goto out_free_tmp;
}
 
/* setup blits pointer */
blits = (SVGASignedRect *)&cmd[1];
 
/* initial clip region */
left = clips->x1;
right = clips->x2;
top = clips->y1;
bottom = clips->y2;
 
/* skip the first clip rect */
for (i = 1, clips_ptr = clips + inc;
i < num_clips; i++, clips_ptr += inc) {
left = min_t(int, left, (int)clips_ptr->x1);
right = max_t(int, right, (int)clips_ptr->x2);
top = min_t(int, top, (int)clips_ptr->y1);
bottom = max_t(int, bottom, (int)clips_ptr->y2);
}
 
/* only need to do this once */
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
 
cmd->body.srcRect.left = left;
cmd->body.srcRect.right = right;
cmd->body.srcRect.top = top;
cmd->body.srcRect.bottom = bottom;
 
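/* Store all clips relative to the bounding box origin; the per-unit
 * loop below translates them into each screen's coordinate space.
 */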
clips_ptr = clips;
for (i = 0; i < num_clips; i++, clips_ptr += inc) {
tmp[i].x1 = clips_ptr->x1 - left;
tmp[i].x2 = clips_ptr->x2 - left;
tmp[i].y1 = clips_ptr->y1 - top;
tmp[i].y2 = clips_ptr->y2 - top;
}
 
/* do per unit writing, reuse fifo for each */
for (i = 0; i < num_units; i++) {
struct vmw_display_unit *unit = units[i];
struct vmw_clip_rect clip;
int num;
 
clip.x1 = left - unit->crtc.x;
clip.y1 = top - unit->crtc.y;
clip.x2 = right - unit->crtc.x;
clip.y2 = bottom - unit->crtc.y;
 
/* skip any crtcs that miss the clip region */
if (clip.x1 >= unit->crtc.mode.hdisplay ||
clip.y1 >= unit->crtc.mode.vdisplay ||
clip.x2 <= 0 || clip.y2 <= 0)
continue;
 
/*
* In order for the clip rects to be correctly scaled
* the src and dest rects need to be the same size.
*/
cmd->body.destRect.left = clip.x1;
cmd->body.destRect.right = clip.x2;
cmd->body.destRect.top = clip.y1;
cmd->body.destRect.bottom = clip.y2;
 
/* create a clip rect of the crtc in dest coords */
clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
clip.x1 = 0 - clip.x1;
clip.y1 = 0 - clip.y1;
 
/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
cmd->body.destScreenId = unit->unit;
 
/* clip and write blits to cmd stream */
vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
 
/* if no cliprects hit skip this */
if (num == 0)
continue;
 
/* only return the last fence */
if (out_fence && *out_fence)
vmw_fence_obj_unreference(out_fence);
 
/* recalculate package length */
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL, out_fence);
 
if (unlikely(ret != 0))
break;
}
 
 
kfree(cmd);
out_free_tmp:
kfree(tmp);
 
return ret;
}
 
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
// struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
struct drm_clip_rect norect;
int ret, inc = 1;
 
// if (unlikely(vfbs->master != file_priv->master))
// return -EINVAL;
 
/* Require ScreenObject support for 3D */
if (!dev_priv->sou_priv)
return -EINVAL;
 
// ret = ttm_read_lock(&vmaster->lock, true);
// if (unlikely(ret != 0))
// return ret;
 
if (!num_clips) {
num_clips = 1;
clips = &norect;
norect.x1 = norect.y1 = 0;
norect.x2 = framebuffer->width;
norect.y2 = framebuffer->height;
} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
num_clips /= 2;
inc = 2; /* skip source rects */
}
 
ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
flags, color,
clips, num_clips, inc, NULL);
 
// ttm_read_unlock(&vmaster->lock);
return ret;
}
 
static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.destroy = vmw_framebuffer_surface_destroy,
.dirty = vmw_framebuffer_surface_dirty,
};
 
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_surface *surface,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd
*mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
// struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
 
/* 3D is only supported on HWv8 hosts which support screen objects */
if (!dev_priv->sou_priv)
return -ENOSYS;
 
/*
* Sanity checks.
*/
 
/* Surface must be marked as a scanout. */
if (unlikely(!surface->scanout))
return -EINVAL;
 
if (unlikely(surface->mip_levels[0] != 1 ||
surface->num_sizes != 1 ||
surface->sizes[0].width < mode_cmd->width ||
surface->sizes[0].height < mode_cmd->height ||
surface->sizes[0].depth != 1)) {
DRM_ERROR("Incompatible surface dimensions "
"for requested mode.\n");
return -EINVAL;
}
 
switch (mode_cmd->depth) {
case 32:
format = SVGA3D_A8R8G8B8;
break;
case 24:
format = SVGA3D_X8R8G8B8;
break;
case 16:
format = SVGA3D_R5G6B5;
break;
case 15:
format = SVGA3D_A1R5G5B5;
break;
case 8:
format = SVGA3D_LUMINANCE8;
break;
default:
DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
return -EINVAL;
}
 
if (unlikely(format != surface->format)) {
DRM_ERROR("Invalid surface format for requested mode.\n");
return -EINVAL;
}
 
vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
if (!vfbs) {
ret = -ENOMEM;
goto out_err1;
}
 
if (!vmw_surface_reference(surface)) {
DRM_ERROR("failed to reference surface %p\n", surface);
ret = -EINVAL;
goto out_err2;
}
 
/* XXX get the first 3 from the surface info */
vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
vfbs->base.base.pitches[0] = mode_cmd->pitch;
vfbs->base.base.depth = mode_cmd->depth;
vfbs->base.base.width = mode_cmd->width;
vfbs->base.base.height = mode_cmd->height;
vfbs->surface = surface;
vfbs->base.user_handle = mode_cmd->handle;
// vfbs->master = drm_master_get(file_priv->master);
 
// mutex_lock(&vmaster->fb_surf_mutex);
// list_add_tail(&vfbs->head, &vmaster->fb_surf);
// mutex_unlock(&vmaster->fb_surf_mutex);
 
*out = &vfbs->base;
 
ret = drm_framebuffer_init(dev, &vfbs->base.base,
&vmw_framebuffer_surface_funcs);
if (ret)
goto out_err3;
 
return 0;
 
out_err3:
vmw_surface_unreference(&surface);
out_err2:
kfree(vfbs);
out_err1:
return ret;
}
 
/*
* Dmabuf framebuffer code
*/
 
#define vmw_framebuffer_to_vfbd(x) \
container_of(x, struct vmw_framebuffer_dmabuf, base.base)
 
struct vmw_framebuffer_dmabuf {
struct vmw_framebuffer base;
struct vmw_dma_buffer *buffer;
};
 
void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
 
// drm_framebuffer_cleanup(framebuffer);
// vmw_dmabuf_unreference(&vfbd->buffer);
// ttm_base_object_unref(&vfbd->base.user_obj);
 
kfree(vfbd);
}
 
static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips, int increment)
{
size_t fifo_size;
int i;
 
struct {
uint32_t header;
SVGAFifoCmdUpdate body;
} *cmd;
 
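/* Legacy display unit path: emit one SVGA_CMD_UPDATE per clip rect
 * straight into the FIFO.
 */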
fifo_size = sizeof(*cmd) * num_clips;
cmd = vmw_fifo_reserve(dev_priv, fifo_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
return -ENOMEM;
}
 
memset(cmd, 0, fifo_size);
for (i = 0; i < num_clips; i++, clips += increment) {
cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
cmd[i].body.x = cpu_to_le32(clips->x1);
cmd[i].body.y = cpu_to_le32(clips->y1);
cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
}
 
vmw_fifo_commit(dev_priv, fifo_size);
return 0;
}
 
static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer)
{
int depth = framebuffer->base.depth;
size_t fifo_size;
int ret;
 
struct {
uint32_t header;
SVGAFifoCmdDefineGMRFB body;
} *cmd;
 
/* Emulate RGBA support: contrary to what svga_reg.h suggests, a
 * 32-bit color depth is not supported by hosts. This only matters
 * if we later read this value back and expect what we uploaded.
*/
if (depth == 32)
depth = 24;
 
fifo_size = sizeof(*cmd);
cmd = kmalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
return -ENOMEM;
}
 
memset(cmd, 0, fifo_size);
cmd->header = SVGA_CMD_DEFINE_GMRFB;
cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
cmd->body.format.colorDepth = depth;
cmd->body.format.reserved = 0;
cmd->body.bytesPerLine = framebuffer->base.pitches[0];
cmd->body.ptr.gmrId = framebuffer->user_handle;
cmd->body.ptr.offset = 0;
 
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL, NULL);
 
kfree(cmd);
 
return ret;
}
 
static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips, int increment,
struct vmw_fence_obj **out_fence)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_clip_rect *clips_ptr;
int i, k, num_units, ret;
struct drm_crtc *crtc;
size_t fifo_size;
 
struct {
uint32_t header;
SVGAFifoCmdBlitGMRFBToScreen body;
} *blits;
 
ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
if (unlikely(ret != 0))
return ret; /* define_gmrfb prints warnings */
 
fifo_size = sizeof(*blits) * num_clips;
blits = kmalloc(fifo_size, GFP_KERNEL);
if (unlikely(blits == NULL)) {
DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
return -ENOMEM;
}
 
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
if (crtc->fb != &framebuffer->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
 
for (k = 0; k < num_units; k++) {
struct vmw_display_unit *unit = units[k];
int hit_num = 0;
 
clips_ptr = clips;
for (i = 0; i < num_clips; i++, clips_ptr += increment) {
int clip_x1 = clips_ptr->x1 - unit->crtc.x;
int clip_y1 = clips_ptr->y1 - unit->crtc.y;
int clip_x2 = clips_ptr->x2 - unit->crtc.x;
int clip_y2 = clips_ptr->y2 - unit->crtc.y;
int move_x, move_y;
 
/* skip any crtcs that miss the clip region */
if (clip_x1 >= unit->crtc.mode.hdisplay ||
clip_y1 >= unit->crtc.mode.vdisplay ||
clip_x2 <= 0 || clip_y2 <= 0)
continue;
 
/* clip size to crtc size */
clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
 
/* translate both src and dest to bring clip into screen */
move_x = min_t(int, clip_x1, 0);
move_y = min_t(int, clip_y1, 0);
 
/* actual translate done here */
blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
blits[hit_num].body.destScreenId = unit->unit;
blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
blits[hit_num].body.destRect.left = clip_x1 - move_x;
blits[hit_num].body.destRect.top = clip_y1 - move_y;
blits[hit_num].body.destRect.right = clip_x2;
blits[hit_num].body.destRect.bottom = clip_y2;
hit_num++;
}
 
/* no clips hit the crtc */
if (hit_num == 0)
continue;
 
/* only return the last fence */
if (out_fence && *out_fence)
vmw_fence_obj_unreference(out_fence);
 
fifo_size = sizeof(*blits) * hit_num;
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
fifo_size, 0, NULL, out_fence);
 
if (unlikely(ret != 0))
break;
}
 
kfree(blits);
 
return ret;
}
 
int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
// struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
struct drm_clip_rect norect;
int ret, increment = 1;
 
// ret = ttm_read_lock(&vmaster->lock, true);
// if (unlikely(ret != 0))
// return ret;
 
if (!num_clips) {
num_clips = 1;
clips = &norect;
norect.x1 = norect.y1 = 0;
norect.x2 = framebuffer->width;
norect.y2 = framebuffer->height;
} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
num_clips /= 2;
increment = 2;
}
 
if (dev_priv->ldu_priv) {
ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base,
flags, color,
clips, num_clips, increment);
} else {
ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
flags, color,
clips, num_clips, increment, NULL);
}
 
// ttm_read_unlock(&vmaster->lock);
return ret;
}
 
static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
.destroy = vmw_framebuffer_dmabuf_destroy,
.dirty = vmw_framebuffer_dmabuf_dirty,
};
 
/**
* Pin the DMA buffer to the start of VRAM.
*/
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(&vfb->base);
int ret;
 
/* This code should not be used with screen objects */
BUG_ON(dev_priv->sou_priv);
 
// vmw_overlay_pause_all(dev_priv);
 
ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);
 
// vmw_overlay_resume_all(dev_priv);
 
WARN_ON(ret != 0);
 
return 0;
}
 
static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(&vfb->base);
 
if (!vfbd->buffer) {
WARN_ON(!vfbd->buffer);
return 0;
}
 
return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
}
 
#if 0
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd
*mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_dmabuf *vfbd;
unsigned int requested_size;
int ret;
 
requested_size = mode_cmd->height * mode_cmd->pitch;
if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n");
return -EINVAL;
}
 
/* Limited framebuffer color depth support for screen objects */
if (dev_priv->sou_priv) {
switch (mode_cmd->depth) {
case 32:
case 24:
/* Only support 32 bpp for 32 and 24 depth fbs */
if (mode_cmd->bpp == 32)
break;
 
DRM_ERROR("Invalid color depth/bbp: %d %d\n",
mode_cmd->depth, mode_cmd->bpp);
return -EINVAL;
case 16:
case 15:
/* Only support 16 bpp for 16 and 15 depth fbs */
if (mode_cmd->bpp == 16)
break;
 
DRM_ERROR("Invalid color depth/bbp: %d %d\n",
mode_cmd->depth, mode_cmd->bpp);
return -EINVAL;
default:
DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
return -EINVAL;
}
}
 
vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
if (!vfbd) {
ret = -ENOMEM;
goto out_err1;
}
 
if (!vmw_dmabuf_reference(dmabuf)) {
DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
ret = -EINVAL;
goto out_err2;
}
 
vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
vfbd->base.base.pitches[0] = mode_cmd->pitch;
vfbd->base.base.depth = mode_cmd->depth;
vfbd->base.base.width = mode_cmd->width;
vfbd->base.base.height = mode_cmd->height;
if (!dev_priv->sou_priv) {
vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
}
vfbd->base.dmabuf = true;
vfbd->buffer = dmabuf;
vfbd->base.user_handle = mode_cmd->handle;
*out = &vfbd->base;
 
ret = drm_framebuffer_init(dev, &vfbd->base.base,
&vmw_framebuffer_dmabuf_funcs);
if (ret)
goto out_err3;
 
return 0;
 
out_err3:
vmw_dmabuf_unreference(&dmabuf);
out_err2:
kfree(vfbd);
out_err1:
return ret;
}
#endif
 
/*
* Generic Kernel modesetting functions
*/
 
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd2)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL;
struct ttm_base_object *user_obj;
struct drm_mode_fb_cmd mode_cmd;
int ret;
 
mode_cmd.width = mode_cmd2->width;
mode_cmd.height = mode_cmd2->height;
mode_cmd.pitch = mode_cmd2->pitches[0];
mode_cmd.handle = mode_cmd2->handles[0];
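/* Convert the fourcc pixel format from the fb2 ioctl into the legacy
 * bpp/depth pair used throughout the rest of this driver.
 */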
drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
&mode_cmd.bpp);
 
/**
* This code should be conditioned on Screen Objects not being used.
* If screen objects are used, we can allocate a GMR to hold the
* requested framebuffer.
*/
 
if (!vmw_kms_validate_mode_vram(dev_priv,
mode_cmd.pitch,
mode_cmd.height)) {
DRM_ERROR("VRAM size is too small for requested mode.\n");
return ERR_PTR(-ENOMEM);
}
 
/*
* Take a reference on the user object of the resource
* backing the kms fb. This ensures that user-space handle
* lookups on that resource will always work as long as
* it's registered with a kms framebuffer. This is important,
* since vmw_execbuf_process identifies resources in the
* command stream using user-space handles.
*/
 
user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
if (unlikely(user_obj == NULL)) {
DRM_ERROR("Could not locate requested kms frame buffer.\n");
return ERR_PTR(-ENOENT);
}
 
/**
* End conditioned code.
*/
 
/* returns either a dmabuf or surface */
// ret = vmw_user_lookup_handle(dev_priv, tfile,
// mode_cmd.handle,
// &surface, &bo);
// if (ret)
// goto err_out;
 
/* Create the new framebuffer depending on what we got back */
// if (bo)
// ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
// &mode_cmd);
// else if (surface)
ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
surface, &vfb, &mode_cmd);
// else
// BUG();
 
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
// if (bo)
// vmw_dmabuf_unreference(&bo);
// if (surface)
// vmw_surface_unreference(&surface);
 
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
// ttm_base_object_unref(&user_obj);
return ERR_PTR(ret);
} else
vfb->user_obj = user_obj;
 
return &vfb->base;
}
 
static const struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
};
 
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct vmw_surface *surface,
uint32_t sid,
int32_t destX, int32_t destY,
struct drm_vmw_rect *clips,
uint32_t num_clips)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, k, num_units;
int ret = 0; /* silence warning */
int left, right, top, bottom;
 
struct {
SVGA3dCmdHeader header;
SVGA3dCmdBlitSurfaceToScreen body;
} *cmd;
SVGASignedRect *blits;
 
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
if (crtc->fb != &vfb->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
 
BUG_ON(surface == NULL);
BUG_ON(!clips || !num_clips);
 
tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
if (unlikely(tmp == NULL)) {
DRM_ERROR("Temporary cliprect memory alloc failed.\n");
return -ENOMEM;
}
 
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kmalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed to allocate temporary fifo memory.\n");
ret = -ENOMEM;
goto out_free_tmp;
}
 
left = clips->x;
right = clips->x + clips->w;
top = clips->y;
bottom = clips->y + clips->h;
 
for (i = 1; i < num_clips; i++) {
left = min_t(int, left, (int)clips[i].x);
right = max_t(int, right, (int)clips[i].x + clips[i].w);
top = min_t(int, top, (int)clips[i].y);
bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
}
 
/* only need to do this once */
memset(cmd, 0, fifo_size);
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
 
blits = (SVGASignedRect *)&cmd[1];
 
cmd->body.srcRect.left = left;
cmd->body.srcRect.right = right;
cmd->body.srcRect.top = top;
cmd->body.srcRect.bottom = bottom;
 
for (i = 0; i < num_clips; i++) {
tmp[i].x1 = clips[i].x - left;
tmp[i].x2 = clips[i].x + clips[i].w - left;
tmp[i].y1 = clips[i].y - top;
tmp[i].y2 = clips[i].y + clips[i].h - top;
}
 
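/* Emit one blit command per display unit, translating the clips from
 * framebuffer space into that unit's screen space.
 */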
for (k = 0; k < num_units; k++) {
struct vmw_display_unit *unit = units[k];
struct vmw_clip_rect clip;
int num;
 
clip.x1 = left + destX - unit->crtc.x;
clip.y1 = top + destY - unit->crtc.y;
clip.x2 = right + destX - unit->crtc.x;
clip.y2 = bottom + destY - unit->crtc.y;
 
/* skip any crtcs that miss the clip region */
if (clip.x1 >= unit->crtc.mode.hdisplay ||
clip.y1 >= unit->crtc.mode.vdisplay ||
clip.x2 <= 0 || clip.y2 <= 0)
continue;
 
/*
* In order for the clip rects to be correctly scaled
* the src and dest rects need to be the same size.
*/
cmd->body.destRect.left = clip.x1;
cmd->body.destRect.right = clip.x2;
cmd->body.destRect.top = clip.y1;
cmd->body.destRect.bottom = clip.y2;
 
/* create a clip rect of the crtc in dest coords */
clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
clip.x1 = 0 - clip.x1;
clip.y1 = 0 - clip.y1;
 
/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = sid;
cmd->body.destScreenId = unit->unit;
 
/* clip and write blits to cmd stream */
vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
 
/* if no cliprects hit skip this */
if (num == 0)
continue;
 
/* recalculate package length */
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL, NULL);
 
if (unlikely(ret != 0))
break;
}
 
kfree(cmd);
out_free_tmp:
kfree(tmp);
 
return ret;
}
 
int vmw_kms_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct drm_vmw_rect *clips,
uint32_t num_clips)
{
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(&vfb->base);
struct vmw_dma_buffer *dmabuf = vfbd->buffer;
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_crtc *crtc;
size_t fifo_size;
int i, k, ret, num_units, blits_pos;
 
struct {
uint32_t header;
SVGAFifoCmdDefineGMRFB body;
} *cmd;
struct {
uint32_t header;
SVGAFifoCmdBlitScreenToGMRFB body;
} *blits;
 
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
if (crtc->fb != &vfb->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
 
BUG_ON(dmabuf == NULL);
BUG_ON(!clips || !num_clips);
 
/* take a safe guess at fifo size */
fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
cmd = kmalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed to allocate temporary fifo memory.\n");
return -ENOMEM;
}
 
memset(cmd, 0, fifo_size);
cmd->header = SVGA_CMD_DEFINE_GMRFB;
cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
cmd->body.format.colorDepth = vfb->base.depth;
cmd->body.format.reserved = 0;
cmd->body.bytesPerLine = vfb->base.pitches[0];
cmd->body.ptr.gmrId = vfb->user_handle;
cmd->body.ptr.offset = 0;
 
blits = (void *)&cmd[1];
blits_pos = 0;
for (i = 0; i < num_units; i++) {
struct drm_vmw_rect *c = clips;
for (k = 0; k < num_clips; k++, c++) {
/* transform clip coords to crtc origin based coords */
int clip_x1 = c->x - units[i]->crtc.x;
int clip_x2 = c->x - units[i]->crtc.x + c->w;
int clip_y1 = c->y - units[i]->crtc.y;
int clip_y2 = c->y - units[i]->crtc.y + c->h;
int dest_x = c->x;
int dest_y = c->y;
 
/* compensate for clipping: clip_x1/clip_y1 are negative here,
 * so adding their negation shifts the destination into view.
*/
if (clip_x1 < 0)
dest_x += -clip_x1;
if (clip_y1 < 0)
dest_y += -clip_y1;
 
/* clip */
clip_x1 = max(clip_x1, 0);
clip_y1 = max(clip_y1, 0);
clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);
 
/* and cull any rects that miss the crtc */
if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
clip_y1 >= units[i]->crtc.mode.vdisplay ||
clip_x2 <= 0 || clip_y2 <= 0)
continue;
 
blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
blits[blits_pos].body.srcScreenId = units[i]->unit;
blits[blits_pos].body.destOrigin.x = dest_x;
blits[blits_pos].body.destOrigin.y = dest_y;
 
blits[blits_pos].body.srcRect.left = clip_x1;
blits[blits_pos].body.srcRect.top = clip_y1;
blits[blits_pos].body.srcRect.right = clip_x2;
blits[blits_pos].body.srcRect.bottom = clip_y2;
blits_pos++;
}
}
/* recompute the exact fifo size from the number of blits generated */
fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
 
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
0, user_fence_rep, NULL);
 
kfree(cmd);
 
return ret;
}
 
int vmw_kms_init(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
int ret;
 
ENTER();
 
drm_mode_config_init(dev);
dev->mode_config.funcs = &vmw_kms_funcs;
dev->mode_config.min_width = 1;
dev->mode_config.min_height = 1;
/* assumed largest fb size */
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
 
ret = vmw_kms_init_screen_object_display(dev_priv);
// if (ret) /* Fallback */
// (void)vmw_kms_init_legacy_display_system(dev_priv);
 
LEAVE();
 
return 0;
}
 
int vmw_kms_close(struct vmw_private *dev_priv)
{
/*
* Docs say we should take the lock before calling this function,
* but since it destroys encoders, and our destructor calls
* drm_encoder_cleanup which takes the lock, we would deadlock.
*/
// drm_mode_config_cleanup(dev_priv->dev);
// if (dev_priv->sou_priv)
// vmw_kms_close_screen_object_display(dev_priv);
// else
// vmw_kms_close_legacy_display_system(dev_priv);
return 0;
}
 
#if 0
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_cursor_bypass_arg *arg = data;
struct vmw_display_unit *du;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
int ret = 0;
 
 
mutex_lock(&dev->mode_config.mutex);
if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
du = vmw_crtc_to_du(crtc);
du->hotspot_x = arg->xhot;
du->hotspot_y = arg->yhot;
}
 
mutex_unlock(&dev->mode_config.mutex);
return 0;
}
 
obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
goto out;
}
 
crtc = obj_to_crtc(obj);
du = vmw_crtc_to_du(crtc);
 
du->hotspot_x = arg->xhot;
du->hotspot_y = arg->yhot;
 
out:
mutex_unlock(&dev->mode_config.mutex);
 
return ret;
}
#endif
 
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth)
{
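/* The pitch is locked either through the dedicated register or, on
 * hosts without SVGA_CAP_PITCHLOCK, through the FIFO register.
 */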
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
else if (vmw_fifo_have_pitchlock(vmw_priv))
iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
 
if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
return -EINVAL;
}
 
return 0;
}
 
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
struct vmw_vga_topology_state *save;
uint32_t i;
 
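/* Save the host's current VGA state so it can be restored later;
 * per-display topology is only saved when the host advertises
 * SVGA_CAP_DISPLAY_TOPOLOGY.
 */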
vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_priv->vga_pitchlock =
vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
else if (vmw_fifo_have_pitchlock(vmw_priv))
vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
SVGA_FIFO_PITCHLOCK);
 
if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
return 0;
 
vmw_priv->num_displays = vmw_read(vmw_priv,
SVGA_REG_NUM_GUEST_DISPLAYS);
 
if (vmw_priv->num_displays == 0)
vmw_priv->num_displays = 1;
 
for (i = 0; i < vmw_priv->num_displays; ++i) {
save = &vmw_priv->vga_save[i];
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
if (i == 0 && vmw_priv->num_displays == 1 &&
save->width == 0 && save->height == 0) {
 
/*
* It should be fairly safe to assume that these
* values are uninitialized.
*/
 
save->width = vmw_priv->vga_width - save->pos_x;
save->height = vmw_priv->vga_height - save->pos_y;
}
}
 
return 0;
}
 
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
struct vmw_vga_topology_state *save;
uint32_t i;
 
vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
vmw_priv->vga_pitchlock);
else if (vmw_fifo_have_pitchlock(vmw_priv))
iowrite32(vmw_priv->vga_pitchlock,
vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
 
if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
return 0;
 
for (i = 0; i < vmw_priv->num_displays; ++i) {
save = &vmw_priv->vga_save[i];
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
}
 
return 0;
}
 
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height)
{
return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
}
 
 
/**
* Called by the DRM core with vbl_lock held.
*/
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
{
return 0;
}
 
/**
* Called by the DRM core with vbl_lock held.
*/
int vmw_enable_vblank(struct drm_device *dev, int crtc)
{
return -ENOSYS;
}
 
/**
* Called by the DRM core with vbl_lock held.
*/
void vmw_disable_vblank(struct drm_device *dev, int crtc)
{
}
 
 
/*
* Small shared kms functions.
*/
 
int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
struct drm_vmw_rect *rects)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_display_unit *du;
struct drm_connector *con;
 
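/* Match each connector's display unit against the new GUI layout;
 * units beyond the layout fall back to an inactive 800x600 preference.
 */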
mutex_lock(&dev->mode_config.mutex);
 
#if 0
{
unsigned int i;
 
DRM_INFO("%s: new layout ", __func__);
for (i = 0; i < num; i++)
DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
rects[i].w, rects[i].h);
DRM_INFO("\n");
}
#endif
 
list_for_each_entry(con, &dev->mode_config.connector_list, head) {
du = vmw_connector_to_du(con);
if (num > du->unit) {
du->pref_width = rects[du->unit].w;
du->pref_height = rects[du->unit].h;
du->pref_active = true;
du->gui_x = rects[du->unit].x;
du->gui_y = rects[du->unit].y;
} else {
du->pref_width = 800;
du->pref_height = 600;
du->pref_active = false;
}
con->status = vmw_du_connector_detect(con, true);
}
 
mutex_unlock(&dev->mode_config.mutex);
 
return 0;
}
 
#if 0
int vmw_du_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct drm_framebuffer *old_fb = crtc->fb;
struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
struct drm_file *file_priv;
struct vmw_fence_obj *fence = NULL;
struct drm_clip_rect clips;
int ret;
 
if (event == NULL)
return -EINVAL;
 
/* require ScreenObject support for page flipping */
if (!dev_priv->sou_priv)
return -ENOSYS;
 
file_priv = event->base.file_priv;
if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
return -EINVAL;
 
crtc->fb = fb;
 
/* do a full screen dirty update */
clips.x1 = clips.y1 = 0;
clips.x2 = fb->width;
clips.y2 = fb->height;
 
if (vfb->dmabuf)
ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
0, 0, &clips, 1, 1, &fence);
else
ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
0, 0, &clips, 1, 1, &fence);
 
 
if (ret != 0)
goto out_no_fence;
if (!fence) {
ret = -EINVAL;
goto out_no_fence;
}
 
ret = vmw_event_fence_action_queue(file_priv, fence,
&event->base,
&event->event.tv_sec,
&event->event.tv_usec,
true);
 
/*
* No need to hold on to this now. The only cleanup
* we need to do if we fail is unref the fence.
*/
vmw_fence_obj_unreference(&fence);
 
if (vmw_crtc_to_du(crtc)->is_implicit)
vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);
 
return ret;
 
out_no_fence:
crtc->fb = old_fb;
return ret;
}
#endif
 
void vmw_du_crtc_save(struct drm_crtc *crtc)
{
}
 
void vmw_du_crtc_restore(struct drm_crtc *crtc)
{
}
 
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
uint32_t start, uint32_t size)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
int i;
 
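/* The SVGA palette registers take 8-bit components, so drop the low
 * byte of each 16-bit gamma value.
 */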
for (i = 0; i < size; i++) {
DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
r[i], g[i], b[i]);
vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
}
}
 
void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
}
 
void vmw_du_connector_save(struct drm_connector *connector)
{
}
 
void vmw_du_connector_restore(struct drm_connector *connector)
{
}
 
enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
uint32_t num_displays;
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_display_unit *du = vmw_connector_to_du(connector);
 
mutex_lock(&dev_priv->hw_mutex);
num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
mutex_unlock(&dev_priv->hw_mutex);
 
return ((vmw_connector_to_du(connector)->unit < num_displays &&
du->pref_active) ?
connector_status_connected : connector_status_disconnected);
}
 
static struct drm_display_mode vmw_kms_connector_builtin[] = {
/* 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 489, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 800x600@60Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@60Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1472, 1664, 0, 768, 771, 778, 798, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x800@60Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1480, 1680, 0, 800, 803, 809, 831, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x960@60Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1488, 1800, 0, 960, 961, 964, 1000, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@60Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1360x768@60Hz */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1536, 1792, 0, 768, 771, 777, 795, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@60Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@60Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1672, 1904, 0, 900, 903, 909, 934, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@60Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@60Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1792x1344@60Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1856x1392@60Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@60Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1440@60Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1600@60Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* Terminate */
{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};
 
/**
* vmw_guess_mode_timing - Provide fake timings for a
* 60Hz vrefresh mode.
*
* @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
* members filled in.
*/
static void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
mode->hsync_start = mode->hdisplay + 50;
mode->hsync_end = mode->hsync_start + 50;
mode->htotal = mode->hsync_end + 50;
 
mode->vsync_start = mode->vdisplay + 50;
mode->vsync_end = mode->vsync_start + 50;
mode->vtotal = mode->vsync_end + 50;
 
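/* Pixel clock in kHz for a ~60Hz refresh: htotal * vtotal * 60 / 1000. */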
mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
mode->vrefresh = drm_mode_vrefresh(mode);
}
 
 
int vmw_du_connector_fill_modes(struct drm_connector *connector,
uint32_t max_width, uint32_t max_height)
{
struct vmw_display_unit *du = vmw_connector_to_du(connector);
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_display_mode *mode = NULL;
struct drm_display_mode *bmode;
struct drm_display_mode prefmode = { DRM_MODE("preferred",
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
int i;
 
/* Add preferred mode */
{
mode = drm_mode_duplicate(dev, &prefmode);
if (!mode)
return 0;
mode->hdisplay = du->pref_width;
mode->vdisplay = du->pref_height;
vmw_guess_mode_timing(mode);
 
if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
mode->vdisplay)) {
drm_mode_probed_add(connector, mode);
} else {
drm_mode_destroy(dev, mode);
mode = NULL;
}
 
if (du->pref_mode) {
list_del_init(&du->pref_mode->head);
drm_mode_destroy(dev, du->pref_mode);
}
 
/* mode might be NULL here, this is intended */
du->pref_mode = mode;
}
 
for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
bmode = &vmw_kms_connector_builtin[i];
if (bmode->hdisplay > max_width ||
bmode->vdisplay > max_height)
continue;
 
if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
bmode->vdisplay))
continue;
 
mode = drm_mode_duplicate(dev, bmode);
if (!mode)
return 0;
mode->vrefresh = drm_mode_vrefresh(mode);
 
drm_mode_probed_add(connector, mode);
}
 
/* Move the preferred mode first, to help apps pick the right mode. */
if (du->pref_mode)
list_move(&du->pref_mode->head, &connector->probed_modes);
 
drm_mode_connector_list_update(connector);
 
return 1;
}
 
int vmw_du_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
return 0;
}
 
#if 0
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_update_layout_arg *arg =
(struct drm_vmw_update_layout_arg *)data;
struct vmw_master *vmaster = vmw_master(file_priv->master);
void __user *user_rects;
struct drm_vmw_rect *rects;
unsigned rects_size;
int ret;
int i;
struct drm_mode_config *mode_config = &dev->mode_config;
 
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
 
if (!arg->num_outputs) {
struct drm_vmw_rect def_rect = {0, 0, 800, 600};
vmw_du_update_layout(dev_priv, 1, &def_rect);
goto out_unlock;
}
 
rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
GFP_KERNEL);
if (unlikely(!rects)) {
ret = -ENOMEM;
goto out_unlock;
}
 
user_rects = (void __user *)(unsigned long)arg->rects;
ret = copy_from_user(rects, user_rects, rects_size);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to get rects.\n");
ret = -EFAULT;
goto out_free;
}
 
for (i = 0; i < arg->num_outputs; ++i) {
if (rects[i].x < 0 ||
rects[i].y < 0 ||
rects[i].x + rects[i].w > mode_config->max_width ||
rects[i].y + rects[i].h > mode_config->max_height) {
DRM_ERROR("Invalid GUI layout.\n");
ret = -EINVAL;
goto out_free;
}
}
 
vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
 
out_free:
kfree(rects);
out_unlock:
ttm_read_unlock(&vmaster->lock);
return ret;
}
#endif
/drivers/video/drm/vmwgfx/vmwgfx_kms.h
0,0 → 1,166
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#ifndef VMWGFX_KMS_H_
#define VMWGFX_KMS_H_
 
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "vmwgfx_drv.h"
 
#define VMWGFX_NUM_DISPLAY_UNITS 8
 
 
#define vmw_framebuffer_to_vfb(x) \
container_of(x, struct vmw_framebuffer, base)
 
/**
* Base class for framebuffers
*
* @pin is called whenever a crtc uses this framebuffer
* @unpin is called when the framebuffer is no longer scanned out by a crtc
*/
struct vmw_framebuffer {
struct drm_framebuffer base;
int (*pin)(struct vmw_framebuffer *fb);
int (*unpin)(struct vmw_framebuffer *fb);
bool dmabuf;
struct ttm_base_object *user_obj;
uint32_t user_handle;
};
 
 
#define vmw_crtc_to_du(x) \
container_of(x, struct vmw_display_unit, crtc)
 
/*
* Basic cursor manipulation
*/
int vmw_cursor_update_image(struct vmw_private *dev_priv,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y);
 
 
/**
* Base class display unit.
*
* Since the SVGA hw doesn't have a concept of a crtc, encoder or connector,
* the display unit is all of them at the same time. This is true for both
* legacy multimon and screen objects.
*/
struct vmw_display_unit {
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
 
struct vmw_surface *cursor_surface;
struct vmw_dma_buffer *cursor_dmabuf;
size_t cursor_age;
 
int cursor_x;
int cursor_y;
 
int hotspot_x;
int hotspot_y;
 
unsigned unit;
 
/*
* Preferred mode tracking.
*/
unsigned pref_width;
unsigned pref_height;
bool pref_active;
struct drm_display_mode *pref_mode;
 
/*
* GUI positioning
*/
int gui_x;
int gui_y;
bool is_implicit;
};
 
#define vmw_crtc_to_du(x) \
container_of(x, struct vmw_display_unit, crtc)
#define vmw_connector_to_du(x) \
container_of(x, struct vmw_display_unit, connector)
 
 
/*
* Shared display unit functions - vmwgfx_kms.c
*/
void vmw_display_unit_cleanup(struct vmw_display_unit *du);
int vmw_du_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
void vmw_du_crtc_save(struct drm_crtc *crtc);
void vmw_du_crtc_restore(struct drm_crtc *crtc);
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
uint32_t start, uint32_t size);
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height);
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
void vmw_du_connector_save(struct drm_connector *connector);
void vmw_du_connector_restore(struct drm_connector *connector);
enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force);
int vmw_du_connector_fill_modes(struct drm_connector *connector,
uint32_t max_width, uint32_t max_height);
int vmw_du_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val);
 
 
/*
* Legacy display unit functions - vmwgfx_ldu.c
*/
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
 
/*
* Screen Objects display functions - vmwgfx_scrn.c
*/
int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
struct drm_vmw_rect *rects);
bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
struct drm_crtc *crtc);
void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
struct drm_crtc *crtc);
 
 
#endif
/drivers/video/drm/vmwgfx/vmwgfx_marker.c
0,0 → 1,208
/**************************************************************************
*
* Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
 
#include "vmwgfx_drv.h"
#include <linux/time.h>
 
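/* A marker ties a FIFO seqno to its submission time; the queue of
 * outstanding markers is used to estimate how far the device lags
 * behind command submission.
 */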
struct vmw_marker {
struct list_head head;
uint32_t seqno;
struct timespec submitted;
};
 
void vmw_marker_queue_init(struct vmw_marker_queue *queue)
{
INIT_LIST_HEAD(&queue->head);
queue->lag = ns_to_timespec(0);
// getrawmonotonic(&queue->lag_time);
spin_lock_init(&queue->lock);
}
 
void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
{
struct vmw_marker *marker, *next;
 
spin_lock(&queue->lock);
list_for_each_entry_safe(marker, next, &queue->head, head) {
kfree(marker);
}
spin_unlock(&queue->lock);
}
 
int vmw_marker_push(struct vmw_marker_queue *queue,
uint32_t seqno)
{
struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);
 
if (unlikely(!marker))
return -ENOMEM;
 
marker->seqno = seqno;
// getrawmonotonic(&marker->submitted);
spin_lock(&queue->lock);
list_add_tail(&marker->head, &queue->head);
spin_unlock(&queue->lock);
 
return 0;
}
 
int vmw_marker_pull(struct vmw_marker_queue *queue,
uint32_t signaled_seqno)
{
struct vmw_marker *marker, *next;
struct timespec now;
bool updated = false;
 
spin_lock(&queue->lock);
// getrawmonotonic(&now);
 
if (list_empty(&queue->head)) {
// queue->lag = ns_to_timespec(0);
queue->lag_time = now;
updated = true;
goto out_unlock;
}
 
list_for_each_entry_safe(marker, next, &queue->head, head) {
if (signaled_seqno - marker->seqno > (1 << 30))
continue;
 
// queue->lag = timespec_sub(now, marker->submitted);
queue->lag_time = now;
updated = true;
list_del(&marker->head);
kfree(marker);
}
 
out_unlock:
spin_unlock(&queue->lock);
 
return (updated) ? 0 : -EBUSY;
}
 
static struct timespec vmw_timespec_add(struct timespec t1,
struct timespec t2)
{
t1.tv_sec += t2.tv_sec;
t1.tv_nsec += t2.tv_nsec;
if (t1.tv_nsec >= 1000000000L) {
t1.tv_sec += 1;
t1.tv_nsec -= 1000000000L;
}
 
return t1;
}
 
static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
{
struct timespec now;
 
spin_lock(&queue->lock);
// getrawmonotonic(&now);
// queue->lag = vmw_timespec_add(queue->lag,
// timespec_sub(now, queue->lag_time));
queue->lag_time = now;
spin_unlock(&queue->lock);
return queue->lag;
}
 
 
static bool vmw_lag_lt(struct vmw_marker_queue *queue,
uint32_t us)
{
struct timespec lag, cond;
 
cond = ns_to_timespec((s64) us * 1000);
lag = vmw_fifo_lag(queue);
return (timespec_compare(&lag, &cond) < 1);
}
 
int vmw_wait_lag(struct vmw_private *dev_priv,
struct vmw_marker_queue *queue, uint32_t us)
{
struct vmw_marker *marker;
uint32_t seqno;
int ret;
 
while (!vmw_lag_lt(queue, us)) {
spin_lock(&queue->lock);
if (list_empty(&queue->head))
seqno = atomic_read(&dev_priv->marker_seq);
else {
marker = list_first_entry(&queue->head,
struct vmw_marker, head);
seqno = marker->seqno;
}
spin_unlock(&queue->lock);
 
ret = vmw_wait_seqno(dev_priv, false, seqno, true,
3*HZ);
 
if (unlikely(ret != 0))
return ret;
 
(void) vmw_marker_pull(queue, seqno);
}
return 0;
}
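
/*
 * A minimal usage sketch (not part of the original driver): throttle
 * command submission so the fifo never runs more than ~16 ms behind.
 * The submit_commands() helper and its returned seqno are hypothetical;
 * only the marker-queue calls are real.
 */
#if 0
static int example_throttled_submit(struct vmw_private *dev_priv,
				    struct vmw_marker_queue *queue)
{
	uint32_t seqno;
	int ret;

	/* Block until the estimated fifo lag drops below 16000 us. */
	ret = vmw_wait_lag(dev_priv, queue, 16000);
	if (ret != 0)
		return ret;

	seqno = submit_commands(dev_priv);	/* hypothetical helper */

	/* Record the submission so future lag estimates include it. */
	return vmw_marker_push(queue, seqno);
}
#endif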
 
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
u64 quotient;
 
if (dividend < 0) {
quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
*remainder = -*remainder;
if (divisor > 0)
quotient = -quotient;
} else {
quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
if (divisor < 0)
quotient = -quotient;
}
return quotient;
}
 
struct timespec ns_to_timespec(const s64 nsec)
{
struct timespec ts;
s32 rem;
 
if (!nsec)
return (struct timespec) {0, 0};
 
ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
if (unlikely(rem < 0)) {
ts.tv_sec--;
rem += NSEC_PER_SEC;
}
ts.tv_nsec = rem;
 
return ts;
}
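
/*
 * Worked example of the sign handling above: for nsec == -1,
 * div_s64_rem() yields tv_sec == 0 with rem == -1, and the rem < 0
 * fixup turns that into { .tv_sec = -1, .tv_nsec = 999999999 }.
 * tv_sec thus always rounds toward minus infinity while tv_nsec
 * stays within [0, NSEC_PER_SEC).
 */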
 
/drivers/video/drm/vmwgfx/vmwgfx_reg.h
0,0 → 1,57
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
/**
* This file contains virtual hardware defines for kernel space.
*/
 
#ifndef _VMWGFX_REG_H_
#define _VMWGFX_REG_H_
 
#include <linux/types.h>
 
#define VMWGFX_INDEX_PORT 0x0
#define VMWGFX_VALUE_PORT 0x1
#define VMWGFX_IRQSTATUS_PORT 0x8
 
struct svga_guest_mem_descriptor {
__le32 ppn;
__le32 num_pages;
};
 
struct svga_fifo_cmd_fence {
__le32 fence;
};
 
#define SVGA_SYNC_GENERIC 1
#define SVGA_SYNC_FIFOFULL 2
 
#include "svga_types.h"
 
#include "svga3d_reg.h"
 
#endif
/drivers/video/drm/vmwgfx/vmwgfx_resource.c
0,0 → 1,1308
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
 
struct vmw_user_dma_buffer {
struct ttm_base_object base;
struct vmw_dma_buffer dma;
};
 
struct vmw_bo_user_rep {
uint32_t handle;
uint64_t map_handle;
};
 
struct vmw_stream {
struct vmw_resource res;
uint32_t stream_id;
};
 
struct vmw_user_stream {
struct ttm_base_object base;
struct vmw_stream stream;
};
 
 
static uint64_t vmw_user_stream_size;
 
static const struct vmw_res_func vmw_stream_func = {
.res_type = vmw_res_stream,
.needs_backup = false,
.may_evict = false,
.type_name = "video streams",
.backup_placement = NULL,
.create = NULL,
.destroy = NULL,
.bind = NULL,
.unbind = NULL
};
 
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
return container_of(bo, struct vmw_dma_buffer, base);
}
 
static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}
 
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
kref_get(&res->kref);
return res;
}
 
 
/**
* vmw_resource_release_id - release a resource id to the id manager.
*
* @res: Pointer to the resource.
*
* Release the resource id to the resource id manager and set res->id to -1.
*/
void vmw_resource_release_id(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
write_lock(&dev_priv->resource_lock);
if (res->id != -1)
idr_remove(idr, res->id);
res->id = -1;
write_unlock(&dev_priv->resource_lock);
}
 
static void vmw_resource_release(struct kref *kref)
{
struct vmw_resource *res =
container_of(kref, struct vmw_resource, kref);
struct vmw_private *dev_priv = res->dev_priv;
int id;
struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
res->avail = false;
list_del_init(&res->lru_head);
write_unlock(&dev_priv->resource_lock);
if (res->backup) {
struct ttm_buffer_object *bo = &res->backup->base;
 
ttm_bo_reserve(bo, false, false, false, 0);
if (!list_empty(&res->mob_head) &&
res->func->unbind != NULL) {
struct ttm_validate_buffer val_buf;
 
val_buf.bo = bo;
res->func->unbind(res, false, &val_buf);
}
res->backup_dirty = false;
list_del_init(&res->mob_head);
ttm_bo_unreserve(bo);
vmw_dmabuf_unreference(&res->backup);
}
 
if (likely(res->hw_destroy != NULL))
res->hw_destroy(res);
 
id = res->id;
if (res->res_free != NULL)
res->res_free(res);
else
kfree(res);
 
write_lock(&dev_priv->resource_lock);
 
if (id != -1)
idr_remove(idr, id);
}
 
void vmw_resource_unreference(struct vmw_resource **p_res)
{
struct vmw_resource *res = *p_res;
struct vmw_private *dev_priv = res->dev_priv;
 
*p_res = NULL;
write_lock(&dev_priv->resource_lock);
kref_put(&res->kref, vmw_resource_release);
write_unlock(&dev_priv->resource_lock);
}
 
 
/**
* vmw_resource_alloc_id - allocate a resource id from the id manager.
*
* @res: Pointer to the resource.
*
* Allocate the lowest free resource id from the resource id manager, and set
* @res->id to that id. Returns 0 on success and -ENOMEM on failure.
*/
int vmw_resource_alloc_id(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
BUG_ON(res->id != -1);
 
idr_preload(GFP_KERNEL);
write_lock(&dev_priv->resource_lock);
 
ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
if (ret >= 0)
res->id = ret;
 
write_unlock(&dev_priv->resource_lock);
idr_preload_end();
return ret < 0 ? ret : 0;
}
 
/**
* vmw_resource_init - initialize a struct vmw_resource
*
* @dev_priv: Pointer to a device private struct.
* @res: The struct vmw_resource to initialize.
* @obj_type: Resource object type.
* @delay_id: Boolean whether to defer device id allocation until
* the first validation.
* @res_free: Resource destructor.
* @func: Resource function table.
*/
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
bool delay_id,
void (*res_free) (struct vmw_resource *res),
const struct vmw_res_func *func)
{
kref_init(&res->kref);
res->hw_destroy = NULL;
res->res_free = res_free;
res->avail = false;
res->dev_priv = dev_priv;
res->func = func;
INIT_LIST_HEAD(&res->lru_head);
INIT_LIST_HEAD(&res->mob_head);
res->id = -1;
res->backup = NULL;
res->backup_offset = 0;
res->backup_dirty = false;
res->res_dirty = false;
if (delay_id)
return 0;
else
return vmw_resource_alloc_id(res);
}
 
/**
* vmw_resource_activate
*
* @res: Pointer to the newly created resource
* @hw_destroy: Destroy function. NULL if none.
*
* Activate a resource after the hardware has been made aware of it.
* Set the destroy function to @hw_destroy. Typically this frees the
* resource and destroys the hardware resources associated with it.
* Activate basically means that the function vmw_resource_lookup will
* find it.
*/
void vmw_resource_activate(struct vmw_resource *res,
void (*hw_destroy) (struct vmw_resource *))
{
struct vmw_private *dev_priv = res->dev_priv;
 
write_lock(&dev_priv->resource_lock);
res->avail = true;
res->hw_destroy = hw_destroy;
write_unlock(&dev_priv->resource_lock);
}
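
/*
 * A minimal sketch of the expected bring-up order, assuming a
 * hypothetical resource type with function table 'example_func' and
 * hardware destructor 'example_hw_destroy'; compare vmw_stream_init()
 * below for a real instance.
 */
#if 0
static int example_res_setup(struct vmw_private *dev_priv,
			     struct vmw_resource *res)
{
	int ret;

	/* Initialize bookkeeping; device id allocation is deferred. */
	ret = vmw_resource_init(dev_priv, res, true, NULL, &example_func);
	if (ret != 0)
		return ret;

	/* ... make the hardware aware of the resource here ... */

	/* Only after this can vmw_resource_lookup() find it. */
	vmw_resource_activate(res, example_hw_destroy);
	return 0;
}
#endif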
 
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
struct idr *idr, int id)
{
struct vmw_resource *res;
 
read_lock(&dev_priv->resource_lock);
res = idr_find(idr, id);
if (res && res->avail)
kref_get(&res->kref);
else
res = NULL;
read_unlock(&dev_priv->resource_lock);
 
if (unlikely(res == NULL))
return NULL;
 
return res;
}
 
/**
* vmw_user_resource_lookup_handle - lookup a struct resource from a
* TTM user-space handle and perform basic type checks
*
* @dev_priv: Pointer to a device private struct
* @tfile: Pointer to a struct ttm_object_file identifying the caller
* @handle: The TTM user-space handle
* @converter: Pointer to an object describing the resource type
* @p_res: On successful return the location pointed to will contain
* a pointer to a refcounted struct vmw_resource.
*
* If the handle can't be found or is associated with an incorrect resource
* type, -EINVAL will be returned.
*/
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
const struct vmw_user_resource_conv
*converter,
struct vmw_resource **p_res)
{
struct ttm_base_object *base;
struct vmw_resource *res;
int ret = -EINVAL;
 
base = ttm_base_object_lookup(tfile, handle);
if (unlikely(base == NULL))
return -EINVAL;
 
if (unlikely(base->object_type != converter->object_type))
goto out_bad_resource;
 
res = converter->base_obj_to_res(base);
 
read_lock(&dev_priv->resource_lock);
if (!res->avail || res->res_free != converter->res_free) {
read_unlock(&dev_priv->resource_lock);
goto out_bad_resource;
}
 
kref_get(&res->kref);
read_unlock(&dev_priv->resource_lock);
 
*p_res = res;
ret = 0;
 
out_bad_resource:
ttm_base_object_unref(&base);
 
return ret;
}
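
/*
 * Usage sketch, assuming a hypothetical caller holding a user-space
 * surface handle: on success the returned resource is refcounted and
 * must be released with vmw_resource_unreference().
 */
#if 0
static void example_use_surface(struct vmw_private *dev_priv,
				struct ttm_object_file *tfile,
				uint32_t handle)
{
	struct vmw_resource *res;

	if (vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					    user_surface_converter,
					    &res) != 0)
		return;

	/* ... use vmw_res_to_srf(res) here ... */

	vmw_resource_unreference(&res);
}
#endif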
 
/**
* Helper function that looks up either a surface or a dmabuf.
*
* The pointers pointed at by out_surf and out_buf need to be NULL.
*/
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_dma_buffer **out_buf)
{
struct vmw_resource *res;
int ret;
 
BUG_ON(*out_surf || *out_buf);
 
ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
user_surface_converter,
&res);
if (!ret) {
*out_surf = vmw_res_to_srf(res);
return 0;
}
 
*out_surf = NULL;
ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
return ret;
}
 
/**
* Buffer management.
*/
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
 
kfree(vmw_bo);
}
 
int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct vmw_dma_buffer *vmw_bo,
size_t size, struct ttm_placement *placement,
bool interruptible,
void (*bo_free) (struct ttm_buffer_object *bo))
{
struct ttm_bo_device *bdev = &dev_priv->bdev;
size_t acc_size;
int ret;
 
BUG_ON(!bo_free);
 
acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
memset(vmw_bo, 0, sizeof(*vmw_bo));
 
INIT_LIST_HEAD(&vmw_bo->res_list);
 
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
0, interruptible,
NULL, acc_size, NULL, bo_free);
return ret;
}
 
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
 
// ttm_base_object_kfree(vmw_user_bo, base);
}
 
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
struct vmw_user_dma_buffer *vmw_user_bo;
struct ttm_base_object *base = *p_base;
struct ttm_buffer_object *bo;
 
*p_base = NULL;
 
if (unlikely(base == NULL))
return;
 
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
bo = &vmw_user_bo->dma.base;
ttm_bo_unref(&bo);
}
 
/**
* vmw_user_dmabuf_alloc - Allocate a user dma buffer
*
* @dev_priv: Pointer to a struct device private.
* @tfile: Pointer to a struct ttm_object_file on which to register the user
* object.
* @size: Size of the dma buffer.
* @shareable: Boolean whether the buffer is shareable with other open files.
* @handle: Pointer to where the handle value should be assigned.
* @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
* should be assigned.
*/
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t size,
bool shareable,
uint32_t *handle,
struct vmw_dma_buffer **p_dma_buf)
{
struct vmw_user_dma_buffer *user_bo;
struct ttm_buffer_object *tmp;
int ret;
 
user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
if (unlikely(user_bo == NULL)) {
DRM_ERROR("Failed to allocate a buffer.\n");
return -ENOMEM;
}
 
ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
&vmw_vram_sys_placement, true,
&vmw_user_dmabuf_destroy);
if (unlikely(ret != 0))
return ret;
 
tmp = ttm_bo_reference(&user_bo->dma.base);
ret = ttm_base_object_init(tfile,
&user_bo->base,
shareable,
ttm_buffer_type,
&vmw_user_dmabuf_release, NULL);
if (unlikely(ret != 0)) {
ttm_bo_unref(&tmp);
goto out_no_base_object;
}
 
*p_dma_buf = &user_bo->dma;
*handle = user_bo->base.hash.key;
 
out_no_base_object:
return ret;
}
 
/**
* vmw_user_dmabuf_verify_access - verify access permissions on this
* buffer object.
*
* @bo: Pointer to the buffer object being accessed
* @tfile: Pointer to a struct ttm_object_file identifying the caller.
*/
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
struct ttm_object_file *tfile)
{
struct vmw_user_dma_buffer *vmw_user_bo;
 
if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
return -EPERM;
 
vmw_user_bo = vmw_user_dma_buffer(bo);
return (vmw_user_bo->base.tfile == tfile ||
vmw_user_bo->base.shareable) ? 0 : -EPERM;
}
 
#if 0
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
union drm_vmw_alloc_dmabuf_arg *arg =
(union drm_vmw_alloc_dmabuf_arg *)data;
struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
struct drm_vmw_dmabuf_rep *rep = &arg->rep;
struct vmw_dma_buffer *dma_buf;
uint32_t handle;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
 
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
 
ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
req->size, false, &handle, &dma_buf);
if (unlikely(ret != 0))
goto out_no_dmabuf;
 
rep->handle = handle;
rep->map_handle = dma_buf->base.addr_space_offset;
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
 
vmw_dmabuf_unreference(&dma_buf);
 
out_no_dmabuf:
ttm_read_unlock(&vmaster->lock);
 
return ret;
}
 
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_unref_dmabuf_arg *arg =
(struct drm_vmw_unref_dmabuf_arg *)data;
 
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
arg->handle,
TTM_REF_USAGE);
}
#endif
 
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t handle, struct vmw_dma_buffer **out)
{
struct vmw_user_dma_buffer *vmw_user_bo;
struct ttm_base_object *base;
 
base = ttm_base_object_lookup(tfile, handle);
if (unlikely(base == NULL)) {
printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return -ESRCH;
}
 
if (unlikely(base->object_type != ttm_buffer_type)) {
ttm_base_object_unref(&base);
printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return -EINVAL;
}
 
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
(void)ttm_bo_reference(&vmw_user_bo->dma.base);
ttm_base_object_unref(&base);
*out = &vmw_user_bo->dma;
 
return 0;
}
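
/*
 * Usage sketch (hypothetical caller): the lookup takes a TTM reference
 * on the buffer, so the matching release is vmw_dmabuf_unreference().
 */
#if 0
static int example_buffer_size(struct ttm_object_file *tfile,
			       uint32_t handle, unsigned long *size)
{
	struct vmw_dma_buffer *buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &buf);
	if (ret != 0)
		return ret;

	*size = buf->base.num_pages << PAGE_SHIFT;
	vmw_dmabuf_unreference(&buf);
	return 0;
}
#endif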
 
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
struct vmw_dma_buffer *dma_buf)
{
struct vmw_user_dma_buffer *user_bo;
 
if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
return -EINVAL;
 
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
}
 
/*
* Stream management
*/
 
static void vmw_stream_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_stream *stream;
int ret;
 
DRM_INFO("%s: unref\n", __func__);
stream = container_of(res, struct vmw_stream, res);
 
ret = vmw_overlay_unref(dev_priv, stream->stream_id);
WARN_ON(ret != 0);
}
 
static int vmw_stream_init(struct vmw_private *dev_priv,
struct vmw_stream *stream,
void (*res_free) (struct vmw_resource *res))
{
struct vmw_resource *res = &stream->res;
int ret;
 
ret = vmw_resource_init(dev_priv, res, false, res_free,
&vmw_stream_func);
 
if (unlikely(ret != 0)) {
if (res_free == NULL)
kfree(stream);
else
res_free(&stream->res);
return ret;
}
 
ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
if (ret) {
vmw_resource_unreference(&res);
return ret;
}
 
DRM_INFO("%s: claimed\n", __func__);
 
vmw_resource_activate(&stream->res, vmw_stream_destroy);
return 0;
}
 
static void vmw_user_stream_free(struct vmw_resource *res)
{
struct vmw_user_stream *stream =
container_of(res, struct vmw_user_stream, stream.res);
struct vmw_private *dev_priv = res->dev_priv;
 
// ttm_base_object_kfree(stream, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_stream_size);
}
 
/**
* This function is called when user space has no more references on the
* base object. It releases the base-object's reference on the resource object.
*/
 
static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct vmw_user_stream *stream =
container_of(base, struct vmw_user_stream, base);
struct vmw_resource *res = &stream->stream.res;
 
*p_base = NULL;
vmw_resource_unreference(&res);
}
 
#if 0
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_resource *res;
struct vmw_user_stream *stream;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
int ret = 0;
 
 
res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
if (unlikely(res == NULL))
return -EINVAL;
 
if (res->res_free != &vmw_user_stream_free) {
ret = -EINVAL;
goto out;
}
 
stream = container_of(res, struct vmw_user_stream, stream.res);
if (stream->base.tfile != tfile) {
ret = -EINVAL;
goto out;
}
 
ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
vmw_resource_unreference(&res);
return ret;
}
 
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_stream *stream;
struct vmw_resource *res;
struct vmw_resource *tmp;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
 
/*
* Approximate idr memory usage with 128 bytes. It will be limited
* by the maximum number of streams anyway.
*/
 
if (unlikely(vmw_user_stream_size == 0))
vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
 
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
 
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_stream_size,
false, true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for stream"
" creation.\n");
goto out_unlock;
}
 
 
stream = kmalloc(sizeof(*stream), GFP_KERNEL);
if (unlikely(stream == NULL)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_stream_size);
ret = -ENOMEM;
goto out_unlock;
}
 
res = &stream->stream.res;
stream->base.shareable = false;
stream->base.tfile = NULL;
 
/*
* From here on, the destructor takes over resource freeing.
*/
 
ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
if (unlikely(ret != 0))
goto out_unlock;
 
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
&vmw_user_stream_base_release, NULL);
 
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
goto out_err;
}
 
arg->stream_id = res->id;
out_err:
vmw_resource_unreference(&res);
out_unlock:
ttm_read_unlock(&vmaster->lock);
return ret;
}
#endif
 
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t *inout_id, struct vmw_resource **out)
{
struct vmw_user_stream *stream;
struct vmw_resource *res;
int ret;
 
res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
*inout_id);
if (unlikely(res == NULL))
return -EINVAL;
 
if (res->res_free != &vmw_user_stream_free) {
ret = -EINVAL;
goto err_ref;
}
 
stream = container_of(res, struct vmw_user_stream, stream.res);
if (stream->base.tfile != tfile) {
ret = -EPERM;
goto err_ref;
}
 
*inout_id = stream->stream.stream_id;
*out = res;
return 0;
err_ref:
vmw_resource_unreference(&res);
return ret;
}
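
/*
 * Usage sketch (hypothetical caller): translate a user-space stream
 * handle into the device stream id, then drop the reference taken by
 * the lookup.
 */
#if 0
	uint32_t id = user_handle;	/* in: user handle, out: stream id */
	struct vmw_resource *res;

	if (vmw_user_stream_lookup(dev_priv, tfile, &id, &res) == 0) {
		/* 'id' now holds the device stream id. */
		vmw_resource_unreference(&res);
	}
#endif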
 
#if 0
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_user_dma_buffer *vmw_user_bo;
struct ttm_buffer_object *tmp;
int ret;
 
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
 
vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
if (vmw_user_bo == NULL)
return -ENOMEM;
 
ret = ttm_read_lock(&vmaster->lock, true);
if (ret != 0) {
kfree(vmw_user_bo);
return ret;
}
 
ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
&vmw_vram_sys_placement, true,
&vmw_user_dmabuf_destroy);
if (ret != 0)
goto out_no_dmabuf;
 
tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
&vmw_user_bo->base,
false,
ttm_buffer_type,
&vmw_user_dmabuf_release, NULL);
if (unlikely(ret != 0))
goto out_no_base_object;
 
args->handle = vmw_user_bo->base.hash.key;
 
out_no_base_object:
ttm_bo_unref(&tmp);
out_no_dmabuf:
ttm_read_unlock(&vmaster->lock);
return ret;
}
#endif
 
int vmw_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_dma_buffer *out_buf;
int ret;
 
ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
if (ret != 0)
return -EINVAL;
 
*offset = out_buf->base.addr_space_offset;
vmw_dmabuf_unreference(&out_buf);
return 0;
}
 
int vmw_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle)
{
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
handle, TTM_REF_USAGE);
}
 
/**
* vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
*
* @res: The resource for which to allocate a backup buffer.
* @interruptible: Whether any sleeps during allocation should be
* performed while interruptible.
*/
static int vmw_resource_buf_alloc(struct vmw_resource *res,
bool interruptible)
{
unsigned long size =
(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
struct vmw_dma_buffer *backup;
int ret;
 
if (likely(res->backup)) {
BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
return 0;
}
 
backup = kzalloc(sizeof(*backup), GFP_KERNEL);
if (unlikely(backup == NULL))
return -ENOMEM;
 
ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
res->func->backup_placement,
interruptible,
&vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
goto out_no_dmabuf;
 
res->backup = backup;
 
out_no_dmabuf:
return ret;
}
 
/**
* vmw_resource_do_validate - Make a resource up-to-date and visible
* to the device.
*
* @res: The resource to make visible to the device.
* @val_buf: Information about a buffer possibly
* containing backup data if a bind operation is needed.
*
* On hardware resource shortage, this function returns -EBUSY and
* should be retried once resources have been freed up.
*/
static int vmw_resource_do_validate(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf)
{
int ret = 0;
const struct vmw_res_func *func = res->func;
 
if (unlikely(res->id == -1)) {
ret = func->create(res);
if (unlikely(ret != 0))
return ret;
}
 
if (func->bind &&
((func->needs_backup && list_empty(&res->mob_head) &&
val_buf->bo != NULL) ||
(!func->needs_backup && val_buf->bo != NULL))) {
ret = func->bind(res, val_buf);
if (unlikely(ret != 0))
goto out_bind_failed;
if (func->needs_backup)
list_add_tail(&res->mob_head, &res->backup->res_list);
}
 
/*
* Only do this on write operations, and move to
* vmw_resource_unreserve if it can be called after
* backup buffers have been unreserved. Otherwise
* sort out locking.
*/
res->res_dirty = true;
 
return 0;
 
out_bind_failed:
func->destroy(res);
 
return ret;
}
 
/**
* vmw_resource_unreserve - Unreserve a resource previously reserved for
* command submission.
*
* @res: Pointer to the struct vmw_resource to unreserve.
* @new_backup: Pointer to new backup buffer if command submission
* switched.
* @new_backup_offset: New backup offset if @new_backup is !NULL.
*
* Currently unreserving a resource means putting it back on the device's
* resource lru list, so that it can be evicted if necessary.
*/
void vmw_resource_unreserve(struct vmw_resource *res,
struct vmw_dma_buffer *new_backup,
unsigned long new_backup_offset)
{
struct vmw_private *dev_priv = res->dev_priv;
 
if (!list_empty(&res->lru_head))
return;
 
if (new_backup && new_backup != res->backup) {
 
if (res->backup) {
lockdep_assert_held(&res->backup->base.resv->lock.base);
list_del_init(&res->mob_head);
vmw_dmabuf_unreference(&res->backup);
}
 
res->backup = vmw_dmabuf_reference(new_backup);
lockdep_assert_held(&new_backup->base.resv->lock.base);
list_add_tail(&res->mob_head, &new_backup->res_list);
}
if (new_backup)
res->backup_offset = new_backup_offset;
 
if (!res->func->may_evict)
return;
 
write_lock(&dev_priv->resource_lock);
list_add_tail(&res->lru_head,
&res->dev_priv->res_lru[res->func->res_type]);
write_unlock(&dev_priv->resource_lock);
}
 
/**
* vmw_resource_check_buffer - Check whether a backup buffer is needed
* for a resource, and in that case allocate one, then reserve and
* validate it.
*
* @res: The resource for which to allocate a backup buffer.
* @ticket: The ww acquire context to use for the buffer reservation.
* @interruptible: Whether any sleeps during allocation should be
* performed while interruptible.
* @val_buf: On successful return contains data about the
* reserved and validated backup buffer.
*/
static int
vmw_resource_check_buffer(struct vmw_resource *res,
struct ww_acquire_ctx *ticket,
bool interruptible,
struct ttm_validate_buffer *val_buf)
{
struct list_head val_list;
bool backup_dirty = false;
int ret;
 
if (unlikely(res->backup == NULL)) {
ret = vmw_resource_buf_alloc(res, interruptible);
if (unlikely(ret != 0))
return ret;
}
 
INIT_LIST_HEAD(&val_list);
val_buf->bo = ttm_bo_reference(&res->backup->base);
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list);
if (unlikely(ret != 0))
goto out_no_reserve;
 
if (res->func->needs_backup && list_empty(&res->mob_head))
return 0;
 
backup_dirty = res->backup_dirty;
ret = ttm_bo_validate(&res->backup->base,
res->func->backup_placement,
true, false);
 
if (unlikely(ret != 0))
goto out_no_validate;
 
return 0;
 
out_no_validate:
ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
ttm_bo_unref(&val_buf->bo);
if (backup_dirty)
vmw_dmabuf_unreference(&res->backup);
 
return ret;
}
 
/**
* vmw_resource_reserve - Reserve a resource for command submission
*
* @res: The resource to reserve.
*
* This function takes the resource off the LRU list and makes sure
* a backup buffer is present for guest-backed resources. However,
* the buffer may not be bound to the resource at this point.
*
*/
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
 
write_lock(&dev_priv->resource_lock);
list_del_init(&res->lru_head);
write_unlock(&dev_priv->resource_lock);
 
if (res->func->needs_backup && res->backup == NULL &&
!no_backup) {
ret = vmw_resource_buf_alloc(res, true);
if (unlikely(ret != 0))
return ret;
}
 
return 0;
}
 
/**
* vmw_resource_backoff_reservation - Unreserve and unreference a
* backup buffer
*
* @ticket: The ww acquire context used for the reservation.
* @val_buf: Backup buffer information.
*/
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
struct ttm_validate_buffer *val_buf)
{
struct list_head val_list;
 
if (likely(val_buf->bo == NULL))
return;
 
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
ttm_eu_backoff_reservation(ticket, &val_list);
ttm_bo_unref(&val_buf->bo);
}
 
/**
* vmw_resource_do_evict - Evict a resource, and transfer its data
* to a backup buffer.
*
* @res: The resource to evict.
*/
int vmw_resource_do_evict(struct vmw_resource *res)
{
struct ttm_validate_buffer val_buf;
const struct vmw_res_func *func = res->func;
struct ww_acquire_ctx ticket;
int ret;
 
BUG_ON(!func->may_evict);
 
val_buf.bo = NULL;
ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
if (unlikely(ret != 0))
return ret;
 
if (unlikely(func->unbind != NULL &&
(!func->needs_backup || !list_empty(&res->mob_head)))) {
ret = func->unbind(res, res->res_dirty, &val_buf);
if (unlikely(ret != 0))
goto out_no_unbind;
list_del_init(&res->mob_head);
}
ret = func->destroy(res);
res->backup_dirty = true;
res->res_dirty = false;
out_no_unbind:
vmw_resource_backoff_reservation(&ticket, &val_buf);
 
return ret;
}
 
 
/**
* vmw_resource_validate - Make a resource up-to-date and visible
* to the device.
*
* @res: The resource to make visible to the device.
*
* On successful return, any backup DMA buffer pointed to by @res->backup will
* be reserved and validated.
* On hardware resource shortage, this function will repeatedly evict
* resources of the same type until the validation succeeds.
*/
int vmw_resource_validate(struct vmw_resource *res)
{
int ret;
struct vmw_resource *evict_res;
struct vmw_private *dev_priv = res->dev_priv;
struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
struct ttm_validate_buffer val_buf;
 
if (likely(!res->func->may_evict))
return 0;
 
val_buf.bo = NULL;
if (res->backup)
val_buf.bo = &res->backup->base;
do {
ret = vmw_resource_do_validate(res, &val_buf);
if (likely(ret != -EBUSY))
break;
 
write_lock(&dev_priv->resource_lock);
if (list_empty(lru_list) || !res->func->may_evict) {
DRM_ERROR("Out of device device id entries "
"for %s.\n", res->func->type_name);
ret = -EBUSY;
write_unlock(&dev_priv->resource_lock);
break;
}
 
evict_res = vmw_resource_reference
(list_first_entry(lru_list, struct vmw_resource,
lru_head));
list_del_init(&evict_res->lru_head);
 
write_unlock(&dev_priv->resource_lock);
vmw_resource_do_evict(evict_res);
vmw_resource_unreference(&evict_res);
} while (1);
 
if (unlikely(ret != 0))
goto out_no_validate;
else if (!res->func->needs_backup && res->backup) {
list_del_init(&res->mob_head);
vmw_dmabuf_unreference(&res->backup);
}
 
return 0;
 
out_no_validate:
return ret;
}
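
/*
 * A minimal sketch of the reservation life cycle around command
 * submission, assuming no backup buffer switch (new_backup == NULL).
 * The real execbuf path additionally reserves the backup buffers
 * through the ttm_eu helpers.
 */
#if 0
static int example_submit_with_resource(struct vmw_resource *res)
{
	int ret;

	ret = vmw_resource_reserve(res, false);	/* off the LRU list */
	if (ret != 0)
		return ret;

	ret = vmw_resource_validate(res);	/* create/bind if needed */
	if (ret != 0)
		goto out;

	/* ... emit fifo commands referencing res->id here ... */
out:
	vmw_resource_unreserve(res, NULL, 0);	/* back on the LRU list */
	return ret;
}
#endif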
 
/**
* vmw_fence_single_bo - Utility function to fence a single TTM buffer
* object without unreserving it.
*
* @bo: Pointer to the struct ttm_buffer_object to fence.
* @fence: Pointer to the fence. If NULL, this function will
* insert a fence into the command stream.
*
* Contrary to the ttm_eu version of this function, it takes only
* a single buffer object instead of a list, and it also doesn't
* unreserve the buffer object, which needs to be done separately.
*/
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
struct vmw_fence_obj *old_fence_obj;
struct vmw_private *dev_priv =
container_of(bdev, struct vmw_private, bdev);
 
if (fence == NULL)
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
else
driver->sync_obj_ref(fence);
 
spin_lock(&bdev->fence_lock);
 
old_fence_obj = bo->sync_obj;
bo->sync_obj = fence;
 
spin_unlock(&bdev->fence_lock);
 
if (old_fence_obj)
vmw_fence_obj_unreference(&old_fence_obj);
}
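
/*
 * Usage sketch, assuming the caller already holds a reservation on
 * the buffer object (as the fifo-command emitters in this driver do):
 */
#if 0
	ttm_bo_reserve(bo, false, false, false, 0);
	/* ... emit fifo commands that read from or write to bo ... */
	vmw_fence_single_bo(bo, NULL);	/* NULL: insert a new fence */
	ttm_bo_unreserve(bo);
#endif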
 
/**
* vmw_resource_move_notify - TTM move_notify_callback
*
* @bo: The TTM buffer object about to move.
* @mem: The struct ttm_mem_reg indicating to what memory
* region the move is taking place.
*
* For now does nothing.
*/
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
}
 
/**
* vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
*
* @res: The resource being queried.
*/
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
return res->func->needs_backup;
}
 
/**
* vmw_resource_evict_type - Evict all resources of a specific type
*
* @dev_priv: Pointer to a device private struct
* @type: The resource type to evict
*
* To avoid thrashing or starvation, or as part of the hibernation sequence,
* evict all evictable resources of a specific type.
*/
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
enum vmw_res_type type)
{
struct list_head *lru_list = &dev_priv->res_lru[type];
struct vmw_resource *evict_res;
 
do {
write_lock(&dev_priv->resource_lock);
 
if (list_empty(lru_list))
goto out_unlock;
 
evict_res = vmw_resource_reference(
list_first_entry(lru_list, struct vmw_resource,
lru_head));
list_del_init(&evict_res->lru_head);
write_unlock(&dev_priv->resource_lock);
vmw_resource_do_evict(evict_res);
vmw_resource_unreference(&evict_res);
} while (1);
 
out_unlock:
write_unlock(&dev_priv->resource_lock);
}
 
/**
* vmw_resource_evict_all - Evict all evictable resources
*
* @dev_priv: Pointer to a device private struct
*
* To avoid thrashing or starvation, or as part of the hibernation sequence,
* evict all evictable resources. In particular this means that all
* guest-backed resources that are registered with the device are
* evicted and the OTable becomes clean.
*/
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
enum vmw_res_type type;
 
mutex_lock(&dev_priv->cmdbuf_mutex);
 
for (type = 0; type < vmw_res_max; ++type)
vmw_resource_evict_type(dev_priv, type);
 
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
/drivers/video/drm/vmwgfx/vmwgfx_resource_priv.h
0,0 → 1,84
/**************************************************************************
*
* Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#ifndef _VMWGFX_RESOURCE_PRIV_H_
#define _VMWGFX_RESOURCE_PRIV_H_
 
#include "vmwgfx_drv.h"
 
/**
* struct vmw_user_resource_conv - Identify a derived user-exported resource
* type and provide a function to convert its ttm_base_object pointer to
* a struct vmw_resource
*/
struct vmw_user_resource_conv {
enum ttm_object_type object_type;
struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
void (*res_free) (struct vmw_resource *res);
};
 
/**
* struct vmw_res_func - members and functions common for a resource type
*
* @res_type: Enum that identifies the lru list to use for eviction.
* @needs_backup: Whether the resource is guest-backed and needs
* persistent buffer storage.
* @type_name: String that identifies the resource type.
* @backup_placement: TTM placement for backup buffers.
* @may_evict: Whether the resource may be evicted.
* @create: Create a hardware resource.
* @destroy: Destroy a hardware resource.
* @bind: Bind a hardware resource to persistent buffer storage.
* @unbind: Unbind a hardware resource from persistent
* buffer storage.
*/
 
struct vmw_res_func {
enum vmw_res_type res_type;
bool needs_backup;
const char *type_name;
struct ttm_placement *backup_placement;
bool may_evict;
 
int (*create) (struct vmw_resource *res);
int (*destroy) (struct vmw_resource *res);
int (*bind) (struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
int (*unbind) (struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
};
 
int vmw_resource_alloc_id(struct vmw_resource *res);
void vmw_resource_release_id(struct vmw_resource *res);
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
bool delay_id,
void (*res_free) (struct vmw_resource *res),
const struct vmw_res_func *func);
void vmw_resource_activate(struct vmw_resource *res,
void (*hw_destroy) (struct vmw_resource *));
#endif
/drivers/video/drm/vmwgfx/vmwgfx_scrn.c
0,0 → 1,574
/**************************************************************************
*
* Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#include "vmwgfx_kms.h"
 
 
#define vmw_crtc_to_sou(x) \
container_of(x, struct vmw_screen_object_unit, base.crtc)
#define vmw_encoder_to_sou(x) \
container_of(x, struct vmw_screen_object_unit, base.encoder)
#define vmw_connector_to_sou(x) \
container_of(x, struct vmw_screen_object_unit, base.connector)
 
struct vmw_screen_object_display {
unsigned num_implicit;
 
struct vmw_framebuffer *implicit_fb;
};
 
/**
* Display unit using screen objects.
*/
struct vmw_screen_object_unit {
struct vmw_display_unit base;
 
unsigned long buffer_size; /**< Size of allocated buffer */
struct vmw_dma_buffer *buffer; /**< Backing store buffer */
 
bool defined;
bool active_implicit;
};
 
static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
// vmw_display_unit_cleanup(&sou->base);
kfree(sou);
}
 
 
/*
* Screen Object Display Unit CRTC functions
*/
 
static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
{
vmw_sou_destroy(vmw_crtc_to_sou(crtc));
}
 
static void vmw_sou_del_active(struct vmw_private *vmw_priv,
struct vmw_screen_object_unit *sou)
{
struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
 
if (sou->active_implicit) {
if (--(ld->num_implicit) == 0)
ld->implicit_fb = NULL;
sou->active_implicit = false;
}
}
 
static void vmw_sou_add_active(struct vmw_private *vmw_priv,
struct vmw_screen_object_unit *sou,
struct vmw_framebuffer *vfb)
{
struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
 
BUG_ON(!ld->num_implicit && ld->implicit_fb);
 
if (!sou->active_implicit && sou->base.is_implicit) {
ld->implicit_fb = vfb;
sou->active_implicit = true;
ld->num_implicit++;
}
}
 
/**
* Send the fifo command to create a screen.
*/
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
struct vmw_screen_object_unit *sou,
uint32_t x, uint32_t y,
struct drm_display_mode *mode)
{
size_t fifo_size;
 
struct {
struct {
uint32_t cmdType;
} header;
SVGAScreenObject obj;
} *cmd;
 
BUG_ON(!sou->buffer);
 
fifo_size = sizeof(*cmd);
cmd = vmw_fifo_reserve(dev_priv, fifo_size);
/* The hardware has hung, nothing we can do about it here. */
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
return -ENOMEM;
}
 
memset(cmd, 0, fifo_size);
cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
cmd->obj.structSize = sizeof(SVGAScreenObject);
cmd->obj.id = sou->base.unit;
cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
cmd->obj.size.width = mode->hdisplay;
cmd->obj.size.height = mode->vdisplay;
if (sou->base.is_implicit) {
cmd->obj.root.x = x;
cmd->obj.root.y = y;
} else {
cmd->obj.root.x = sou->base.gui_x;
cmd->obj.root.y = sou->base.gui_y;
}
 
/* Ok to assume that buffer is pinned in vram */
vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
cmd->obj.backingStore.pitch = mode->hdisplay * 4;
 
vmw_fifo_commit(dev_priv, fifo_size);
 
sou->defined = true;
 
return 0;
}
 
/**
* Send the fifo command to destroy a screen.
*/
static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
struct vmw_screen_object_unit *sou)
{
size_t fifo_size;
int ret;
 
struct {
struct {
uint32_t cmdType;
} header;
SVGAFifoCmdDestroyScreen body;
} *cmd;
 
/* no need to do anything */
if (unlikely(!sou->defined))
return 0;
 
fifo_size = sizeof(*cmd);
cmd = vmw_fifo_reserve(dev_priv, fifo_size);
/* the hardware has hung, nothing we can do about it here */
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
return -ENOMEM;
}
 
memset(cmd, 0, fifo_size);
cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
cmd->body.screenId = sou->base.unit;
 
vmw_fifo_commit(dev_priv, fifo_size);
 
/* Force sync */
ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
if (unlikely(ret != 0))
DRM_ERROR("Failed to sync with HW");
else
sou->defined = false;
 
return ret;
}
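
/*
 * The define/destroy helpers above both follow the driver's generic
 * fifo pattern: reserve space, fill the command, commit. A minimal
 * sketch of that pattern using SVGA_CMD_UPDATE (the command choice is
 * illustrative, not taken from this file):
 */
#if 0
	struct {
		uint32_t cmdType;
		SVGAFifoCmdUpdate body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;	/* reserve failed; device likely hung */

	memset(cmd, 0, sizeof(*cmd));
	cmd->cmdType = SVGA_CMD_UPDATE;
	/* ... fill cmd->body (x, y, width, height) ... */
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
#endif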
 
/**
* Free the backing store.
*/
static void vmw_sou_backing_free(struct vmw_private *dev_priv,
struct vmw_screen_object_unit *sou)
{
struct ttm_buffer_object *bo;
 
if (unlikely(sou->buffer == NULL))
return;
 
bo = &sou->buffer->base;
ttm_bo_unref(&bo);
sou->buffer = NULL;
sou->buffer_size = 0;
}
 
/**
* Allocate the backing store for the buffer.
*/
static int vmw_sou_backing_alloc(struct vmw_private *dev_priv,
struct vmw_screen_object_unit *sou,
unsigned long size)
{
int ret;
 
if (sou->buffer_size == size)
return 0;
 
if (sou->buffer)
vmw_sou_backing_free(dev_priv, sou);
 
sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL);
if (unlikely(sou->buffer == NULL))
return -ENOMEM;
 
/* After the allocation we might not be able to
* resume the overlays; this is preferred to failing the allocation.
*/
// vmw_overlay_pause_all(dev_priv);
ret = vmw_dmabuf_init(dev_priv, sou->buffer, size,
&vmw_vram_ne_placement,
false, &vmw_dmabuf_bo_free);
// vmw_overlay_resume_all(dev_priv);
 
if (unlikely(ret != 0))
sou->buffer = NULL; /* vmw_dmabuf_init frees on error */
else
sou->buffer_size = size;
 
return ret;
}
 
static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
{
struct vmw_private *dev_priv;
struct vmw_screen_object_unit *sou;
struct drm_connector *connector;
struct drm_display_mode *mode;
struct drm_encoder *encoder;
struct vmw_framebuffer *vfb;
struct drm_framebuffer *fb;
struct drm_crtc *crtc;
int ret = 0;
 
if (!set)
return -EINVAL;
 
if (!set->crtc)
return -EINVAL;
 
/* get the sou */
crtc = set->crtc;
sou = vmw_crtc_to_sou(crtc);
vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
dev_priv = vmw_priv(crtc->dev);
 
if (set->num_connectors > 1) {
DRM_ERROR("to many connectors\n");
return -EINVAL;
}
 
if (set->num_connectors == 1 &&
set->connectors[0] != &sou->base.connector) {
DRM_ERROR("connector doesn't match %p %p\n",
set->connectors[0], &sou->base.connector);
return -EINVAL;
}
 
/* sou only supports one fb active at a time */
if (sou->base.is_implicit &&
dev_priv->sou_priv->implicit_fb && vfb &&
!(dev_priv->sou_priv->num_implicit == 1 &&
sou->active_implicit) &&
dev_priv->sou_priv->implicit_fb != vfb) {
DRM_ERROR("Multiple framebuffers not supported\n");
return -EINVAL;
}
 
/* since they always map one to one these are safe */
connector = &sou->base.connector;
encoder = &sou->base.encoder;
 
/* should we turn the crtc off */
if (set->num_connectors == 0 || !set->mode || !set->fb) {
ret = vmw_sou_fifo_destroy(dev_priv, sou);
/* the hardware has hung; don't do anything more */
if (unlikely(ret != 0))
return ret;
 
connector->encoder = NULL;
encoder->crtc = NULL;
crtc->fb = NULL;
crtc->x = 0;
crtc->y = 0;
 
vmw_sou_del_active(dev_priv, sou);
 
vmw_sou_backing_free(dev_priv, sou);
 
return 0;
}
 
 
/* we now know we want to set a mode */
mode = set->mode;
fb = set->fb;
 
if (set->x + mode->hdisplay > fb->width ||
set->y + mode->vdisplay > fb->height) {
DRM_ERROR("set outside of framebuffer\n");
return -EINVAL;
}
 
// vmw_fb_off(dev_priv);
 
if (mode->hdisplay != crtc->mode.hdisplay ||
mode->vdisplay != crtc->mode.vdisplay) {
/* no need to check if depth is different, because backing
* store depth is forced to 4 by the device.
*/
 
ret = vmw_sou_fifo_destroy(dev_priv, sou);
/* the hardware has hung; don't do anything more */
if (unlikely(ret != 0))
return ret;
 
vmw_sou_backing_free(dev_priv, sou);
}
 
if (!sou->buffer) {
/* forced to depth 4 by the device */
size_t size = mode->hdisplay * mode->vdisplay * 4;
ret = vmw_sou_backing_alloc(dev_priv, sou, size);
if (unlikely(ret != 0))
return ret;
}
 
ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode);
if (unlikely(ret != 0)) {
/*
* The hardware has hung, and we may or may not have a buffer
* hanging off the screen object. The best thing to do is nothing
* if we were already defined; if not, just turn the crtc off.
* Not what userspace wants, but it will have to cope.
*/
if (sou->defined)
return ret;
 
connector->encoder = NULL;
encoder->crtc = NULL;
crtc->fb = NULL;
crtc->x = 0;
crtc->y = 0;
 
return ret;
}
 
vmw_sou_add_active(dev_priv, sou, vfb);
 
connector->encoder = encoder;
encoder->crtc = crtc;
crtc->mode = *mode;
crtc->fb = fb;
crtc->x = set->x;
crtc->y = set->y;
 
return 0;
}
 
static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.save = vmw_du_crtc_save,
.restore = vmw_du_crtc_restore,
// .cursor_set = vmw_du_crtc_cursor_set,
// .cursor_move = vmw_du_crtc_cursor_move,
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_sou_crtc_destroy,
.set_config = vmw_sou_crtc_set_config,
// .page_flip = vmw_du_page_flip,
};
 
/*
* Screen Object Display Unit encoder functions
*/
 
static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
{
vmw_sou_destroy(vmw_encoder_to_sou(encoder));
}
 
static struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
.destroy = vmw_sou_encoder_destroy,
};
 
/*
* Screen Object Display Unit connector functions
*/
 
static void vmw_sou_connector_destroy(struct drm_connector *connector)
{
vmw_sou_destroy(vmw_connector_to_sou(connector));
}
 
static struct drm_connector_funcs vmw_legacy_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.save = vmw_du_connector_save,
.restore = vmw_du_connector_restore,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
.set_property = vmw_du_connector_set_property,
.destroy = vmw_sou_connector_destroy,
};
 
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
struct vmw_screen_object_unit *sou;
struct drm_device *dev = dev_priv->dev;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
 
sou = kzalloc(sizeof(*sou), GFP_KERNEL);
if (!sou)
return -ENOMEM;
 
sou->base.unit = unit;
crtc = &sou->base.crtc;
encoder = &sou->base.encoder;
connector = &sou->base.connector;
 
sou->active_implicit = false;
 
sou->base.pref_active = (unit == 0);
sou->base.pref_width = dev_priv->initial_width;
sou->base.pref_height = dev_priv->initial_height;
sou->base.pref_mode = NULL;
sou->base.is_implicit = true;
 
drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
connector->status = vmw_du_connector_detect(connector, true);
 
drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL);
drm_mode_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
 
drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
 
drm_mode_crtc_set_gamma_size(crtc, 256);
 
drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
 
return 0;
}
 
int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
int i, ret;
 
ENTER();
 
if (dev_priv->sou_priv) {
DRM_INFO("sou system already on\n");
return -EINVAL;
}
 
if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
DRM_INFO("Not using screen objects,"
" missing cap SCREEN_OBJECT_2\n");
return -ENOSYS;
}
 
ret = -ENOMEM;
dev_priv->sou_priv = kmalloc(sizeof(*dev_priv->sou_priv), GFP_KERNEL);
if (unlikely(!dev_priv->sou_priv))
goto err_no_mem;
 
dev_priv->sou_priv->num_implicit = 0;
dev_priv->sou_priv->implicit_fb = NULL;
 
// ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
// if (unlikely(ret != 0))
// goto err_free;
 
ret = drm_mode_create_dirty_info_property(dev);
if (unlikely(ret != 0))
goto err_vblank_cleanup;
 
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
vmw_sou_init(dev_priv, i);
 
DRM_INFO("Screen objects system initialized\n");
 
LEAVE();
return 0;
 
err_vblank_cleanup:
// drm_vblank_cleanup(dev);
err_free:
kfree(dev_priv->sou_priv);
dev_priv->sou_priv = NULL;
err_no_mem:
return ret;
}
 
int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
if (!dev_priv->sou_priv)
return -ENOSYS;
 
// drm_vblank_cleanup(dev);
 
kfree(dev_priv->sou_priv);
 
return 0;
}
 
/**
* Returns true if this unit can be page flipped.
* Must be called with the mode_config mutex held.
*/
bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
struct drm_crtc *crtc)
{
struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
 
if (!sou->base.is_implicit)
return true;
 
if (dev_priv->sou_priv->num_implicit != 1)
return false;
 
return true;
}
 
/**
* Update the implicit fb to the current fb of this crtc.
* Must be called with the mode_config mutex held.
*/
void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
struct drm_crtc *crtc)
{
struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
 
BUG_ON(!sou->base.is_implicit);
 
dev_priv->sou_priv->implicit_fb =
vmw_framebuffer_to_vfb(sou->base.crtc.fb);
}
/drivers/video/drm/vmwgfx/vmwgfx_surface.c
0,0 → 1,879
/**************************************************************************
*
* Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include <ttm/ttm_placement.h>
#include "svga3d_surfacedefs.h"
 
/**
* struct vmw_user_surface - User-space visible surface resource
*
* @base: The TTM base object handling user-space visibility.
* @srf: The surface metadata.
* @size: TTM accounting size for the surface.
*/
struct vmw_user_surface {
struct ttm_base_object base;
struct vmw_surface srf;
uint32_t size;
uint32_t backup_handle;
};
 
/**
* struct vmw_surface_offset - Backing store mip level offset info
*
* @face: Surface face.
* @mip: Mip level.
* @bo_offset: Offset into backing store of this mip level.
*
*/
struct vmw_surface_offset {
uint32_t face;
uint32_t mip;
uint32_t bo_offset;
};
 
static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
 
static const struct vmw_user_resource_conv user_surface_conv = {
.object_type = VMW_RES_SURFACE,
.base_obj_to_res = vmw_user_surface_base_to_res,
.res_free = vmw_user_surface_free
};
 
const struct vmw_user_resource_conv *user_surface_converter =
&user_surface_conv;
 
 
static uint64_t vmw_user_surface_size;
 
static const struct vmw_res_func vmw_legacy_surface_func = {
.res_type = vmw_res_surface,
.needs_backup = false,
.may_evict = true,
.type_name = "legacy surfaces",
.backup_placement = &vmw_srf_placement,
.create = &vmw_legacy_srf_create,
.destroy = &vmw_legacy_srf_destroy,
.bind = &vmw_legacy_srf_bind,
.unbind = &vmw_legacy_srf_unbind
};
 
/**
* struct vmw_surface_dma - SVGA3D DMA command
*/
struct vmw_surface_dma {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA body;
SVGA3dCopyBox cb;
SVGA3dCmdSurfaceDMASuffix suffix;
};
 
/**
* struct vmw_surface_define - SVGA3D Surface Define command
*/
struct vmw_surface_define {
SVGA3dCmdHeader header;
SVGA3dCmdDefineSurface body;
};
 
/**
* struct vmw_surface_destroy - SVGA3D Surface Destroy command
*/
struct vmw_surface_destroy {
SVGA3dCmdHeader header;
SVGA3dCmdDestroySurface body;
};
 
 
/**
* vmw_surface_dma_size - Compute fifo size for a dma command.
*
* @srf: Pointer to a struct vmw_surface
*
* Computes the required size for a surface dma command for backup or
* restoration of the surface represented by @srf.
*/
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
return srf->num_sizes * sizeof(struct vmw_surface_dma);
}
 
 
/**
* vmw_surface_define_size - Compute fifo size for a surface define command.
*
* @srf: Pointer to a struct vmw_surface
*
* Computes the required size for a surface define command for the definition
* of the surface represented by @srf.
*/
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
return sizeof(struct vmw_surface_define) + srf->num_sizes *
sizeof(SVGA3dSize);
}
 
 
/**
* vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
*
* Computes the required size for a surface destroy command for the destruction
* of a hw surface.
*/
static inline uint32_t vmw_surface_destroy_size(void)
{
return sizeof(struct vmw_surface_destroy);
}
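 
A worked example of the three sizing helpers above, added as an editor's note; the face and mip counts are illustrative only:

/*
 * Example: a cube-map surface with 6 faces and 10 mip levels per face
 * has num_sizes = 60, so the FIFO reservations are:
 *
 *	dma:	 60 * sizeof(struct vmw_surface_dma)
 *	define:	 sizeof(struct vmw_surface_define) + 60 * sizeof(SVGA3dSize)
 *	destroy: sizeof(struct vmw_surface_destroy)
 */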
 
/**
* vmw_surface_destroy_encode - Encode a surface_destroy command.
*
* @id: The surface id
* @cmd_space: Pointer to memory area in which the commands should be encoded.
*/
static void vmw_surface_destroy_encode(uint32_t id,
void *cmd_space)
{
struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
cmd_space;
 
cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
cmd->header.size = sizeof(cmd->body);
cmd->body.sid = id;
}
 
/**
* vmw_surface_define_encode - Encode a surface_define command.
*
* @srf: Pointer to a struct vmw_surface object.
* @cmd_space: Pointer to memory area in which the commands should be encoded.
*/
static void vmw_surface_define_encode(const struct vmw_surface *srf,
void *cmd_space)
{
struct vmw_surface_define *cmd = (struct vmw_surface_define *)
cmd_space;
struct drm_vmw_size *src_size;
SVGA3dSize *cmd_size;
uint32_t cmd_len;
int i;
 
cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
 
cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id;
cmd->body.surfaceFlags = srf->flags;
cmd->body.format = cpu_to_le32(srf->format);
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
cmd->body.face[i].numMipLevels = srf->mip_levels[i];
 
cmd += 1;
cmd_size = (SVGA3dSize *) cmd;
src_size = srf->sizes;
 
for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
cmd_size->width = src_size->width;
cmd_size->height = src_size->height;
cmd_size->depth = src_size->depth;
}
}
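 
A short editor's note on the command layout encoded above; this restates what the code does rather than adding behavior:

/*
 * A surface define command is the fixed header and body followed by
 * one SVGA3dSize per mip level of every face, which is why
 * vmw_surface_define_size() adds num_sizes * sizeof(SVGA3dSize) to
 * the fixed struct size.
 */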
 
/**
* vmw_surface_dma_encode - Encode a surface_dma command.
*
* @srf: Pointer to a struct vmw_surface object.
* @cmd_space: Pointer to memory area in which the commands should be encoded.
* @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
* should be placed or read from.
* @to_surface: Boolean whether to DMA to the surface or from the surface.
*/
static void vmw_surface_dma_encode(struct vmw_surface *srf,
void *cmd_space,
const SVGAGuestPtr *ptr,
bool to_surface)
{
uint32_t i;
struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
const struct svga3d_surface_desc *desc =
svga3dsurface_get_desc(srf->format);
 
for (i = 0; i < srf->num_sizes; ++i) {
SVGA3dCmdHeader *header = &cmd->header;
SVGA3dCmdSurfaceDMA *body = &cmd->body;
SVGA3dCopyBox *cb = &cmd->cb;
SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
const struct drm_vmw_size *cur_size = &srf->sizes[i];
 
header->id = SVGA_3D_CMD_SURFACE_DMA;
header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
 
body->guest.ptr = *ptr;
body->guest.ptr.offset += cur_offset->bo_offset;
body->guest.pitch = svga3dsurface_calculate_pitch(desc,
cur_size);
body->host.sid = srf->res.id;
body->host.face = cur_offset->face;
body->host.mipmap = cur_offset->mip;
body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
SVGA3D_READ_HOST_VRAM);
cb->x = 0;
cb->y = 0;
cb->z = 0;
cb->srcx = 0;
cb->srcy = 0;
cb->srcz = 0;
cb->w = cur_size->width;
cb->h = cur_size->height;
cb->d = cur_size->depth;
 
suffix->suffixSize = sizeof(*suffix);
suffix->maximumOffset =
svga3dsurface_get_image_buffer_size(desc, cur_size,
body->guest.pitch);
suffix->flags.discard = 0;
suffix->flags.unsynchronized = 0;
suffix->flags.reserved = 0;
++cmd;
}
}
 
 
/**
* vmw_hw_surface_destroy - destroy a Device surface
*
* @res: Pointer to a struct vmw_resource embedded in a struct
* vmw_surface.
*
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts accounting and the resource count accordingly.
*/
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
 
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_surface *srf;
void *cmd;
 
if (res->id != -1) {
 
cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
return;
}
 
vmw_surface_destroy_encode(res->id, cmd);
vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
 
/*
 * TODO: Make used_memory_size atomic, or protect it with a separate
 * lock, to avoid taking dev_priv::cmdbuf_mutex in the destroy path.
 */
 
mutex_lock(&dev_priv->cmdbuf_mutex);
srf = vmw_res_to_srf(res);
dev_priv->used_memory_size -= res->backup_size;
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
vmw_3d_resource_dec(dev_priv, false);
}
 
/**
* vmw_legacy_srf_create - Create a device surface as part of the
* resource validation process.
*
* @res: Pointer to a struct vmw_surface.
*
 * If the surface doesn't have a hw id, allocate one and issue the
 * surface define command to the device.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
*
* May return other errors if the kernel is out of guest resources.
*/
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_surface *srf;
uint32_t submit_size;
uint8_t *cmd;
int ret;
 
if (likely(res->id != -1))
return 0;
 
srf = vmw_res_to_srf(res);
if (unlikely(dev_priv->used_memory_size + res->backup_size >=
dev_priv->memory_size))
return -EBUSY;
 
/*
* Alloc id for the resource.
*/
 
ret = vmw_resource_alloc_id(res);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate a surface id.\n");
goto out_no_id;
}
 
if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
ret = -EBUSY;
goto out_no_fifo;
}
 
/*
 * Encode the surface define command.
*/
 
submit_size = vmw_surface_define_size(srf);
cmd = vmw_fifo_reserve(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
 
vmw_surface_define_encode(srf, cmd);
vmw_fifo_commit(dev_priv, submit_size);
/*
* Surface memory usage accounting.
*/
 
dev_priv->used_memory_size += res->backup_size;
return 0;
 
out_no_fifo:
vmw_resource_release_id(res);
out_no_id:
return ret;
}
 
/**
* vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
*
* @res: Pointer to a struct vmw_res embedded in a struct
* vmw_surface.
* @val_buf: Pointer to a struct ttm_validate_buffer containing
* information about the backup buffer.
 * @bind: Boolean whether to DMA to the surface.
*
* Transfer backup data to or from a legacy surface as part of the
* validation process.
* May return other errors if the kernel is out of guest resources.
* The backup buffer will be fenced or idle upon successful completion,
* and if the surface needs persistent backup storage, the backup buffer
* will also be returned reserved iff @bind is true.
*/
static int vmw_legacy_srf_dma(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf,
bool bind)
{
SVGAGuestPtr ptr;
struct vmw_fence_obj *fence;
uint32_t submit_size;
struct vmw_surface *srf = vmw_res_to_srf(res);
uint8_t *cmd;
struct vmw_private *dev_priv = res->dev_priv;
 
BUG_ON(val_buf->bo == NULL);
 
submit_size = vmw_surface_dma_size(srf);
cmd = vmw_fifo_reserve(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"DMA.\n");
return -ENOMEM;
}
vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
vmw_surface_dma_encode(srf, cmd, &ptr, bind);
 
vmw_fifo_commit(dev_priv, submit_size);
 
/*
* Create a fence object and fence the backup buffer.
*/
 
(void) vmw_execbuf_fence_commands(NULL, dev_priv,
&fence, NULL);
 
vmw_fence_single_bo(val_buf->bo, fence);
 
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
 
return 0;
}
 
/**
* vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
* surface validation process.
*
* @res: Pointer to a struct vmw_res embedded in a struct
* vmw_surface.
* @val_buf: Pointer to a struct ttm_validate_buffer containing
* information about the backup buffer.
*
* This function will copy backup data to the surface if the
* backup buffer is dirty.
*/
static int vmw_legacy_srf_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf)
{
if (!res->backup_dirty)
return 0;
 
return vmw_legacy_srf_dma(res, val_buf, true);
}
 
 
/**
* vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
* surface eviction process.
*
* @res: Pointer to a struct vmw_res embedded in a struct
* vmw_surface.
* @val_buf: Pointer to a struct ttm_validate_buffer containing
* information about the backup buffer.
*
* This function will copy backup data from the surface.
*/
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf)
{
if (unlikely(readback))
return vmw_legacy_srf_dma(res, val_buf, false);
return 0;
}
 
/**
* vmw_legacy_srf_destroy - Destroy a device surface as part of a
* resource eviction process.
*
* @res: Pointer to a struct vmw_res embedded in a struct
* vmw_surface.
*/
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
uint32_t submit_size;
uint8_t *cmd;
 
BUG_ON(res->id == -1);
 
/*
 * Encode the surface destroy command.
*/
 
submit_size = vmw_surface_destroy_size();
cmd = vmw_fifo_reserve(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"eviction.\n");
return -ENOMEM;
}
 
vmw_surface_destroy_encode(res->id, cmd);
vmw_fifo_commit(dev_priv, submit_size);
 
/*
* Surface memory usage accounting.
*/
 
dev_priv->used_memory_size -= res->backup_size;
 
/*
* Release the surface ID.
*/
 
vmw_resource_release_id(res);
 
return 0;
}
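 
An editor's summary of the legacy surface life cycle implemented by the four vmw_legacy_surface_func callbacks above:

/*
 *	create  - allocate a surface id and issue the define command
 *	bind    - DMA backup data to the surface when the backup is dirty
 *	unbind  - DMA surface contents back to the backup buffer when
 *		  readback is requested
 *	destroy - issue the destroy command and release the surface id
 */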
 
 
/**
* vmw_surface_init - initialize a struct vmw_surface
*
* @dev_priv: Pointer to a device private struct.
* @srf: Pointer to the struct vmw_surface to initialize.
* @res_free: Pointer to a resource destructor used to free
* the object.
*/
static int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_surface *srf,
void (*res_free) (struct vmw_resource *res))
{
int ret;
struct vmw_resource *res = &srf->res;
 
BUG_ON(res_free == NULL);
(void) vmw_3d_resource_inc(dev_priv, false);
ret = vmw_resource_init(dev_priv, res, true, res_free,
&vmw_legacy_surface_func);
 
if (unlikely(ret != 0)) {
vmw_3d_resource_dec(dev_priv, false);
res_free(res);
return ret;
}
 
/*
* The surface won't be visible to hardware until a
* surface validate.
*/
 
vmw_resource_activate(res, vmw_hw_surface_destroy);
return ret;
}
 
/**
* vmw_user_surface_base_to_res - TTM base object to resource converter for
* user visible surfaces
*
* @base: Pointer to a TTM base object
*
* Returns the struct vmw_resource embedded in a struct vmw_surface
* for the user-visible object identified by the TTM base object @base.
*/
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
return &(container_of(base, struct vmw_user_surface, base)->srf.res);
}
 
/**
* vmw_user_surface_free - User visible surface resource destructor
*
* @res: A struct vmw_resource embedded in a struct vmw_surface.
*/
static void vmw_user_surface_free(struct vmw_resource *res)
{
struct vmw_surface *srf = vmw_res_to_srf(res);
struct vmw_user_surface *user_srf =
container_of(srf, struct vmw_user_surface, srf);
struct vmw_private *dev_priv = srf->res.dev_priv;
uint32_t size = user_srf->size;
 
kfree(srf->offsets);
kfree(srf->sizes);
kfree(srf->snooper.image);
// ttm_base_object_kfree(user_srf, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
 
/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 * destructor
 *
 * @p_base: Pointer to a pointer to a TTM base object
 * embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference to its resource and sets the
 * pointer pointed to by *p_base to NULL.
*/
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct vmw_user_surface *user_srf =
container_of(base, struct vmw_user_surface, base);
struct vmw_resource *res = &user_srf->srf.res;
 
*p_base = NULL;
vmw_resource_unreference(&res);
}
 
#if 0
/**
* vmw_user_surface_define_ioctl - Ioctl function implementing
* the user surface define functionality.
*
* @dev: Pointer to a struct drm_device.
* @data: Pointer to data copied from / to user-space.
* @file_priv: Pointer to a drm file private structure.
*/
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_surface *user_srf;
struct vmw_surface *srf;
struct vmw_resource *res;
struct vmw_resource *tmp;
union drm_vmw_surface_create_arg *arg =
(union drm_vmw_surface_create_arg *)data;
struct drm_vmw_surface_create_req *req = &arg->req;
struct drm_vmw_surface_arg *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct drm_vmw_size __user *user_sizes;
int ret;
int i, j;
uint32_t cur_bo_offset;
struct drm_vmw_size *cur_size;
struct vmw_surface_offset *cur_offset;
uint32_t num_sizes;
uint32_t size;
struct vmw_master *vmaster = vmw_master(file_priv->master);
const struct svga3d_surface_desc *desc;
 
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
128;
 
num_sizes = 0;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
num_sizes += req->mip_levels[i];
 
if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
DRM_VMW_MAX_MIP_LEVELS)
return -EINVAL;
 
size = vmw_user_surface_size + 128 +
ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
 
 
desc = svga3dsurface_get_desc(req->format);
if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
DRM_ERROR("Invalid surface format for surface creation.\n");
return -EINVAL;
}
 
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
 
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
size, false, true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for surface"
" creation.\n");
goto out_unlock;
}
 
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
if (unlikely(user_srf == NULL)) {
ret = -ENOMEM;
goto out_no_user_srf;
}
 
srf = &user_srf->srf;
res = &srf->res;
 
srf->flags = req->flags;
srf->format = req->format;
srf->scanout = req->scanout;
 
memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
srf->num_sizes = num_sizes;
user_srf->size = size;
 
srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
if (unlikely(srf->sizes == NULL)) {
ret = -ENOMEM;
goto out_no_sizes;
}
srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
GFP_KERNEL);
if (unlikely(srf->offsets == NULL)) {
ret = -ENOMEM;
goto out_no_offsets;
}
 
user_sizes = (struct drm_vmw_size __user *)(unsigned long)
req->size_addr;
 
ret = copy_from_user(srf->sizes, user_sizes,
srf->num_sizes * sizeof(*srf->sizes));
if (unlikely(ret != 0)) {
ret = -EFAULT;
goto out_no_copy;
}
 
srf->base_size = *srf->sizes;
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
srf->multisample_count = 1;
 
cur_bo_offset = 0;
cur_offset = srf->offsets;
cur_size = srf->sizes;
 
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
for (j = 0; j < srf->mip_levels[i]; ++j) {
uint32_t stride = svga3dsurface_calculate_pitch
(desc, cur_size);
 
cur_offset->face = i;
cur_offset->mip = j;
cur_offset->bo_offset = cur_bo_offset;
cur_bo_offset += svga3dsurface_get_image_buffer_size
(desc, cur_size, stride);
++cur_offset;
++cur_size;
}
}
res->backup_size = cur_bo_offset;
if (srf->scanout &&
srf->num_sizes == 1 &&
srf->sizes[0].width == 64 &&
srf->sizes[0].height == 64 &&
srf->format == SVGA3D_A8R8G8B8) {
 
srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
/* clear the image */
if (srf->snooper.image) {
memset(srf->snooper.image, 0x00, 64 * 64 * 4);
} else {
DRM_ERROR("Failed to allocate cursor_image\n");
ret = -ENOMEM;
goto out_no_copy;
}
} else {
srf->snooper.image = NULL;
}
srf->snooper.crtc = NULL;
 
user_srf->base.shareable = false;
user_srf->base.tfile = NULL;
 
/*
* From this point, the generic resource management functions
* destroy the object on failure.
*/
 
ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
if (unlikely(ret != 0))
goto out_unlock;
 
tmp = vmw_resource_reference(&srf->res);
ret = ttm_base_object_init(tfile, &user_srf->base,
req->shareable, VMW_RES_SURFACE,
&vmw_user_surface_base_release, NULL);
 
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
vmw_resource_unreference(&res);
goto out_unlock;
}
 
rep->sid = user_srf->base.hash.key;
vmw_resource_unreference(&res);
 
ttm_read_unlock(&vmaster->lock);
return 0;
out_no_copy:
kfree(srf->offsets);
out_no_offsets:
kfree(srf->sizes);
out_no_sizes:
ttm_base_object_kfree(user_srf, base);
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
ttm_read_unlock(&vmaster->lock);
return ret;
}
 
/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
* the user surface reference functionality.
*
* @dev: Pointer to a struct drm_device.
* @data: Pointer to data copied from / to user-space.
* @file_priv: Pointer to a drm file private structure.
*/
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
union drm_vmw_surface_reference_arg *arg =
(union drm_vmw_surface_reference_arg *)data;
struct drm_vmw_surface_arg *req = &arg->req;
struct drm_vmw_surface_create_req *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_surface *srf;
struct vmw_user_surface *user_srf;
struct drm_vmw_size __user *user_sizes;
struct ttm_base_object *base;
int ret = -EINVAL;
 
base = ttm_base_object_lookup(tfile, req->sid);
if (unlikely(base == NULL)) {
DRM_ERROR("Could not find surface to reference.\n");
return -EINVAL;
}
 
if (unlikely(base->object_type != VMW_RES_SURFACE))
goto out_bad_resource;
 
user_srf = container_of(base, struct vmw_user_surface, base);
srf = &user_srf->srf;
 
ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_no_reference;
}
 
rep->flags = srf->flags;
rep->format = srf->format;
memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
user_sizes = (struct drm_vmw_size __user *)(unsigned long)
rep->size_addr;
 
if (user_sizes)
ret = copy_to_user(user_sizes, srf->sizes,
srf->num_sizes * sizeof(*srf->sizes));
if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
ret = -EFAULT;
}
out_bad_resource:
out_no_reference:
ttm_base_object_unref(&base);
 
return ret;
}
 
#endif
/drivers/video/drm/vmwgfx/vmwgfx_ttm_glue.c
0,0 → 1,105
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
 
#include <drm/drmP.h>
#include <drm/drm_global.h>
#include "vmwgfx_drv.h"
 
#if 0
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv;
struct vmw_private *dev_priv;
 
// if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
// DRM_ERROR("Illegal attempt to mmap old fifo space.\n");
// return -EINVAL;
// }
 
file_priv = filp->private_data;
dev_priv = vmw_priv(file_priv->minor->dev);
return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}
#endif
 
static int vmw_ttm_mem_global_init(struct drm_global_reference *ref)
{
DRM_INFO("global init.\n");
return ttm_mem_global_init(ref->object);
}
 
static void vmw_ttm_mem_global_release(struct drm_global_reference *ref)
{
// ttm_mem_global_release(ref->object);
}
 
int vmw_ttm_global_init(struct vmw_private *dev_priv)
{
struct drm_global_reference *global_ref;
int ret;

ENTER();
 
global_ref = &dev_priv->mem_global_ref;
global_ref->global_type = DRM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
global_ref->init = &vmw_ttm_mem_global_init;
global_ref->release = &vmw_ttm_mem_global_release;
 
ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM memory accounting.\n");
return ret;
}
 
dev_priv->bo_global_ref.mem_glob =
dev_priv->mem_global_ref.object;
global_ref = &dev_priv->bo_global_ref.ref;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_init;
global_ref->release = &ttm_bo_global_release;
ret = drm_global_item_ref(global_ref);
 
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM buffer objects.\n");
goto out_no_bo;
}
 
LEAVE();
return 0;
 
out_no_bo:
drm_global_item_unref(&dev_priv->mem_global_ref);
return ret;
}
 
void vmw_ttm_global_release(struct vmw_private *dev_priv)
{
drm_global_item_unref(&dev_priv->bo_global_ref.ref);
drm_global_item_unref(&dev_priv->mem_global_ref);
}
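 
A hedged usage sketch of the global-reference setup above; the surrounding load/unload functions are hypothetical, only the two vmw_ttm_global_* calls come from this file:

static int vmw_driver_load_sketch(struct vmw_private *dev_priv)
{
int ret;

/* Take the TTM memory and buffer-object global references. */
ret = vmw_ttm_global_init(dev_priv);
if (unlikely(ret != 0))
return ret;

/* ... remaining device setup ... */
return 0;
}

static void vmw_driver_unload_sketch(struct vmw_private *dev_priv)
{
/* Drops the bo and mem references in reverse order of acquisition. */
vmw_ttm_global_release(dev_priv);
}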