Subversion Repositories Kolibri OS

Compare Revisions

Rev 5059 → Rev 5060

/drivers/video/drm/drm_crtc.c
37,36 → 37,83
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
 
#include "drm_crtc_internal.h"
 
static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r,
struct drm_file *file_priv);
 
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: drm device
*
* This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented.
* scheme isn't (yet) implemented. Locks must be dropped with
* drm_modeset_unlock_all.
*/
void drm_modeset_lock_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx;
int ret;
 
mutex_lock(&dev->mode_config.mutex);
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (WARN_ON(!ctx))
return;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
mutex_lock(&config->mutex);
 
drm_modeset_acquire_init(ctx, 0);
 
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
goto fail;
ret = drm_modeset_lock_all_crtcs(dev, ctx);
if (ret)
goto fail;
 
WARN_ON(config->acquire_ctx);
 
/* now we hold the locks, so now that it is safe, stash the
* ctx for drm_modeset_unlock_all():
*/
config->acquire_ctx = ctx;
 
drm_warn_on_modeset_not_all_locked(dev);
 
return;
 
fail:
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
}
}
EXPORT_SYMBOL(drm_modeset_lock_all);
 
/**
* drm_modeset_unlock_all - drop all modeset locks
* @dev: device
*
* This function drops all modeset locks taken by drm_modeset_lock_all.
*/
void drm_modeset_unlock_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
mutex_unlock(&crtc->mutex);
if (WARN_ON(!ctx))
return;
 
config->acquire_ctx = NULL;
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
 
kfree(ctx);
 
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);
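A minimal sketch of the lock_all/unlock_all pairing the updated comment above asks for, not part of this revision; foo_resume_display() and foo_restore_modes() are hypothetical driver code:

static void foo_resume_display(struct drm_device *dev)
{
	drm_modeset_lock_all(dev);

	/* all modeset locks are held here; do the driver-specific work */
	foo_restore_modes(dev);		/* hypothetical driver helper */

	drm_modeset_unlock_all(dev);
}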
74,6 → 121,8
/**
* drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
* @dev: device
*
* Useful as a debug assert.
*/
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
80,8 → 129,9
struct drm_crtc *crtc;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
WARN_ON(!mutex_is_locked(&crtc->mutex));
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
110,6 → 160,13
 
DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
 
static const struct drm_prop_enum_list drm_plane_type_enum_list[] =
{
{ DRM_PLANE_TYPE_OVERLAY, "Overlay" },
{ DRM_PLANE_TYPE_PRIMARY, "Primary" },
{ DRM_PLANE_TYPE_CURSOR, "Cursor" },
};
 
/*
* Optional properties
*/
121,6 → 178,12
{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
};
 
static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
{ DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
{ DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
};
 
/*
* Non-global properties, but "required" for certain connectors.
*/
209,8 → 272,19
{ DRM_MODE_ENCODER_TVDAC, "TV" },
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
{ DRM_MODE_ENCODER_DSI, "DSI" },
{ DRM_MODE_ENCODER_DPMST, "DP MST" },
};
 
static const struct drm_prop_enum_list drm_subpixel_enum_list[] =
{
{ SubPixelUnknown, "Unknown" },
{ SubPixelHorizontalRGB, "Horizontal RGB" },
{ SubPixelHorizontalBGR, "Horizontal BGR" },
{ SubPixelVerticalRGB, "Vertical RGB" },
{ SubPixelVerticalBGR, "Vertical BGR" },
{ SubPixelNone, "None" },
};
 
void drm_connector_ida_init(void)
{
int i;
227,28 → 301,13
ida_destroy(&drm_connector_enum_list[i].ida);
}
 
const char *drm_get_encoder_name(const struct drm_encoder *encoder)
{
static char buf[32];
 
snprintf(buf, 32, "%s-%d",
drm_encoder_enum_list[encoder->encoder_type].name,
encoder->base.id);
return buf;
}
EXPORT_SYMBOL(drm_get_encoder_name);
 
const char *drm_get_connector_name(const struct drm_connector *connector)
{
static char buf[32];
 
snprintf(buf, 32, "%s-%d",
drm_connector_enum_list[connector->connector_type].name,
connector->connector_type_id);
return buf;
}
EXPORT_SYMBOL(drm_get_connector_name);
 
/**
* drm_get_connector_status_name - return a string for connector status
* @status: connector status to compute name of
*
* In contrast to the other drm_get_*_name functions this one here returns a
* const pointer and hence is threadsafe.
*/
const char *drm_get_connector_status_name(enum drm_connector_status status)
{
if (status == connector_status_connected)
260,11 → 319,33
}
EXPORT_SYMBOL(drm_get_connector_status_name);
 
/**
* drm_get_subpixel_order_name - return a string for a given subpixel enum
* @order: enum of subpixel_order
*
* Note you could abuse this and return something out of bounds, but that
* would be a caller error. No unscrubbed user data should make it here.
*/
const char *drm_get_subpixel_order_name(enum subpixel_order order)
{
return drm_subpixel_enum_list[order].name;
}
EXPORT_SYMBOL(drm_get_subpixel_order_name);
 
static char printable_char(int c)
{
return isascii(c) && isprint(c) ? c : '?';
}
 
/**
* drm_get_format_name - return a string for drm fourcc format
* @format: format to compute name of
*
* Note that the buffer used by this function is globally shared and owned by
* the function itself.
*
* FIXME: This isn't really multithreading safe.
*/
const char *drm_get_format_name(uint32_t format)
{
static char buf[32];
282,26 → 363,19
}
EXPORT_SYMBOL(drm_get_format_name);
 
/**
* drm_mode_object_get - allocate a new modeset identifier
* @dev: DRM device
* @obj: object pointer, used to generate unique ID
* @obj_type: object type
*
* Create a unique identifier based on @ptr in @dev's identifier space. Used
* for tracking modes, CRTCs and connectors.
*
* RETURNS:
* New unique (relative to other objects in @dev) integer identifier for the
* object.
/*
* Internal function to assign a slot in the object idr and optionally
* register the object into the idr.
*/
static int drm_mode_object_get(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type)
static int drm_mode_object_get_reg(struct drm_device *dev,
struct drm_mode_object *obj,
uint32_t obj_type,
bool register_obj)
{
int ret;
 
mutex_lock(&dev->mode_config.idr_mutex);
ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL);
ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
if (ret >= 0) {
/*
* Set up the object linking under the protection of the idr
316,13 → 390,44
}
 
/**
* drm_mode_object_get - allocate a new modeset identifier
* @dev: DRM device
* @obj: object pointer, used to generate unique ID
* @obj_type: object type
*
* Create a unique identifier based on @obj in @dev's identifier space. Used
* for tracking modes, CRTCs and connectors. Note that despite the _get postfix
* modeset identifiers are _not_ reference counted. Hence don't use this for
* reference counted modeset objects like framebuffers.
*
* Returns:
* New unique (relative to other objects in @dev) integer identifier for the
* object.
*/
int drm_mode_object_get(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type)
{
return drm_mode_object_get_reg(dev, obj, obj_type, true);
}
 
static void drm_mode_object_register(struct drm_device *dev,
struct drm_mode_object *obj)
{
mutex_lock(&dev->mode_config.idr_mutex);
idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
mutex_unlock(&dev->mode_config.idr_mutex);
}
 
/**
* drm_mode_object_put - free a modeset identifier
* @dev: DRM device
* @object: object to free
*
* Free @id from @dev's unique identifier pool.
* Free @id from @dev's unique identifier pool. Note that despite the _get
* postfix modeset identifiers are _not_ reference counted. Hence don't use this
* for reference counted modeset objects like framebuffers.
*/
static void drm_mode_object_put(struct drm_device *dev,
void drm_mode_object_put(struct drm_device *dev,
struct drm_mode_object *object)
{
mutex_lock(&dev->mode_config.idr_mutex);
330,6 → 435,25
mutex_unlock(&dev->mode_config.idr_mutex);
}
 
static struct drm_mode_object *_object_find(struct drm_device *dev,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
 
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
obj = NULL;
if (obj && obj->id != id)
obj = NULL;
/* don't leak out unref'd fb's */
if (obj && (obj->type == DRM_MODE_OBJECT_FB))
obj = NULL;
mutex_unlock(&dev->mode_config.idr_mutex);
 
return obj;
}
 
/**
* drm_mode_object_find - look up a drm object with static lifetime
* @dev: drm device
337,7 → 461,9
* @type: type of the mode object
*
* Note that framebuffers cannot be looked up with this function - since those
* are reference counted, they need special treatment.
* are reference counted, they need special treatment. Even with
* DRM_MODE_OBJECT_ANY (although that will simply return NULL
* rather than WARN_ON()).
*/
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type)
347,13 → 473,7
/* Framebuffers are reference counted and need their own lookup
* function.*/
WARN_ON(type == DRM_MODE_OBJECT_FB);
 
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
if (!obj || (obj->type != type) || (obj->id != id))
obj = NULL;
mutex_unlock(&dev->mode_config.idr_mutex);
 
obj = _object_find(dev, id, type);
return obj;
}
EXPORT_SYMBOL(drm_mode_object_find);
373,7 → 493,7
* since all the fb attributes are invariant over its lifetime, no further
* locking but only correct reference counting is required.
*
* RETURNS:
* Returns:
* Zero on success, error code on failure.
*/
int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
434,7 → 554,7
*
* If successful, this grabs an additional reference to the framebuffer -
* callers need to make sure to eventually unreference the returned framebuffer
* again.
* again, using @drm_framebuffer_unreference.
*/
struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
uint32_t id)
459,7 → 579,7
*/
void drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
DRM_DEBUG("FB ID: %d\n", fb->base.id);
DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
kref_put(&fb->refcount, drm_framebuffer_free);
}
EXPORT_SYMBOL(drm_framebuffer_unreference);
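The lookup/unreference contract described above, in a small sketch; foo_use_fb() and its id argument are illustrative only:

static int foo_use_fb(struct drm_device *dev, uint32_t id)
{
	struct drm_framebuffer *fb;

	fb = drm_framebuffer_lookup(dev, id);	/* takes an extra reference */
	if (!fb)
		return -ENOENT;

	/* ... inspect fb->width, fb->height, fb->pixel_format ... */

	drm_framebuffer_unreference(fb);	/* drop the lookup reference */
	return 0;
}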
467,10 → 587,12
/**
* drm_framebuffer_reference - incr the fb refcnt
* @fb: framebuffer
*
* This function increments the fb's refcount.
*/
void drm_framebuffer_reference(struct drm_framebuffer *fb)
{
DRM_DEBUG("FB ID: %d\n", fb->base.id);
DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
kref_get(&fb->refcount);
}
EXPORT_SYMBOL(drm_framebuffer_reference);
482,7 → 604,7
 
static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
DRM_DEBUG("FB ID: %d\n", fb->base.id);
DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
kref_put(&fb->refcount, drm_framebuffer_free_bug);
}
 
523,8 → 645,9
* drm_framebuffer_cleanup - remove a framebuffer object
* @fb: framebuffer to remove
*
* Cleanup references to a user-created framebuffer. This function is intended
* to be used from the drivers ->destroy callback.
* Cleanup framebuffer. This function is intended to be used from the driver's
* ->destroy callback. It can also be used to clean up driver private
* framebuffers embedded into a larger structure.
*
* Note that this function does not remove the fb from active usage - if it is
* still used anywhere, hilarity can ensue since userspace could call getfb on
587,7 → 710,7
drm_modeset_lock_all(dev);
/* remove from any CRTC */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->fb == fb) {
if (crtc->primary->fb == fb) {
/* should turn off the crtc */
memset(&set, 0, sizeof(struct drm_mode_set));
set.crtc = crtc;
609,20 → 732,28
}
EXPORT_SYMBOL(drm_framebuffer_remove);
 
DEFINE_WW_CLASS(crtc_ww_class);
 
/**
* drm_crtc_init - Initialise a new CRTC object
* drm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes.
* @dev: DRM device
* @crtc: CRTC object to init
* @primary: Primary plane for CRTC
* @cursor: Cursor plane for CRTC
* @funcs: callbacks for the new CRTC
*
* Inits a new object created as base part of a driver crtc object.
*
* RETURNS:
* Returns:
* Zero on success, error code on failure.
*/
int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
 
crtc->dev = dev;
630,8 → 761,9
crtc->invert_dimensions = false;
 
drm_modeset_lock_all(dev);
mutex_init(&crtc->mutex);
mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
drm_modeset_lock_init(&crtc->mutex);
/* dropped by _unlock_all(): */
drm_modeset_lock(&crtc->mutex, config->acquire_ctx);
 
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
if (ret)
639,15 → 771,22
 
crtc->base.properties = &crtc->properties;
 
list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
dev->mode_config.num_crtc++;
list_add_tail(&crtc->head, &config->crtc_list);
config->num_crtc++;
 
crtc->primary = primary;
crtc->cursor = cursor;
if (primary)
primary->possible_crtcs = 1 << drm_crtc_index(crtc);
if (cursor)
cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
 
out:
drm_modeset_unlock_all(dev);
 
return ret;
}
EXPORT_SYMBOL(drm_crtc_init);
EXPORT_SYMBOL(drm_crtc_init_with_planes);
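A hedged example of a driver calling the new drm_crtc_init_with_planes(); struct foo_crtc and foo_crtc_funcs merely stand in for driver-specific pieces:

struct foo_crtc {
	struct drm_crtc base;
	/* driver-private state would follow */
};

static const struct drm_crtc_funcs foo_crtc_funcs = {
	/* driver callbacks omitted in this sketch */
};

static int foo_crtc_create(struct drm_device *dev, struct foo_crtc *fcrtc,
			   struct drm_plane *primary, struct drm_plane *cursor)
{
	/* the core wires up crtc->primary, crtc->cursor and possible_crtcs */
	return drm_crtc_init_with_planes(dev, &fcrtc->base, primary, cursor,
					 &foo_crtc_funcs);
}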
 
/**
* drm_crtc_cleanup - Clean up the core crtc usage
664,6 → 803,8
kfree(crtc->gamma_store);
crtc->gamma_store = NULL;
 
drm_modeset_lock_fini(&crtc->mutex);
 
drm_mode_object_put(dev, &crtc->base);
list_del(&crtc->head);
dev->mode_config.num_crtc--;
693,20 → 834,6
}
EXPORT_SYMBOL(drm_crtc_index);
 
/**
* drm_mode_probed_add - add a mode to a connector's probed mode list
* @connector: connector the new mode
* @mode: mode data
*
* Add @mode to @connector's mode list for later use.
*/
void drm_mode_probed_add(struct drm_connector *connector,
struct drm_display_mode *mode)
{
list_add_tail(&mode->head, &connector->probed_modes);
}
EXPORT_SYMBOL(drm_mode_probed_add);
 
/*
* drm_mode_remove - remove and free a mode
* @connector: connector list to modify
731,7 → 858,7
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
*
* RETURNS:
* Returns:
* Zero on success, error code on failure.
*/
int drm_connector_init(struct drm_device *dev,
745,9 → 872,9
 
drm_modeset_lock_all(dev);
 
ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
ret = drm_mode_object_get_reg(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR, false);
if (ret)
goto out;
goto out_unlock;
 
connector->base.properties = &connector->properties;
connector->dev = dev;
757,9 → 884,17
ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
if (connector->connector_type_id < 0) {
ret = connector->connector_type_id;
drm_mode_object_put(dev, &connector->base);
goto out;
goto out_put;
}
connector->name =
kasprintf(GFP_KERNEL, "%s-%d",
drm_connector_enum_list[connector_type].name,
connector->connector_type_id);
if (!connector->name) {
ret = -ENOMEM;
goto out_put;
}
 
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
connector->edid_blob_ptr = NULL;
776,7 → 911,13
drm_object_attach_property(&connector->base,
dev->mode_config.dpms_property, 0);
 
out:
connector->debugfs_entry = NULL;
 
out_put:
if (ret)
drm_mode_object_put(dev, &connector->base);
 
out_unlock:
drm_modeset_unlock_all(dev);
 
return ret;
804,22 → 945,75
connector->connector_type_id);
 
drm_mode_object_put(dev, &connector->base);
kfree(connector->name);
connector->name = NULL;
list_del(&connector->head);
dev->mode_config.num_connector--;
}
EXPORT_SYMBOL(drm_connector_cleanup);
 
/**
* drm_connector_register - register a connector
* @connector: the connector to register
*
* Register userspace interfaces for a connector
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_connector_register(struct drm_connector *connector)
{
int ret;
 
drm_mode_object_register(connector->dev, &connector->base);
 
return 0;
}
EXPORT_SYMBOL(drm_connector_register);
 
/**
* drm_connector_unregister - unregister a connector
* @connector: the connector to unregister
*
* Unregister userspace interfaces for a connector
*/
void drm_connector_unregister(struct drm_connector *connector)
{
}
EXPORT_SYMBOL(drm_connector_unregister);
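The new register/unregister entry points split object setup from exposing userspace interfaces. A sketch of the intended probe-time order, with foo_connector_funcs assumed to be the driver's callback table:

static int foo_connector_create(struct drm_device *dev,
				struct drm_connector *connector)
{
	int ret;

	ret = drm_connector_init(dev, connector, &foo_connector_funcs,
				 DRM_MODE_CONNECTOR_HDMIA);
	if (ret)
		return ret;

	/* expose the connector to userspace only once it is fully set up */
	return drm_connector_register(connector);
}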
 
 
/**
* drm_connector_unplug_all - unregister connector userspace interfaces
* @dev: drm device
*
* This function unregisters all connector userspace interfaces in sysfs. Should
* be called when the device is disconnected, e.g. from a usb driver's
* ->disconnect callback.
*/
void drm_connector_unplug_all(struct drm_device *dev)
{
struct drm_connector *connector;
 
/* taking the mode config mutex ends up in a clash with sysfs */
// list_for_each_entry(connector, &dev->mode_config.connector_list, head)
// drm_sysfs_connector_remove(connector);
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
drm_connector_unregister(connector);
 
}
EXPORT_SYMBOL(drm_connector_unplug_all);
 
/**
* drm_bridge_init - initialize a drm transcoder/bridge
* @dev: drm device
* @bridge: transcoder/bridge to set up
* @funcs: bridge function table
*
* Initialises a preallocated bridge. Bridges should be
* subclassed as part of driver connector objects.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
const struct drm_bridge_funcs *funcs)
{
843,6 → 1037,12
}
EXPORT_SYMBOL(drm_bridge_init);
 
/**
* drm_bridge_cleanup - cleans up an initialised bridge
* @bridge: bridge to cleanup
*
* Cleans up the bridge but doesn't free the object.
*/
void drm_bridge_cleanup(struct drm_bridge *bridge)
{
struct drm_device *dev = bridge->dev;
855,6 → 1055,19
}
EXPORT_SYMBOL(drm_bridge_cleanup);
 
/**
* drm_encoder_init - Init a preallocated encoder
* @dev: drm device
* @encoder: the encoder to init
* @funcs: callbacks for this encoder
* @encoder_type: user visible type of the encoder
*
* Initialises a preallocated encoder. Encoder should be
* subclassed as part of driver encoder objects.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
866,16 → 1079,27
 
ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
if (ret)
goto out;
goto out_unlock;
 
encoder->dev = dev;
encoder->encoder_type = encoder_type;
encoder->funcs = funcs;
encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
drm_encoder_enum_list[encoder_type].name,
encoder->base.id);
if (!encoder->name) {
ret = -ENOMEM;
goto out_put;
}
 
list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
dev->mode_config.num_encoder++;
 
out:
out_put:
if (ret)
drm_mode_object_put(dev, &encoder->base);
 
out_unlock:
drm_modeset_unlock_all(dev);
 
return ret;
882,11 → 1106,19
}
EXPORT_SYMBOL(drm_encoder_init);
 
/**
* drm_encoder_cleanup - cleans up an initialised encoder
* @encoder: encoder to cleanup
*
* Cleans up the encoder but doesn't free the object.
*/
void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &encoder->base);
kfree(encoder->name);
encoder->name = NULL;
list_del(&encoder->head);
dev->mode_config.num_encoder--;
drm_modeset_unlock_all(dev);
894,7 → 1126,7
EXPORT_SYMBOL(drm_encoder_cleanup);
 
/**
* drm_plane_init - Initialise a new plane object
* drm_universal_plane_init - Initialize a new universal plane object
* @dev: DRM device
* @plane: plane object to init
* @possible_crtcs: bitmask of possible CRTCs
901,18 → 1133,18
* @funcs: callbacks for the new plane
* @formats: array of supported formats (%DRM_FORMAT_*)
* @format_count: number of elements in @formats
* @priv: plane is private (hidden from userspace)?
* @type: type of plane (overlay, primary, cursor)
*
* Inits a new object created as base part of a driver plane object.
* Initializes a plane object of type @type.
*
* RETURNS:
* Returns:
* Zero on success, error code on failure.
*/
int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, uint32_t format_count,
bool priv)
enum drm_plane_type type)
{
int ret;
 
937,23 → 1169,53
memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
plane->format_count = format_count;
plane->possible_crtcs = possible_crtcs;
plane->type = type;
 
/* private planes are not exposed to userspace, but depending on
* display hardware, might be convenient to allow sharing programming
* for the scanout engine with the crtc implementation.
*/
if (!priv) {
list_add_tail(&plane->head, &dev->mode_config.plane_list);
dev->mode_config.num_plane++;
} else {
INIT_LIST_HEAD(&plane->head);
}
dev->mode_config.num_total_plane++;
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
dev->mode_config.num_overlay_plane++;
 
drm_object_attach_property(&plane->base,
dev->mode_config.plane_type_property,
plane->type);
 
out:
drm_modeset_unlock_all(dev);
 
return ret;
}
EXPORT_SYMBOL(drm_universal_plane_init);
 
/**
* drm_plane_init - Initialize a legacy plane
* @dev: DRM device
* @plane: plane object to init
* @possible_crtcs: bitmask of possible CRTCs
* @funcs: callbacks for the new plane
* @formats: array of supported formats (%DRM_FORMAT_*)
* @format_count: number of elements in @formats
* @is_primary: plane type (primary vs overlay)
*
* Legacy API to initialize a DRM plane.
*
* New drivers should call drm_universal_plane_init() instead.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, uint32_t format_count,
bool is_primary)
{
enum drm_plane_type type;
 
type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
formats, format_count, type);
}
EXPORT_SYMBOL(drm_plane_init);
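New drivers are pointed at drm_universal_plane_init() above; a sketch of creating an explicit primary plane with it, where the format list, funcs table and foo_ names are illustrative:

static const uint32_t foo_primary_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_RGB565,
};

static const struct drm_plane_funcs foo_plane_funcs = {
	/* driver callbacks omitted in this sketch */
};

static int foo_primary_plane_create(struct drm_device *dev,
				    struct drm_plane *plane,
				    unsigned long possible_crtcs)
{
	return drm_universal_plane_init(dev, plane, possible_crtcs,
					&foo_plane_funcs,
					foo_primary_formats,
					ARRAY_SIZE(foo_primary_formats),
					DRM_PLANE_TYPE_PRIMARY);
}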
 
/**
971,11 → 1233,13
drm_modeset_lock_all(dev);
kfree(plane->format_types);
drm_mode_object_put(dev, &plane->base);
/* if not added to a list, it must be a private plane */
if (!list_empty(&plane->head)) {
 
BUG_ON(list_empty(&plane->head));
 
list_del(&plane->head);
dev->mode_config.num_plane--;
}
dev->mode_config.num_total_plane--;
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
dev->mode_config.num_overlay_plane--;
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_plane_cleanup);
991,69 → 1255,29
*/
void drm_plane_force_disable(struct drm_plane *plane)
{
struct drm_framebuffer *old_fb = plane->fb;
int ret;
 
if (!plane->fb)
if (!old_fb)
return;
 
ret = plane->funcs->disable_plane(plane);
if (ret)
if (ret) {
DRM_ERROR("failed to disable plane with busy fb\n");
return;
}
/* disconnect the plane from the fb and crtc: */
__drm_framebuffer_unreference(plane->fb);
__drm_framebuffer_unreference(old_fb);
plane->fb = NULL;
plane->crtc = NULL;
}
EXPORT_SYMBOL(drm_plane_force_disable);
 
/**
* drm_mode_create - create a new display mode
* @dev: DRM device
*
* Create a new drm_display_mode, give it an ID, and return it.
*
* RETURNS:
* Pointer to new mode on success, NULL on error.
*/
struct drm_display_mode *drm_mode_create(struct drm_device *dev)
{
struct drm_display_mode *nmode;
 
nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
if (!nmode)
return NULL;
 
if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
kfree(nmode);
return NULL;
}
 
return nmode;
}
EXPORT_SYMBOL(drm_mode_create);
 
/**
* drm_mode_destroy - remove a mode
* @dev: DRM device
* @mode: mode to remove
*
* Free @mode's unique identifier, then free it.
*/
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
{
if (!mode)
return;
 
drm_mode_object_put(dev, &mode->base);
 
kfree(mode);
}
EXPORT_SYMBOL(drm_mode_destroy);
 
static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
{
struct drm_property *edid;
struct drm_property *dpms;
struct drm_property *dev_path;
 
/*
* Standard properties (apply to all connectors)
1068,9 → 1292,30
ARRAY_SIZE(drm_dpms_enum_list));
dev->mode_config.dpms_property = dpms;
 
dev_path = drm_property_create(dev,
DRM_MODE_PROP_BLOB |
DRM_MODE_PROP_IMMUTABLE,
"PATH", 0);
dev->mode_config.path_property = dev_path;
 
return 0;
}
 
static int drm_mode_create_standard_plane_properties(struct drm_device *dev)
{
struct drm_property *type;
 
/*
* Standard properties (apply to all planes)
*/
type = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"type", drm_plane_type_enum_list,
ARRAY_SIZE(drm_plane_type_enum_list));
dev->mode_config.plane_type_property = type;
 
return 0;
}
 
/**
* drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
* @dev: DRM device
1209,6 → 1454,33
EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
 
/**
* drm_mode_create_aspect_ratio_property - create aspect ratio property
* @dev: DRM device
*
* Called by a driver the first time it's needed, must be attached to desired
* connectors.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
{
if (dev->mode_config.aspect_ratio_property)
return 0;
 
dev->mode_config.aspect_ratio_property =
drm_property_create_enum(dev, 0, "aspect ratio",
drm_aspect_ratio_enum_list,
ARRAY_SIZE(drm_aspect_ratio_enum_list));
 
if (dev->mode_config.aspect_ratio_property == NULL)
return -ENOMEM;
 
return 0;
}
EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
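As the comment says, the property is created once and then attached to every connector that should expose it; a short illustrative helper (foo_ names are made up):

static void foo_attach_aspect_ratio(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;

	if (drm_mode_create_aspect_ratio_property(dev))
		return;		/* -ENOMEM, nothing to attach */

	drm_object_attach_property(&connector->base,
				   dev->mode_config.aspect_ratio_property,
				   DRM_MODE_PICTURE_ASPECT_NONE);
}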
 
/**
* drm_mode_create_dirty_property - create dirty property
* @dev: DRM device
*
1253,6 → 1525,16
return 0;
}
 
void drm_mode_group_destroy(struct drm_mode_group *group)
{
kfree(group->id_list);
group->id_list = NULL;
}
 
/*
* NOTE: Drivers shouldn't ever call drm_mode_group_init_legacy_group - it is
* the drm core's responsibility to set up mode control groups.
*/
int drm_mode_group_init_legacy_group(struct drm_device *dev,
struct drm_mode_group *group)
{
1285,6 → 1567,15
}
EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
 
void drm_reinit_primary_mode_group(struct drm_device *dev)
{
drm_modeset_lock_all(dev);
drm_mode_group_destroy(&dev->primary->mode_group);
drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_reinit_primary_mode_group);
 
/**
* drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
* @out: drm_mode_modeinfo struct to return to the user
1329,7 → 1620,7
* Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
* the caller.
*
* RETURNS:
* Returns:
* Zero on success, errno on failure.
*/
static int drm_crtc_convert_umode(struct drm_display_mode *out,
1374,7 → 1665,7
*
* Called by the user via ioctl.
*
* RETURNS:
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getresources(struct drm_device *dev, void *data,
1427,9 → 1718,9
mutex_unlock(&file_priv->fbs_lock);
 
drm_modeset_lock_all(dev);
mode_group = &file_priv->master->minor->mode_group;
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
if (!drm_is_primary_client(file_priv)) {
 
mode_group = NULL;
list_for_each(lh, &dev->mode_config.crtc_list)
crtc_count++;
 
1440,6 → 1731,7
encoder_count++;
} else {
 
mode_group = &file_priv->master->minor->mode_group;
crtc_count = mode_group->num_crtcs;
connector_count = mode_group->num_connectors;
encoder_count = mode_group->num_encoders;
1454,7 → 1746,7
if (card_res->count_crtcs >= crtc_count) {
copied = 0;
crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
if (!mode_group) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
head) {
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
1481,12 → 1773,12
if (card_res->count_encoders >= encoder_count) {
copied = 0;
encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
if (!mode_group) {
list_for_each_entry(encoder,
&dev->mode_config.encoder_list,
head) {
DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
drm_get_encoder_name(encoder));
encoder->name);
if (put_user(encoder->base.id, encoder_id +
copied)) {
ret = -EFAULT;
1512,13 → 1804,13
if (card_res->count_connectors >= connector_count) {
copied = 0;
connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
if (!mode_group) {
list_for_each_entry(connector,
&dev->mode_config.connector_list,
head) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id,
drm_get_connector_name(connector));
connector->name);
if (put_user(connector->base.id,
connector_id + copied)) {
ret = -EFAULT;
1559,7 → 1851,7
*
* Called by the user via ioctl.
*
* RETURNS:
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getcrtc(struct drm_device *dev,
1567,7 → 1859,6
{
struct drm_mode_crtc *crtc_resp = data;
struct drm_crtc *crtc;
struct drm_mode_object *obj;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
1575,19 → 1866,17
 
drm_modeset_lock_all(dev);
 
obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
if (!crtc) {
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
 
crtc_resp->x = crtc->x;
crtc_resp->y = crtc->y;
crtc_resp->gamma_size = crtc->gamma_size;
if (crtc->fb)
crtc_resp->fb_id = crtc->fb->base.id;
if (crtc->primary->fb)
crtc_resp->fb_id = crtc->primary->fb->base.id;
else
crtc_resp->fb_id = 0;
 
1628,7 → 1917,7
*
* Called by the user via ioctl.
*
* RETURNS:
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getconnector(struct drm_device *dev, void *data,
1635,7 → 1924,6
struct drm_file *file_priv)
{
struct drm_mode_get_connector *out_resp = data;
struct drm_mode_object *obj;
struct drm_connector *connector;
struct drm_display_mode *mode;
int mode_count = 0;
1659,13 → 1947,11
 
mutex_lock(&dev->mode_config.mutex);
 
obj = drm_mode_object_find(dev, out_resp->connector_id,
DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
connector = drm_connector_find(dev, out_resp->connector_id);
if (!connector) {
ret = -ENOENT;
goto out;
}
connector = obj_to_connector(obj);
 
props_count = connector->properties.count;
 
1693,10 → 1979,12
out_resp->mm_height = connector->display_info.height_mm;
out_resp->subpixel = connector->display_info.subpixel_order;
out_resp->connection = connector->status;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
if (connector->encoder)
out_resp->encoder_id = connector->encoder->base.id;
else
out_resp->encoder_id = 0;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
 
/*
* This ioctl is called twice, once to determine how much space is
1763,11 → 2051,23
return ret;
}
 
/**
* drm_mode_getencoder - get encoder configuration
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Construct an encoder configuration structure to return to the user.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getencoder(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_encoder *enc_resp = data;
struct drm_mode_object *obj;
struct drm_encoder *encoder;
int ret = 0;
 
1775,13 → 2075,11
return -EINVAL;
 
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, enc_resp->encoder_id,
DRM_MODE_OBJECT_ENCODER);
if (!obj) {
encoder = drm_encoder_find(dev, enc_resp->encoder_id);
if (!encoder) {
ret = -ENOENT;
goto out;
}
encoder = obj_to_encoder(obj);
 
if (encoder->crtc)
enc_resp->crtc_id = encoder->crtc->base.id;
1798,12 → 2096,17
}
 
/**
* drm_mode_getplane_res - get plane info
* drm_mode_getplane_res - enumerate all plane resources
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* Return an plane count and set of IDs.
* Construct a list of plane ids to return to the user.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getplane_res(struct drm_device *dev, void *data,
struct drm_file *file_priv)
1813,6 → 2116,7
struct drm_plane *plane;
uint32_t __user *plane_ptr;
int copied = 0, ret = 0;
unsigned num_planes;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
1820,15 → 2124,28
drm_modeset_lock_all(dev);
config = &dev->mode_config;
 
if (file_priv->universal_planes)
num_planes = config->num_total_plane;
else
num_planes = config->num_overlay_plane;
 
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
*/
if (config->num_plane &&
(plane_resp->count_planes >= config->num_plane)) {
if (num_planes &&
(plane_resp->count_planes >= num_planes)) {
plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
 
list_for_each_entry(plane, &config->plane_list, head) {
/*
* Unless userspace set the 'universal planes'
* capability bit, only advertise overlays.
*/
if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
!file_priv->universal_planes)
continue;
 
if (put_user(plane->base.id, plane_ptr + copied)) {
ret = -EFAULT;
goto out;
1836,7 → 2153,7
copied++;
}
}
plane_resp->count_planes = config->num_plane;
plane_resp->count_planes = num_planes;
 
out:
drm_modeset_unlock_all(dev);
1844,19 → 2161,22
}
 
/**
* drm_mode_getplane - get plane info
* drm_mode_getplane - get plane configuration
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* Return plane info, including formats supported, gamma size, any
* current fb, etc.
* Construct a plane configuration structure to return to the user.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_plane *plane_resp = data;
struct drm_mode_object *obj;
struct drm_plane *plane;
uint32_t __user *format_ptr;
int ret = 0;
1865,13 → 2185,11
return -EINVAL;
 
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, plane_resp->plane_id,
DRM_MODE_OBJECT_PLANE);
if (!obj) {
plane = drm_plane_find(dev, plane_resp->plane_id);
if (!plane) {
ret = -ENOENT;
goto out;
}
plane = obj_to_plane(obj);
 
if (plane->crtc)
plane_resp->crtc_id = plane->crtc->base.id;
1908,72 → 2226,52
return ret;
}
 
/**
* drm_mode_setplane - set up or tear down an plane
* @dev: DRM device
* @data: ioctl data*
* @file_priv: DRM file info
/*
* setplane_internal - setplane handler for internal callers
*
* Set plane info, including placement, fb, scaling, and other factors.
* Or pass a NULL fb to disable.
* Note that we assume an extra reference has already been taken on fb. If the
* update fails, this reference will be dropped before return; if it succeeds,
* the previous framebuffer (if any) will be unreferenced instead.
*
* src_{x,y,w,h} are provided in 16.16 fixed point format
*/
int drm_mode_setplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
static int setplane_internal(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int32_t crtc_x, int32_t crtc_y,
uint32_t crtc_w, uint32_t crtc_h,
/* src_{x,y,w,h} values are 16.16 fixed point */
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct drm_mode_set_plane *plane_req = data;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_framebuffer *fb = NULL, *old_fb = NULL;
struct drm_device *dev = plane->dev;
struct drm_framebuffer *old_fb = NULL;
int ret = 0;
unsigned int fb_width, fb_height;
int i;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
/*
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
*/
obj = drm_mode_object_find(dev, plane_req->plane_id,
DRM_MODE_OBJECT_PLANE);
if (!obj) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
return -ENOENT;
}
plane = obj_to_plane(obj);
 
/* No fb means shut it down */
if (!plane_req->fb_id) {
if (!fb) {
drm_modeset_lock_all(dev);
old_fb = plane->fb;
plane->funcs->disable_plane(plane);
ret = plane->funcs->disable_plane(plane);
if (!ret) {
plane->crtc = NULL;
plane->fb = NULL;
} else {
old_fb = NULL;
}
drm_modeset_unlock_all(dev);
goto out;
}
 
obj = drm_mode_object_find(dev, plane_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
plane_req->crtc_id);
ret = -ENOENT;
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
DRM_DEBUG_KMS("Invalid crtc for plane\n");
ret = -EINVAL;
goto out;
}
crtc = obj_to_crtc(obj);
 
fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
if (!fb) {
DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
plane_req->fb_id);
ret = -ENOENT;
goto out;
}
 
/* Check whether this plane supports the fb pixel format. */
for (i = 0; i < plane->format_count; i++)
if (fb->pixel_format == plane->format_types[i])
1989,47 → 2287,31
fb_height = fb->height << 16;
 
/* Make sure source coordinates are inside the fb. */
if (plane_req->src_w > fb_width ||
plane_req->src_x > fb_width - plane_req->src_w ||
plane_req->src_h > fb_height ||
plane_req->src_y > fb_height - plane_req->src_h) {
if (src_w > fb_width ||
src_x > fb_width - src_w ||
src_h > fb_height ||
src_y > fb_height - src_h) {
DRM_DEBUG_KMS("Invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
plane_req->src_w >> 16,
((plane_req->src_w & 0xffff) * 15625) >> 10,
plane_req->src_h >> 16,
((plane_req->src_h & 0xffff) * 15625) >> 10,
plane_req->src_x >> 16,
((plane_req->src_x & 0xffff) * 15625) >> 10,
plane_req->src_y >> 16,
((plane_req->src_y & 0xffff) * 15625) >> 10);
src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
ret = -ENOSPC;
goto out;
}
 
/* Give drivers some help against integer overflows */
if (plane_req->crtc_w > INT_MAX ||
plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
plane_req->crtc_h > INT_MAX ||
plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
plane_req->crtc_w, plane_req->crtc_h,
plane_req->crtc_x, plane_req->crtc_y);
ret = -ERANGE;
goto out;
}
 
drm_modeset_lock_all(dev);
old_fb = plane->fb;
ret = plane->funcs->update_plane(plane, crtc, fb,
plane_req->crtc_x, plane_req->crtc_y,
plane_req->crtc_w, plane_req->crtc_h,
plane_req->src_x, plane_req->src_y,
plane_req->src_w, plane_req->src_h);
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
if (!ret) {
old_fb = plane->fb;
plane->crtc = crtc;
plane->fb = fb;
fb = NULL;
} else {
old_fb = NULL;
}
drm_modeset_unlock_all(dev);
 
2040,7 → 2322,86
drm_framebuffer_unreference(old_fb);
 
return ret;
 
}
 
/**
* drm_mode_setplane - configure a plane
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* Set plane configuration, including placement, fb, scaling, and other factors.
* Or pass a NULL fb to disable (planes may be disabled without providing a
* valid crtc).
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_setplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_set_plane *plane_req = data;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct drm_crtc *crtc = NULL;
struct drm_framebuffer *fb = NULL;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
/* Give drivers some help against integer overflows */
if (plane_req->crtc_w > INT_MAX ||
plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
plane_req->crtc_h > INT_MAX ||
plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
plane_req->crtc_w, plane_req->crtc_h,
plane_req->crtc_x, plane_req->crtc_y);
return -ERANGE;
}
 
/*
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
*/
obj = drm_mode_object_find(dev, plane_req->plane_id,
DRM_MODE_OBJECT_PLANE);
if (!obj) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
return -ENOENT;
}
plane = obj_to_plane(obj);
 
if (plane_req->fb_id) {
fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
if (!fb) {
DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
plane_req->fb_id);
return -ENOENT;
}
 
obj = drm_mode_object_find(dev, plane_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
plane_req->crtc_id);
return -ENOENT;
}
crtc = obj_to_crtc(obj);
}
 
/*
* setplane_internal will take care of deref'ing either the old or new
* framebuffer depending on success.
*/
return setplane_internal(plane, crtc, fb,
plane_req->crtc_x, plane_req->crtc_y,
plane_req->crtc_w, plane_req->crtc_h,
plane_req->src_x, plane_req->src_y,
plane_req->src_w, plane_req->src_h);
}
#endif
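The src_{x,y,w,h} plane coordinates handled above are 16.16 fixed point: whole pixels in the upper 16 bits, a binary fraction in the lower 16. Two tiny illustrative helpers, the second showing that the (v & 0xffff) * 15625 >> 10 expression in the debug output is just fraction * 1000000 / 65536:

static inline uint32_t foo_pixels_to_fixed16(uint32_t pixels)
{
	return pixels << 16;			/* 640 pixels -> 0x02800000 */
}

static inline uint32_t foo_fixed16_fraction_micro(uint32_t v)
{
	/* 15625 / 1024 == 1000000 / 65536, so this yields millionths of a pixel */
	return ((v & 0xffff) * 15625) >> 10;
}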
 
/**
2049,6 → 2410,9
*
* This is a little helper to wrap internal calls to the ->set_config driver
* interface. The only thing it adds is the correct refcounting dance.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_set_config_internal(struct drm_mode_set *set)
{
2063,19 → 2427,19
* crtcs. Atomic modeset will have saner semantics ...
*/
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
tmp->old_fb = tmp->fb;
tmp->old_fb = tmp->primary->fb;
 
fb = set->fb;
 
ret = crtc->funcs->set_config(set);
if (ret == 0) {
/* crtc->fb must be updated by ->set_config, enforces this. */
WARN_ON(fb != crtc->fb);
crtc->primary->crtc = crtc;
crtc->primary->fb = fb;
}
 
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
if (tmp->fb)
drm_framebuffer_reference(tmp->fb);
if (tmp->primary->fb)
drm_framebuffer_reference(tmp->primary->fb);
// if (tmp->old_fb)
// drm_framebuffer_unreference(tmp->old_fb);
}
2085,11 → 2449,16
EXPORT_SYMBOL(drm_mode_set_config_internal);
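A sketch of a typical caller of this helper, e.g. a driver's resume or fbdev restore path; every foo_ name is hypothetical:

static int foo_restore_crtc(struct drm_crtc *crtc,
			    struct drm_display_mode *mode,
			    struct drm_framebuffer *fb,
			    struct drm_connector *connector)
{
	struct drm_mode_set set = {
		.crtc = crtc,
		.x = 0,
		.y = 0,
		.mode = mode,
		.connectors = &connector,
		.num_connectors = 1,
		.fb = fb,
	};

	/* the helper performs ->set_config plus the framebuffer refcounting dance */
	return drm_mode_set_config_internal(&set);
}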
 
#if 0
/*
* Checks that the framebuffer is big enough for the CRTC viewport
* (x, y, hdisplay, vdisplay)
/**
* drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
* CRTC viewport
* @crtc: CRTC that framebuffer will be displayed on
* @x: x panning
* @y: y panning
* @mode: mode that framebuffer will be displayed under
* @fb: framebuffer to check size of
*/
static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
const struct drm_display_mode *mode,
const struct drm_framebuffer *fb)
2123,6 → 2492,7
 
return 0;
}
EXPORT_SYMBOL(drm_crtc_check_viewport);
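Restated in simplified form (ignoring the invert_dimensions case), the rule drm_crtc_check_viewport() enforces is that the panned mode must fit inside the framebuffer; a hedged restatement, not the exported helper itself:

static bool foo_viewport_fits(int x, int y,
			      const struct drm_display_mode *mode,
			      const struct drm_framebuffer *fb)
{
	/* e.g. x = 100, hdisplay = 1920 fits a 2048-wide fb: 100 + 1920 <= 2048 */
	return x + mode->hdisplay <= fb->width &&
	       y + mode->vdisplay <= fb->height;
}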
 
/**
* drm_mode_setcrtc - set CRTC configuration
2134,7 → 2504,7
*
* Called by the user via ioctl.
*
* RETURNS:
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_setcrtc(struct drm_device *dev, void *data,
2142,7 → 2512,6
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_mode_crtc *crtc_req = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct drm_connector **connector_set = NULL, *connector;
struct drm_framebuffer *fb = NULL;
2160,14 → 2529,12
return -ERANGE;
 
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
crtc = drm_crtc_find(dev, crtc_req->crtc_id);
if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
if (crtc_req->mode_valid) {
2174,12 → 2541,12
/* If we have a mode we need a framebuffer. */
/* If we pass -1, set the mode with the currently bound fb */
if (crtc_req->fb_id == -1) {
if (!crtc->fb) {
if (!crtc->primary->fb) {
DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
ret = -EINVAL;
goto out;
}
fb = crtc->fb;
fb = crtc->primary->fb;
/* Make refcounting symmetric with the lookup path. */
drm_framebuffer_reference(fb);
} else {
2250,18 → 2617,16
goto out;
}
 
obj = drm_mode_object_find(dev, out_id,
DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
connector = drm_connector_find(dev, out_id);
if (!connector) {
DRM_DEBUG_KMS("Connector id %d unknown\n",
out_id);
ret = -ENOENT;
goto out;
}
connector = obj_to_connector(obj);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id,
drm_get_connector_name(connector));
connector->name);
 
connector_set[i] = connector;
}
2290,7 → 2655,6
struct drm_mode_cursor2 *req,
struct drm_file *file_priv)
{
struct drm_mode_object *obj;
struct drm_crtc *crtc;
int ret = 0;
 
2300,14 → 2664,20
if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
 
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
crtc = drm_crtc_find(dev, req->crtc_id);
if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
return -ENOENT;
}
crtc = obj_to_crtc(obj);
 
mutex_lock(&crtc->mutex);
/*
* If this crtc has a universal cursor plane, call that plane's update
* handler rather than using legacy cursor handlers.
*/
if (crtc->cursor)
return drm_mode_cursor_universal(crtc, req, file_priv);
 
drm_modeset_lock(&crtc->mutex, NULL);
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
ret = -ENXIO;
2331,11 → 2701,26
}
}
out:
mutex_unlock(&crtc->mutex);
drm_modeset_unlock(&crtc->mutex);
 
return ret;
 
}
 
 
/**
* drm_mode_cursor_ioctl - set CRTC's cursor configuration
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Set the cursor configuration based on user request.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_cursor_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
2348,6 → 2733,21
return drm_mode_cursor_common(dev, &new_req, file_priv);
}
 
/**
* drm_mode_cursor2_ioctl - set CRTC's cursor configuration
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Set the cursor configuration based on user request. This implements the 2nd
* version of the cursor ioctl, which allows userspace to additionally specify
* the hotspot of the pointer.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_cursor2_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
2356,7 → 2756,14
}
#endif
 
/* Original addfb only supported RGB formats, so figure out which one */
/**
* drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
* @bpp: bits per pixel
* @depth: bit depth per pixel
*
* Computes a drm fourcc pixel format code for the given @bpp/@depth values.
* Useful in fbdev emulation code, since that deals in those values.
*/
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
{
uint32_t fmt;
2398,11 → 2805,12
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Add a new FB to the specified CRTC, given a user request.
* Add a new FB to the specified CRTC, given a user request. This is the
* original addfb ioctl which only supported RGB formats.
*
* Called by the user via ioctl.
*
* RETURNS:
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_addfb(struct drm_device *dev,
2569,54 → 2977,38
return 0;
}
 
/**
* drm_mode_addfb2 - add an FB to the graphics configuration
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Add a new FB to the specified CRTC, given a user request with format.
*
* Called by the user via ioctl.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int drm_mode_addfb2(struct drm_device *dev,
void *data, struct drm_file *file_priv)
static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r,
struct drm_file *file_priv)
{
struct drm_mode_fb_cmd2 *r = data;
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
int ret;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
if (r->flags & ~DRM_MODE_FB_INTERLACED) {
DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
return -EINVAL;
return ERR_PTR(-EINVAL);
}
 
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
return -EINVAL;
return ERR_PTR(-EINVAL);
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
r->height, config->min_height, config->max_height);
return -EINVAL;
return ERR_PTR(-EINVAL);
}
 
ret = framebuffer_check(r);
if (ret)
return ret;
return ERR_PTR(ret);
 
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
return PTR_ERR(fb);
return fb;
}
 
mutex_lock(&file_priv->fbs_lock);
2625,8 → 3017,37
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
mutex_unlock(&file_priv->fbs_lock);
 
return fb;
}
 
return ret;
/**
* drm_mode_addfb2 - add an FB to the graphics configuration
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Add a new FB to the specified CRTC, given a user request with format. This is
* the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
* and uses fourcc codes as pixel format specifiers.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_addfb2(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_framebuffer *fb;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
fb = add_framebuffer_internal(dev, data, file_priv);
if (IS_ERR(fb))
return PTR_ERR(fb);
 
return 0;
}
 
/**
2639,7 → 3060,7
*
* Called by the user via ioctl.
*
* RETURNS:
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_rmfb(struct drm_device *dev,
2693,7 → 3114,7
*
* Called by the user via ioctl.
*
* RETURNS:
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getfb(struct drm_device *dev,
2737,6 → 3158,25
return ret;
}
 
/**
* drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Lookup the FB and flush out the damaged area supplied by userspace as a clip
* rectangle list. Generic userspace which does frontbuffer rendering must call
* this ioctl to flush out the changes on manual-update display outputs, e.g.
* usb display-link, mipi manual update panels or edp panel self refresh modes.
*
* Modesetting drivers which always update the frontbuffer do not need to
* implement the corresponding ->dirty framebuffer callback.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
2814,7 → 3254,7
*
* Called by the user via ioctl.
*
* RETURNS:
* Returns:
* Zero on success, errno on failure.
*/
void drm_fb_release(struct drm_file *priv)
2840,6 → 3280,20
#endif
 
 
/**
* drm_property_create - create a new property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
* @num_values: number of pre-defined values
*
* This creates a new generic drm property which can then be attached to a drm
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create(struct drm_device *dev, int flags,
const char *name, int num_values)
{
2850,6 → 3304,8
if (!property)
return NULL;
 
property->dev = dev;
 
if (num_values) {
property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
if (!property->values)
2870,6 → 3326,9
}
 
list_add_tail(&property->head, &dev->mode_config.property_list);
 
WARN_ON(!drm_property_type_valid(property));
 
return property;
fail:
kfree(property->values);
2878,6 → 3337,24
}
EXPORT_SYMBOL(drm_property_create);
 
/**
* drm_property_create_enum - create a new enumeration property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
* @props: enumeration lists with property values
* @num_values: number of pre-defined values
*
* This creates a new generic drm property which can then be attached to a drm
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
* Userspace is only allowed to set one of the predefined values for enumeration
* properties.
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
const char *name,
const struct drm_prop_enum_list *props,
2906,13 → 3383,33
}
EXPORT_SYMBOL(drm_property_create_enum);
 
/**
* drm_property_create_bitmask - create a new bitmask property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
* @props: enumeration lists with property bitflags
* @num_values: number of pre-defined values
*
* This creates a new generic drm property which can then be attached to a drm
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
* Compared to plain enumeration properties, userspace is allowed to set any
* or'ed together combination of the predefined property bitflag values.
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
int flags, const char *name,
const struct drm_prop_enum_list *props,
int num_values)
int num_props,
uint64_t supported_bits)
{
struct drm_property *property;
int i, ret;
int i, ret, index = 0;
int num_values = hweight64(supported_bits);
 
flags |= DRM_MODE_PROP_BITMASK;
 
2919,9 → 3416,16
property = drm_property_create(dev, flags, name, num_values);
if (!property)
return NULL;
for (i = 0; i < num_props; i++) {
if (!(supported_bits & (1ULL << props[i].type)))
continue;
 
for (i = 0; i < num_values; i++) {
ret = drm_property_add_enum(property, i,
if (WARN_ON(index >= num_values)) {
drm_property_destroy(dev, property);
return NULL;
}
 
ret = drm_property_add_enum(property, index++,
props[i].type,
props[i].name);
if (ret) {
2934,14 → 3438,12
}
EXPORT_SYMBOL(drm_property_create_bitmask);
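The reworked drm_property_create_bitmask() now takes the full enum list plus a mask of the bits a given device actually supports; a sketch with made-up bit names:

static const struct drm_prop_enum_list foo_feature_bits[] = {
	{ 0, "feature-a" },
	{ 1, "feature-b" },
	{ 2, "feature-c" },
};

static struct drm_property *foo_create_feature_prop(struct drm_device *dev)
{
	/* export only feature-a and feature-c on this hypothetical hardware */
	return drm_property_create_bitmask(dev, 0, "features",
					   foo_feature_bits,
					   ARRAY_SIZE(foo_feature_bits),
					   (1ULL << 0) | (1ULL << 2));
}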
 
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
static struct drm_property *property_create_range(struct drm_device *dev,
int flags, const char *name,
uint64_t min, uint64_t max)
{
struct drm_property *property;
 
flags |= DRM_MODE_PROP_RANGE;
 
property = drm_property_create(dev, flags, name, 2);
if (!property)
return NULL;
2951,14 → 3453,82
 
return property;
}
 
/**
* drm_property_create_range - create a new ranged property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
* @min: minimum value of the property
* @max: maximum value of the property
*
* This creates a new generic drm property which can then be attached to a drm
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
* Userspace is allowed to set any integer value in the (min, max) range
* inclusive.
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max)
{
return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
name, min, max);
}
EXPORT_SYMBOL(drm_property_create_range);
 
struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
int flags, const char *name,
int64_t min, int64_t max)
{
return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
name, I642U64(min), I642U64(max));
}
EXPORT_SYMBOL(drm_property_create_signed_range);
 
struct drm_property *drm_property_create_object(struct drm_device *dev,
int flags, const char *name, uint32_t type)
{
struct drm_property *property;
 
flags |= DRM_MODE_PROP_OBJECT;
 
property = drm_property_create(dev, flags, name, 1);
if (!property)
return NULL;
 
property->values[0] = type;
 
return property;
}
EXPORT_SYMBOL(drm_property_create_object);
 
/**
* drm_property_add_enum - add a possible value to an enumeration property
* @property: enumeration property to change
* @index: index of the new enumeration
* @value: value of the new enumeration
* @name: symbolic name of the new enumeration
*
* This function adds enumerations to a property.
*
* Its use is deprecated; drivers should use one of the more specific helpers
* to directly create the property with all enumerations already attached.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name)
{
struct drm_property_enum *prop_enum;
 
if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
return -EINVAL;
 
/*
2965,7 → 3535,8
* Bitmask enum properties have the additional constraint of values
* from 0 to 63
*/
if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
(value > 63))
return -EINVAL;
 
if (!list_empty(&property->enum_blob_list)) {
2992,6 → 3563,14
}
EXPORT_SYMBOL(drm_property_add_enum);
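/*
 * Illustrative sketch (not part of the original file): instead of calling
 * drm_property_add_enum() by hand, a driver would normally create an enum
 * property with all values attached in one go, e.g. for a hypothetical
 * three-state property:
 *
 *	static const struct drm_prop_enum_list my_mode_list[] = {
 *		{ 0, "Off" },
 *		{ 1, "On" },
 *		{ 2, "Auto" },
 *	};
 *	struct drm_property *prop;
 *
 *	prop = drm_property_create_enum(dev, 0, "my mode", my_mode_list,
 *					ARRAY_SIZE(my_mode_list));
 */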
 
/**
* drm_property_destroy - destroy a drm property
* @dev: drm device
* @property: property to destroy
*
* This function frees a property including any attached resources like
* enumeration values.
*/
void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
{
struct drm_property_enum *prop_enum, *pt;
3009,6 → 3588,16
}
EXPORT_SYMBOL(drm_property_destroy);
 
/**
* drm_object_attach_property - attach a property to a modeset object
* @obj: drm modeset object
* @property: property to attach
* @init_val: initial value of the property
*
* This attaches the given property to the modeset object with the given initial
* value. Currently this function cannot fail since the properties are stored in
* a statically sized array.
*/
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val)
3029,6 → 3618,19
}
EXPORT_SYMBOL(drm_object_attach_property);
 
/**
* drm_object_property_set_value - set the value of a property
* @obj: drm mode object to set property value for
* @property: property to set
* @val: value the property should be set to
*
* This function sets a given property on a given object. It only changes the
* software state of the property; it does not call into the driver's
* ->set_property callback.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t val)
{
3045,6 → 3647,20
}
EXPORT_SYMBOL(drm_object_property_set_value);
 
/**
* drm_object_property_get_value - retrieve the value of a property
* @obj: drm mode object to get property value from
* @property: property to retrieve
* @val: storage for the property value
*
* This function retrieves the software state of the given property for the
* given object. Since there is no driver callback to retrieve the current
* property value, this might be out of sync with the hardware, depending upon
* the driver and property.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
3062,10 → 3678,22
EXPORT_SYMBOL(drm_object_property_get_value);
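/*
 * Illustrative sketch (not part of the original file): after a driver has
 * updated the hardware itself it can mirror the change in the software state
 * and read it back, assuming a previously attached property "prop":
 *
 *	uint64_t val;
 *
 *	drm_object_property_set_value(&connector->base, prop, 1);
 *	if (drm_object_property_get_value(&connector->base, prop, &val) == 0)
 *		DRM_DEBUG_KMS("property now reads back %llu\n",
 *			      (unsigned long long)val);
 */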
 
#if 0
/**
* drm_mode_getproperty_ioctl - get the current value of a connector's property
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* This function retrieves the current value for a connector's property.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_object *obj;
struct drm_mode_get_property *out_resp = data;
struct drm_property *property;
int enum_count = 0;
3084,17 → 3712,17
return -EINVAL;
 
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
property = drm_property_find(dev, out_resp->prop_id);
if (!property) {
ret = -ENOENT;
goto done;
}
property = obj_to_property(obj);
 
if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head)
enum_count++;
} else if (property->flags & DRM_MODE_PROP_BLOB) {
} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
list_for_each_entry(prop_blob, &property->enum_blob_list, head)
blob_count++;
}
3116,7 → 3744,8
}
out_resp->count_values = value_count;
 
if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
3138,7 → 3767,7
out_resp->count_enum_blobs = enum_count;
}
 
if (property->flags & DRM_MODE_PROP_BLOB) {
if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
copied = 0;
blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
3202,10 → 3831,23
}
 
#if 0
/**
* drm_mode_getblob_ioctl - get the contents of a blob property value
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* This function retrieves the contents of a blob property. The value stored in
* an object's blob property is just a normal modeset object id.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_object *obj;
struct drm_mode_get_blob *out_resp = data;
struct drm_property_blob *blob;
int ret = 0;
3215,12 → 3857,11
return -EINVAL;
 
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
if (!obj) {
blob = drm_property_blob_find(dev, out_resp->blob_id);
if (!blob) {
ret = -ENOENT;
goto done;
}
blob = obj_to_blob(obj);
 
if (out_resp->length == blob->length) {
blob_ptr = (void __user *)(unsigned long)out_resp->data;
3237,6 → 3878,36
}
#endif
 
int drm_mode_connector_set_path_property(struct drm_connector *connector,
char *path)
{
struct drm_device *dev = connector->dev;
int ret, size;
size = strlen(path) + 1;
 
connector->path_blob_ptr = drm_property_create_blob(connector->dev,
size, path);
if (!connector->path_blob_ptr)
return -EINVAL;
 
ret = drm_object_property_set_value(&connector->base,
dev->mode_config.path_property,
connector->path_blob_ptr->base.id);
return ret;
}
EXPORT_SYMBOL(drm_mode_connector_set_path_property);
 
/**
* drm_mode_connector_update_edid_property - update the edid property of a connector
* @connector: drm connector
* @edid: new value of the edid property
*
* This function creates a new blob modeset object and assigns its id to the
* connector's edid property.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid)
{
3243,6 → 3914,10
struct drm_device *dev = connector->dev;
int ret, size;
 
/* ignore requests to set edid when overridden */
if (connector->override_edid)
return 0;
 
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
 
3320,6 → 3995,21
return ret;
}
 
/**
* drm_mode_obj_get_properties_ioctl - get the current value of an object's property
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* This function retrieves the current value for an object's property. Compared
* to the connector specific ioctl this one is extended to also work on crtc and
* plane objects.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
3376,6 → 4066,22
return ret;
}
 
/**
* drm_mode_obj_set_property_ioctl - set the current value of an object's property
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* This function sets the current value for an object's property. It also calls
* into a driver's ->set_property callback to update the hardware state.
* Compared to the connector specific ioctl this one is extended to also work on
* crtc and plane objects.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
3436,6 → 4142,18
}
#endif
 
/**
* drm_mode_connector_attach_encoder - attach a connector to an encoder
* @connector: connector to attach
* @encoder: encoder to attach @connector to
*
* This function links up a connector to an encoder. Note that the routing
* restrictions between encoders and crtcs are exposed to userspace through the
* possible_clones and possible_crtcs bitmasks.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
3451,21 → 4169,18
}
EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
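/*
 * Illustrative sketch (not part of the original file): during output setup a
 * driver typically initializes both objects and then links them, for example:
 *
 *	drm_encoder_init(dev, &my_out->encoder, &my_encoder_funcs,
 *			 DRM_MODE_ENCODER_TMDS);
 *	drm_connector_init(dev, &my_out->connector, &my_connector_funcs,
 *			   DRM_MODE_CONNECTOR_HDMIA);
 *	drm_mode_connector_attach_encoder(&my_out->connector,
 *					  &my_out->encoder);
 *
 * my_out, my_encoder_funcs and my_connector_funcs are driver-specific
 * placeholders.
 */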
 
void drm_mode_connector_detach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == encoder->base.id) {
connector->encoder_ids[i] = 0;
if (connector->encoder == encoder)
connector->encoder = NULL;
break;
}
}
}
EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
 
/**
* drm_mode_crtc_set_gamma_size - set the gamma table size
* @crtc: CRTC to set the gamma table size for
* @gamma_size: size of the gamma table
*
* Drivers which support gamma tables should set this to the supported gamma
* table size when initializing the CRTC. Currently the drm core only supports a
* fixed gamma table size.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size)
{
3482,11 → 4197,24
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
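/*
 * Illustrative sketch (not part of the original file): a driver with 256-entry
 * hardware gamma tables would typically call this right after initializing the
 * crtc (my_crtc and my_crtc_funcs are driver-specific placeholders):
 *
 *	drm_crtc_init(dev, &my_crtc->base, &my_crtc_funcs);
 *	drm_mode_crtc_set_gamma_size(&my_crtc->base, 256);
 */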
 
#if 0
/**
* drm_mode_gamma_set_ioctl - set the gamma table
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* Set the gamma table of a CRTC to the one passed in by the user. Userspace can
* inquire the required gamma table size through drm_mode_gamma_get_ioctl.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_lut *crtc_lut = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
void *r_base, *g_base, *b_base;
int size;
3496,12 → 4224,11
return -EINVAL;
 
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
if (!crtc) {
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
 
if (crtc->funcs->gamma_set == NULL) {
ret = -ENOSYS;
3541,11 → 4268,25
 
}
 
/**
* drm_mode_gamma_get_ioctl - get the gamma table
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* Copy the current gamma table into the storage provided. This also provides
* the gamma table size the driver expects, which can be used to size the
* allocated storage.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_gamma_get_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_lut *crtc_lut = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
void *r_base, *g_base, *b_base;
int size;
3555,12 → 4296,11
return -EINVAL;
 
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
if (!crtc) {
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
 
/* memcpy into gamma store */
if (crtc_lut->gamma_size != crtc->gamma_size) {
3594,6 → 4334,14
#endif
 
 
/**
* drm_mode_config_reset - call ->reset callbacks
* @dev: drm device
*
* This function calls the ->reset callback of all crtcs, encoders and
* connectors. Drivers can use this in e.g. their driver load or resume code to
* reset hardware and software state.
*/
void drm_mode_config_reset(struct drm_device *dev)
{
struct drm_crtc *crtc;
3690,7 → 4438,7
* drm_format_num_planes - get the number of planes for format
* @format: pixel format (DRM_FORMAT_*)
*
* RETURNS:
* Returns:
* The number of planes used by the specified pixel format.
*/
int drm_format_num_planes(uint32_t format)
3725,7 → 4473,7
* @format: pixel format (DRM_FORMAT_*)
* @plane: plane index
*
* RETURNS:
* Returns:
* The bytes per pixel value for the specified plane.
*/
int drm_format_plane_cpp(uint32_t format, int plane)
3771,7 → 4519,7
* drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
* @format: pixel format (DRM_FORMAT_*)
*
* RETURNS:
* Returns:
* The horizontal chroma subsampling factor for the
* specified pixel format.
*/
3806,7 → 4554,7
* drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
* @format: pixel format (DRM_FORMAT_*)
*
* RETURNS:
* Returns:
* The vertical chroma subsampling factor for the
* specified pixel format.
*/
3842,6 → 4590,7
void drm_mode_config_init(struct drm_device *dev)
{
mutex_init(&dev->mode_config.mutex);
drm_modeset_lock_init(&dev->mode_config.connection_mutex);
mutex_init(&dev->mode_config.idr_mutex);
mutex_init(&dev->mode_config.fb_lock);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
3856,6 → 4605,7
 
drm_modeset_lock_all(dev);
drm_mode_create_standard_connector_properties(dev);
drm_mode_create_standard_plane_properties(dev);
drm_modeset_unlock_all(dev);
 
/* Just to be sure */
3863,6 → 4613,21
dev->mode_config.num_connector = 0;
dev->mode_config.num_crtc = 0;
dev->mode_config.num_encoder = 0;
dev->mode_config.num_overlay_plane = 0;
dev->mode_config.num_total_plane = 0;
}
EXPORT_SYMBOL(drm_mode_config_init);
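/*
 * Illustrative sketch (not part of the original file): drivers call this early
 * in their load path and then fill in the device limits and vfuncs, e.g.:
 *
 *	drm_mode_config_init(dev);
 *	dev->mode_config.min_width = 0;
 *	dev->mode_config.min_height = 0;
 *	dev->mode_config.max_width = 8192;
 *	dev->mode_config.max_height = 8192;
 *	dev->mode_config.funcs = &my_mode_config_funcs;
 *
 * my_mode_config_funcs is a driver-specific placeholder.
 */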
 
/**
* drm_mode_config_cleanup - free up DRM mode_config info
* @dev: DRM device
*
* Free up all the connectors and CRTCs associated with this DRM device, then
* free up the framebuffers and associated buffer objects.
*
* Note that since this /should/ happen single-threaded at driver/device
* teardown time, no locking is required. It's the driver's job to ensure that
* this guarantee actually holds true.
*
* FIXME: cleanup any dangling user buffer objects too
*/
/drivers/video/drm/drm_crtc_helper.c
29,6 → 29,7
* Jesse Barnes <jesse.barnes@intel.com>
*/
 
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
 
72,160 → 73,16
}
EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
 
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);
 
static void drm_mode_validate_flag(struct drm_connector *connector,
int flags)
{
struct drm_display_mode *mode;
 
if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
DRM_MODE_FLAG_3D_MASK))
return;
 
list_for_each_entry(mode, &connector->modes, head) {
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
!(flags & DRM_MODE_FLAG_INTERLACE))
mode->status = MODE_NO_INTERLACE;
if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
!(flags & DRM_MODE_FLAG_DBLSCAN))
mode->status = MODE_NO_DBLESCAN;
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
!(flags & DRM_MODE_FLAG_3D_MASK))
mode->status = MODE_NO_STEREO;
}
 
return;
}
 
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
* @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* LOCKING:
* Caller must hold mode config lock.
*
* Based on the helper callbacks implemented by @connector try to detect all
* valid modes. Modes will first be added to the connector's probed_modes list,
* then culled (based on validity and the @maxX, @maxY parameters) and put into
* the normal modes list.
*
* Intended to be use as a generic implementation of the ->fill_modes()
* @connector vfunc for drivers that use the crtc helpers for output mode
* filtering and detection.
*
* RETURNS:
* Number of modes found on @connector.
*/
int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
int count = 0;
int mode_flags = 0;
bool verbose_prune = true;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
drm_get_connector_name(connector));
/* set all modes to the unverified state */
list_for_each_entry(mode, &connector->modes, head)
mode->status = MODE_UNVERIFIED;
 
if (connector->force) {
if (connector->force == DRM_FORCE_ON)
connector->status = connector_status_connected;
else
connector->status = connector_status_disconnected;
if (connector->funcs->force)
connector->funcs->force(connector);
} else {
connector->status = connector->funcs->detect(connector, true);
}
 
/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
drm_kms_helper_poll_enable(dev);
 
dev->mode_config.poll_running = drm_kms_helper_poll;
 
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, drm_get_connector_name(connector));
drm_mode_connector_update_edid_property(connector, NULL);
verbose_prune = false;
goto prune;
}
 
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
count = drm_load_edid_firmware(connector);
if (count == 0)
#endif
count = (*connector_funcs->get_modes)(connector);
 
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
if (count == 0)
goto prune;
 
drm_mode_connector_list_update(connector);
 
if (maxX && maxY)
drm_mode_validate_size(dev, &connector->modes, maxX,
maxY, 0);
 
if (connector->interlace_allowed)
mode_flags |= DRM_MODE_FLAG_INTERLACE;
if (connector->doublescan_allowed)
mode_flags |= DRM_MODE_FLAG_DBLSCAN;
if (connector->stereo_allowed)
mode_flags |= DRM_MODE_FLAG_3D_MASK;
drm_mode_validate_flag(connector, mode_flags);
 
list_for_each_entry(mode, &connector->modes, head) {
if (mode->status == MODE_OK)
mode->status = connector_funcs->mode_valid(connector,
mode);
}
 
prune:
drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
 
if (list_empty(&connector->modes))
return 0;
 
list_for_each_entry(mode, &connector->modes, head)
mode->vrefresh = drm_mode_vrefresh(mode);
 
drm_mode_sort(&connector->modes);
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
drm_get_connector_name(connector));
list_for_each_entry(mode, &connector->modes, head) {
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
drm_mode_debug_printmodeline(mode);
}
 
return count;
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
 
/**
* drm_helper_encoder_in_use - check if a given encoder is in use
* @encoder: encoder to check
*
* LOCKING:
* Caller must hold mode config lock.
* Checks whether @encoder is used by any connector in the current mode setting
* output configuration. This doesn't mean that it is actually enabled since
* the DPMS state is tracked separately.
*
* Walk @encoders's DRM device's mode_config and see if it's in use.
*
* RETURNS:
* True if @encoder is part of the mode_config, false otherwise.
* Returns:
* True if @encoder is used, false otherwise.
*/
bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
{
242,13 → 99,12
* drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
* @crtc: CRTC to check
*
* LOCKING:
* Caller must hold mode config lock.
* Checks whether @crtc is used by any connector in the current mode setting
* output configuration. This doesn't mean that it is actually enabled since
* the DPMS state is tracked separately.
*
* Walk @crtc's DRM device's mode_config and see if it's in use.
*
* RETURNS:
* True if @crtc is part of the mode_config, false otherwise.
* Returns:
* True if @crtc is used, false otherwise.
*/
bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
{
279,33 → 135,17
encoder->bridge->funcs->post_disable(encoder->bridge);
}
 
/**
* drm_helper_disable_unused_functions - disable unused objects
* @dev: DRM device
*
* LOCKING:
* Caller must hold mode config lock.
*
* If an connector or CRTC isn't part of @dev's mode_config, it can be disabled
* by calling its dpms function, which should power it off.
*/
void drm_helper_disable_unused_functions(struct drm_device *dev)
static void __drm_helper_disable_unused_functions(struct drm_device *dev)
{
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_crtc *crtc;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder)
continue;
if (connector->status == connector_status_disconnected)
connector->encoder = NULL;
}
drm_warn_on_modeset_not_all_locked(dev);
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (!drm_helper_encoder_in_use(encoder)) {
drm_encoder_disable(encoder);
/* disconnector encoder from any connector */
/* disconnect encoder from any connector */
encoder->crtc = NULL;
}
}
318,10 → 158,27
(*crtc_funcs->disable)(crtc);
else
(*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
crtc->fb = NULL;
crtc->primary->fb = NULL;
}
}
}
 
/**
* drm_helper_disable_unused_functions - disable unused objects
* @dev: DRM device
*
* This function walks through the entire mode setting configuration of @dev. It
* will remove any crtc links of unused encoders and encoder links of
* disconnected connectors. Then it will disable all unused encoders and crtcs
* either by calling their disable callback if available or by calling their
* dpms callback with DRM_MODE_DPMS_OFF.
*/
void drm_helper_disable_unused_functions(struct drm_device *dev)
{
drm_modeset_lock_all(dev);
__drm_helper_disable_unused_functions(dev);
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
 
/*
355,9 → 212,6
* @y: vertical offset into the surface
* @old_fb: old framebuffer, for cleanup
*
* LOCKING:
* Caller must hold mode config lock.
*
* Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
* to fixup or reject the mode prior to trying to set it. This is an internal
* helper that drivers could e.g. use to update properties that require the
367,8 → 221,8
* drm_crtc_helper_set_config() helper function to drive the mode setting
* sequence.
*
* RETURNS:
* True if the mode was set successfully, or false otherwise.
* Returns:
* True if the mode was set successfully, false otherwise.
*/
bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
384,6 → 238,8
struct drm_encoder *encoder;
bool ret = true;
 
drm_warn_on_modeset_not_all_locked(dev);
 
saved_enabled = crtc->enabled;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled)
472,7 → 328,7
continue;
 
DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
encoder->base.id, drm_get_encoder_name(encoder),
encoder->base.id, encoder->name,
mode->base.id, mode->name);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
523,8 → 379,7
}
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
 
 
static int
static void
drm_crtc_helper_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
552,8 → 407,7
}
}
 
drm_helper_disable_unused_functions(dev);
return 0;
__drm_helper_disable_unused_functions(dev);
}
 
/**
560,17 → 414,14
* drm_crtc_helper_set_config - set a new config from userspace
* @set: mode set configuration
*
* LOCKING:
* Caller must hold mode config lock.
*
* Setup a new configuration, provided by the upper layers (either an ioctl call
* from userspace or internally e.g. from the fbdev suppport code) in @set, and
* from userspace or internally e.g. from the fbdev support code) in @set, and
* enable it. This is the main helper functions for drivers that implement
* kernel mode setting with the crtc helper functions and the assorted
* ->prepare(), ->modeset() and ->commit() helper callbacks.
*
* RETURNS:
* Returns 0 on success, -ERRNO on failure.
* Returns:
* Returns 0 on success, negative errno numbers on failure.
*/
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
607,11 → 458,14
(int)set->num_connectors, set->x, set->y);
} else {
DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
return drm_crtc_helper_disable(set->crtc);
drm_crtc_helper_disable(set->crtc);
return 0;
}
 
dev = set->crtc->dev;
 
drm_warn_on_modeset_not_all_locked(dev);
 
/*
* Allocate space for the backup of all (non-pointer) encoder and
* connector data.
647,19 → 501,19
save_set.mode = &set->crtc->mode;
save_set.x = set->crtc->x;
save_set.y = set->crtc->y;
save_set.fb = set->crtc->fb;
save_set.fb = set->crtc->primary->fb;
 
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
if (set->crtc->fb != set->fb) {
if (set->crtc->primary->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {
if (set->crtc->primary->fb == NULL) {
DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
mode_changed = true;
} else if (set->fb == NULL) {
mode_changed = true;
} else if (set->fb->pixel_format !=
set->crtc->fb->pixel_format) {
set->crtc->primary->fb->pixel_format) {
mode_changed = true;
} else
fb_changed = true;
689,12 → 543,13
if (new_encoder == NULL)
/* don't break so fail path works correct */
fail = 1;
break;
 
if (connector->dpms != DRM_MODE_DPMS_ON) {
DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
mode_changed = true;
}
 
break;
}
}
 
743,11 → 598,11
}
if (new_crtc) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.id, drm_get_connector_name(connector),
connector->base.id, connector->name,
new_crtc->base.id);
} else {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
connector->base.id, drm_get_connector_name(connector));
connector->base.id, connector->name);
}
}
 
760,13 → 615,13
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
set->crtc->fb = set->fb;
set->crtc->primary->fb = set->fb;
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
save_set.fb)) {
DRM_ERROR("failed to set mode on [CRTC:%d]\n",
set->crtc->base.id);
set->crtc->fb = save_set.fb;
set->crtc->primary->fb = save_set.fb;
ret = -EINVAL;
goto fail;
}
773,21 → 628,21
DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
for (i = 0; i < set->num_connectors; i++) {
DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
drm_get_connector_name(set->connectors[i]));
set->connectors[i]->name);
set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
}
}
drm_helper_disable_unused_functions(dev);
__drm_helper_disable_unused_functions(dev);
} else if (fb_changed) {
set->crtc->x = set->x;
set->crtc->y = set->y;
set->crtc->fb = set->fb;
set->crtc->primary->fb = set->fb;
ret = crtc_funcs->mode_set_base(set->crtc,
set->x, set->y, save_set.fb);
if (ret != 0) {
set->crtc->x = save_set.x;
set->crtc->y = save_set.y;
set->crtc->fb = save_set.fb;
set->crtc->primary->fb = save_set.fb;
goto fail;
}
}
924,7 → 779,15
}
EXPORT_SYMBOL(drm_helper_connector_dpms);
 
int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
/**
* drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
* @fb: drm_framebuffer object to fill out
* @mode_cmd: metadata from the userspace fb creation request
*
* This helper can be used in a driver's fb_create callback to pre-fill the fb's
* metadata fields.
*/
void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd2 *mode_cmd)
{
int i;
938,18 → 801,39
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
&fb->bits_per_pixel);
fb->pixel_format = mode_cmd->pixel_format;
 
return 0;
fb->flags = mode_cmd->flags;
}
EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
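/*
 * Illustrative sketch (not part of the original file): a minimal ->fb_create
 * implementation could use this helper roughly as follows; my_framebuffer,
 * my_fb_funcs and the surrounding buffer lookup are driver-specific
 * placeholders:
 *
 *	struct my_framebuffer *my_fb;
 *	int ret;
 *
 *	my_fb = kzalloc(sizeof(*my_fb), GFP_KERNEL);
 *	if (!my_fb)
 *		return ERR_PTR(-ENOMEM);
 *
 *	drm_helper_mode_fill_fb_struct(&my_fb->base, mode_cmd);
 *	ret = drm_framebuffer_init(dev, &my_fb->base, &my_fb_funcs);
 *	if (ret) {
 *		kfree(my_fb);
 *		return ERR_PTR(ret);
 *	}
 *	return &my_fb->base;
 */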
 
int drm_helper_resume_force_mode(struct drm_device *dev)
/**
* drm_helper_resume_force_mode - force-restore mode setting configuration
* @dev: drm_device which should be restored
*
* Drivers which use the mode setting helpers can use this function to
* force-restore the mode setting configuration e.g. on resume or when something
* else might have trampled over the hw state (like some overzealous old BIOSen
* tended to do).
*
* This helper doesn't provide an error return value since restoring the old
* config should never fail due to resource allocation issues: the driver has
* already successfully set the restored configuration. Hence this should
* boil down to the equivalent of a few dpms on calls, which also don't provide
* an error code.
*
* Drivers where simply restoring an old configuration again might fail (e.g.
* due to slight differences in allocating shared resources when the
* configuration is restored in a different order than when userspace set it up)
* need to use their own restore logic.
*/
void drm_helper_resume_force_mode(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_crtc_helper_funcs *crtc_funcs;
int ret, encoder_dpms;
int encoder_dpms;
bool ret;
 
drm_modeset_lock_all(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 
if (!crtc->enabled)
956,8 → 840,9
continue;
 
ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
crtc->x, crtc->y, crtc->primary->fb);
 
/* Restoring the old config should never fail! */
if (ret == false)
DRM_ERROR("failed to set mode on crtc %p\n", crtc);
 
980,154 → 865,9
drm_helper_choose_crtc_dpms(crtc));
}
}
 
/* disable the unused connectors while restoring the modesetting */
drm_helper_disable_unused_functions(dev);
return 0;
__drm_helper_disable_unused_functions(dev);
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
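/*
 * Illustrative sketch (not part of the original file): in a driver's resume
 * path this is usually one of the last steps, after the hardware has been
 * reinitialized:
 *
 *	static int my_driver_resume(struct drm_device *dev)
 *	{
 *		my_hw_init(dev);
 *		drm_helper_resume_force_mode(dev);
 *		return 0;
 *	}
 *
 * my_hw_init() stands in for whatever hardware reinitialization the driver
 * needs before the mode configuration can be restored.
 */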
 
void drm_kms_helper_hotplug_event(struct drm_device *dev)
{
/* send a uevent + call fbdev */
if (dev->mode_config.funcs->output_poll_changed)
dev->mode_config.funcs->output_poll_changed(dev);
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
 
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
struct drm_connector *connector;
enum drm_connector_status old_status;
bool repoll = false, changed = false;
 
if (!drm_kms_helper_poll)
return;
 
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
/* Ignore forced connectors. */
if (connector->force)
continue;
 
/* Ignore HPD capable connectors and connectors where we don't
* want any hotplug detection at all for polling. */
if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
continue;
 
repoll = true;
 
old_status = connector->status;
/* if we are connected and don't want to poll for disconnect
skip it */
if (old_status == connector_status_connected &&
!(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
continue;
 
connector->status = connector->funcs->detect(connector, false);
if (old_status != connector->status) {
const char *old, *new;
 
old = drm_get_connector_status_name(old_status);
new = drm_get_connector_status_name(connector->status);
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
"status updated from %s to %s\n",
connector->base.id,
drm_get_connector_name(connector),
old, new);
 
changed = true;
}
}
 
mutex_unlock(&dev->mode_config.mutex);
 
if (changed)
drm_kms_helper_hotplug_event(dev);
 
if (repoll)
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
}
 
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
return;
// cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
bool poll = false;
struct drm_connector *connector;
 
if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
return;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT))
poll = true;
}
 
if (poll)
schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
void drm_kms_helper_poll_init(struct drm_device *dev)
{
INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
dev->mode_config.poll_enabled = true;
 
drm_kms_helper_poll_enable(dev);
}
EXPORT_SYMBOL(drm_kms_helper_poll_init);
 
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
drm_kms_helper_poll_disable(dev);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
 
bool drm_helper_hpd_irq_event(struct drm_device *dev)
{
struct drm_connector *connector;
enum drm_connector_status old_status;
bool changed = false;
 
if (!dev->mode_config.poll_enabled)
return false;
 
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
/* Only handle HPD capable connectors. */
if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
 
old_status = connector->status;
 
connector->status = connector->funcs->detect(connector, false);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
drm_get_connector_name(connector),
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));
if (old_status != connector->status)
changed = true;
}
 
mutex_unlock(&dev->mode_config.mutex);
 
if (changed)
drm_kms_helper_hotplug_event(dev);
 
return changed;
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
/drivers/video/drm/drm_crtc_internal.h
0,0 → 1,38
/*
* Copyright © 2006 Keith Packard
* Copyright © 2007-2008 Dave Airlie
* Copyright © 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
* Copyright © 2014 Intel Corporation
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
 
/*
* This header file contains mode setting related functions and definitions
* which are only used within the drm module as internal implementation details
* and are not exported to drivers.
*/
 
int drm_mode_object_get(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type);
void drm_mode_object_put(struct drm_device *dev,
struct drm_mode_object *object);
 
/drivers/video/drm/drm_dp_helper.c
27,8 → 27,8
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/i2c.h>
#include <drm/drmP.h>
#include <drm/drm_dp_helper.h>
#include <drm/drmP.h>
 
/**
* DOC: dp helpers
206,7 → 206,7
* i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
* @adapter: i2c adapter to register
*
* This registers an i2c adapater that uses dp aux channel as it's underlaying
* This registers an i2c adapter that uses dp aux channel as its underlying
* transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
* and store it in the algo_data member of the @adapter argument. This will be
* used by the i2c over dp aux algorithm to drive the hardware.
213,6 → 213,10
*
* RETURNS:
* 0 on success, -ERRNO on failure.
*
* IMPORTANT:
* This interface is deprecated, please switch to the new dp aux helpers and
* drm_dp_aux_register().
*/
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
346,3 → 350,421
}
}
EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
 
/**
* DOC: dp helpers
*
* The DisplayPort AUX channel is an abstraction to allow generic, driver-
* independent access to AUX functionality. Drivers can take advantage of
* this by filling in the fields of the drm_dp_aux structure.
*
* Transactions are described using a hardware-independent drm_dp_aux_msg
* structure, which is passed into a driver's .transfer() implementation.
* Both native and I2C-over-AUX transactions are supported.
*/
 
static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
unsigned int offset, void *buffer, size_t size)
{
struct drm_dp_aux_msg msg;
unsigned int retry;
int err;
 
memset(&msg, 0, sizeof(msg));
msg.address = offset;
msg.request = request;
msg.buffer = buffer;
msg.size = size;
 
/*
* The specification doesn't give any recommendation on how often to
* retry native transactions, so retry 7 times like for I2C-over-AUX
* transactions.
*/
for (retry = 0; retry < 7; retry++) {
 
mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, &msg);
mutex_unlock(&aux->hw_mutex);
if (err < 0) {
if (err == -EBUSY)
continue;
 
return err;
}
 
 
switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
case DP_AUX_NATIVE_REPLY_ACK:
if (err < size)
return -EPROTO;
return err;
 
case DP_AUX_NATIVE_REPLY_NACK:
return -EIO;
 
case DP_AUX_NATIVE_REPLY_DEFER:
usleep(500);
break;
}
}
 
DRM_DEBUG_KMS("too many retries, giving up\n");
return -EIO;
}
 
/**
* drm_dp_dpcd_read() - read a series of bytes from the DPCD
* @aux: DisplayPort AUX channel
* @offset: address of the (first) register to read
* @buffer: buffer to store the register values
* @size: number of bytes in @buffer
*
* Returns the number of bytes transferred on success, or a negative error
* code on failure. -EIO is returned if the request was NAKed by the sink or
* if the retry count was exceeded. If not all bytes were transferred, this
* function returns -EPROTO. Errors from the underlying AUX channel transfer
* function, with the exception of -EBUSY (which causes the transaction to
* be retried), are propagated to the caller.
*/
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
{
return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
size);
}
EXPORT_SYMBOL(drm_dp_dpcd_read);
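/*
 * Illustrative sketch (not part of the original file): reading the receiver
 * capability field from the DPCD with this helper looks like this:
 *
 *	u8 dpcd[DP_RECEIVER_CAP_SIZE];
 *	ssize_t ret;
 *
 *	ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
 *	if (ret < 0)
 *		return ret;
 *	DRM_DEBUG_KMS("DPCD rev %#x, max lanes %d\n", dpcd[DP_DPCD_REV],
 *		      dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK);
 */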
 
/**
* drm_dp_dpcd_write() - write a series of bytes to the DPCD
* @aux: DisplayPort AUX channel
* @offset: address of the (first) register to write
* @buffer: buffer containing the values to write
* @size: number of bytes in @buffer
*
* Returns the number of bytes transferred on success, or a negative error
* code on failure. -EIO is returned if the request was NAKed by the sink or
* if the retry count was exceeded. If not all bytes were transferred, this
* function returns -EPROTO. Errors from the underlying AUX channel transfer
* function, with the exception of -EBUSY (which causes the transaction to
* be retried), are propagated to the caller.
*/
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
{
return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
size);
}
EXPORT_SYMBOL(drm_dp_dpcd_write);
 
/**
* drm_dp_dpcd_read_link_status() - read DPCD link status (bytes 0x202-0x207)
* @aux: DisplayPort AUX channel
* @status: buffer to store the link status in (must be at least 6 bytes)
*
* Returns the number of bytes transferred on success or a negative error
* code on failure.
*/
int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
u8 status[DP_LINK_STATUS_SIZE])
{
return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status,
DP_LINK_STATUS_SIZE);
}
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
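/*
 * Illustrative sketch (not part of the original file): the status buffer
 * returned here is usually fed straight into the generic link status helpers,
 * e.g. during link training; lane_count is assumed to come from the caller:
 *
 *	u8 status[DP_LINK_STATUS_SIZE];
 *
 *	if (drm_dp_dpcd_read_link_status(aux, status) < 0)
 *		return false;
 *	return drm_dp_channel_eq_ok(status, lane_count);
 */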
 
/**
* drm_dp_link_probe() - probe a DisplayPort link for capabilities
* @aux: DisplayPort AUX channel
* @link: pointer to structure in which to return link capabilities
*
* The structure filled in by this function can usually be passed directly
* into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
* configure the link based on the link's capabilities.
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
{
u8 values[3];
int err;
 
memset(link, 0, sizeof(*link));
 
err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values));
if (err < 0)
return err;
 
link->revision = values[0];
link->rate = drm_dp_bw_code_to_link_rate(values[1]);
link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK;
 
if (values[2] & DP_ENHANCED_FRAME_CAP)
link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
 
return 0;
}
EXPORT_SYMBOL(drm_dp_link_probe);
 
/**
* drm_dp_link_power_up() - power up a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
{
u8 value;
int err;
 
/* DP_SET_POWER register is only available on DPCD v1.1 and later */
if (link->revision < 0x11)
return 0;
 
err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
if (err < 0)
return err;
 
value &= ~DP_SET_POWER_MASK;
value |= DP_SET_POWER_D0;
 
err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
if (err < 0)
return err;
 
/*
* According to the DP 1.1 specification, a "Sink Device must exit the
* power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
* Control Field" (register 0x600)).
*/
usleep(2000);
 
return 0;
}
EXPORT_SYMBOL(drm_dp_link_power_up);
 
/**
* drm_dp_link_configure() - configure a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
{
u8 values[2];
int err;
 
values[0] = drm_dp_link_rate_to_bw_code(link->rate);
values[1] = link->num_lanes;
 
if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
 
err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
if (err < 0)
return err;
 
return 0;
}
EXPORT_SYMBOL(drm_dp_link_configure);
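/*
 * Illustrative sketch (not part of the original file): these three helpers are
 * designed to be chained when bringing up a link:
 *
 *	struct drm_dp_link link;
 *	int err;
 *
 *	err = drm_dp_link_probe(aux, &link);
 *	if (err < 0)
 *		return err;
 *	err = drm_dp_link_power_up(aux, &link);
 *	if (err < 0)
 *		return err;
 *	err = drm_dp_link_configure(aux, &link);
 *	if (err < 0)
 *		return err;
 */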
 
/*
* I2C-over-AUX implementation
*/
 
static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
I2C_FUNC_10BIT_ADDR;
}
 
/*
* Transfer a single I2C-over-AUX message and handle various error conditions,
* retrying the transaction as appropriate. It is assumed that the
* aux->transfer function does not modify anything in the msg other than the
* reply field.
*/
static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
unsigned int retry;
int err;
 
/*
* DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device
* is required to retry at least seven times upon receiving AUX_DEFER
* before giving up the AUX transaction.
*/
for (retry = 0; retry < 7; retry++) {
mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, msg);
mutex_unlock(&aux->hw_mutex);
if (err < 0) {
if (err == -EBUSY)
continue;
 
DRM_DEBUG_KMS("transaction failed: %d\n", err);
return err;
}
 
 
switch (msg->reply & DP_AUX_NATIVE_REPLY_MASK) {
case DP_AUX_NATIVE_REPLY_ACK:
/*
* For I2C-over-AUX transactions this isn't enough, we
* need to check for the I2C ACK reply.
*/
break;
 
case DP_AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("native nack\n");
return -EREMOTEIO;
 
case DP_AUX_NATIVE_REPLY_DEFER:
DRM_DEBUG_KMS("native defer");
/*
* We could check for I2C bit rate capabilities and if
* available adjust this interval. We could also be
* more careful with DP-to-legacy adapters where a
* long legacy cable may force very low I2C bit rates.
*
* For now just defer for long enough to hopefully be
* safe for all use-cases.
*/
usleep_range(500, 600);
continue;
 
default:
DRM_ERROR("invalid native reply %#04x\n", msg->reply);
return -EREMOTEIO;
}
 
switch (msg->reply & DP_AUX_I2C_REPLY_MASK) {
case DP_AUX_I2C_REPLY_ACK:
/*
* Both native ACK and I2C ACK replies received. We
* can assume the transfer was successful.
*/
if (err < msg->size)
return -EPROTO;
return 0;
 
case DP_AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("I2C nack\n");
return -EREMOTEIO;
 
case DP_AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("I2C defer\n");
usleep_range(400, 500);
continue;
 
default:
DRM_ERROR("invalid I2C reply %#04x\n", msg->reply);
return -EREMOTEIO;
}
}
 
DRM_DEBUG_KMS("too many retries, giving up\n");
return -EREMOTEIO;
}
 
static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
int num)
{
struct drm_dp_aux *aux = adapter->algo_data;
unsigned int i, j;
struct drm_dp_aux_msg msg;
int err = 0;
 
memset(&msg, 0, sizeof(msg));
 
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
msg.request = (msgs[i].flags & I2C_M_RD) ?
DP_AUX_I2C_READ :
DP_AUX_I2C_WRITE;
msg.request |= DP_AUX_I2C_MOT;
/* Send a bare address packet to start the transaction.
* Zero sized messages specify an address only (bare
* address) transaction.
*/
msg.buffer = NULL;
msg.size = 0;
err = drm_dp_i2c_do_msg(aux, &msg);
if (err < 0)
break;
/*
* Many hardware implementations support FIFOs larger than a
* single byte, but it has been empirically determined that
* transferring data in larger chunks can actually lead to
* decreased performance. Therefore each message is simply
* transferred byte-by-byte.
*/
for (j = 0; j < msgs[i].len; j++) {
msg.buffer = msgs[i].buf + j;
msg.size = 1;
 
err = drm_dp_i2c_do_msg(aux, &msg);
if (err < 0)
break;
}
if (err < 0)
break;
}
if (err >= 0)
err = num;
/* Send a bare address packet to close out the transaction.
* Zero sized messages specify an address only (bare
* address) transaction.
*/
msg.request &= ~DP_AUX_I2C_MOT;
msg.buffer = NULL;
msg.size = 0;
(void)drm_dp_i2c_do_msg(aux, &msg);
 
return err;
}
 
static const struct i2c_algorithm drm_dp_i2c_algo = {
.functionality = drm_dp_i2c_functionality,
.master_xfer = drm_dp_i2c_xfer,
};
 
/**
* drm_dp_aux_register() - initialise and register aux channel
* @aux: DisplayPort AUX channel
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_aux_register(struct drm_dp_aux *aux)
{
mutex_init(&aux->hw_mutex);
 
aux->ddc.algo = &drm_dp_i2c_algo;
aux->ddc.algo_data = aux;
aux->ddc.retries = 3;
 
aux->ddc.class = I2C_CLASS_DDC;
aux->ddc.owner = THIS_MODULE;
aux->ddc.dev.parent = aux->dev;
// aux->ddc.dev.of_node = aux->dev->of_node;
 
strlcpy(aux->ddc.name, aux->name ? aux->name : "aux",
sizeof(aux->ddc.name));
 
return i2c_add_adapter(&aux->ddc);
}
EXPORT_SYMBOL(drm_dp_aux_register);
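/*
 * Illustrative sketch (not part of the original file): a driver wires up its
 * hardware-specific transfer hook and registers the channel roughly like this;
 * my_output, my_aux_transfer and my_hw_do_aux are driver-specific
 * placeholders:
 *
 *	static ssize_t my_aux_transfer(struct drm_dp_aux *aux,
 *				       struct drm_dp_aux_msg *msg)
 *	{
 *		return my_hw_do_aux(aux, msg);
 *	}
 *
 *	my_output->aux.name = "DP-1";
 *	my_output->aux.dev = dev->dev;
 *	my_output->aux.transfer = my_aux_transfer;
 *	ret = drm_dp_aux_register(&my_output->aux);
 */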
 
/**
* drm_dp_aux_unregister() - unregister an AUX adapter
* @aux: DisplayPort AUX channel
*/
void drm_dp_aux_unregister(struct drm_dp_aux *aux)
{
i2c_del_adapter(&aux->ddc);
}
EXPORT_SYMBOL(drm_dp_aux_unregister);
/drivers/video/drm/drm_dp_mst_topology.c
0,0 → 1,2666
/*
* Copyright © 2014 Red Hat
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
 
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drmP.h>
 
#include <drm/drm_fixed.h>
 
u64 get_jiffies_64(void)
{
return jiffies;
}
/**
* DOC: dp mst helper
*
* These functions contain parts of the DisplayPort 1.2a MultiStream Transport
* protocol. The helpers contain a topology manager and bandwidth manager.
* The helpers encapsulate the sending and receiving of sideband msgs.
*/
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
char *buf);
static int test_calc_pbn_mode(void);
 
static void drm_dp_put_port(struct drm_dp_mst_port *port);
 
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
int id,
struct drm_dp_payload *payload);
 
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
 
static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
u8 *guid);
 
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
u8 bitmask = 0x80;
u8 bitshift = 7;
u8 array_index = 0;
int number_of_bits = num_nibbles * 4;
u8 remainder = 0;
 
while (number_of_bits != 0) {
number_of_bits--;
remainder <<= 1;
remainder |= (data[array_index] & bitmask) >> bitshift;
bitmask >>= 1;
bitshift--;
if (bitmask == 0) {
bitmask = 0x80;
bitshift = 7;
array_index++;
}
if ((remainder & 0x10) == 0x10)
remainder ^= 0x13;
}
 
number_of_bits = 4;
while (number_of_bits != 0) {
number_of_bits--;
remainder <<= 1;
if ((remainder & 0x10) != 0)
remainder ^= 0x13;
}
 
return remainder;
}
 
static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
u8 bitmask = 0x80;
u8 bitshift = 7;
u8 array_index = 0;
int number_of_bits = number_of_bytes * 8;
u16 remainder = 0;
 
while (number_of_bits != 0) {
number_of_bits--;
remainder <<= 1;
remainder |= (data[array_index] & bitmask) >> bitshift;
bitmask >>= 1;
bitshift--;
if (bitmask == 0) {
bitmask = 0x80;
bitshift = 7;
array_index++;
}
if ((remainder & 0x100) == 0x100)
remainder ^= 0xd5;
}
 
number_of_bits = 8;
while (number_of_bits != 0) {
number_of_bits--;
remainder <<= 1;
if ((remainder & 0x100) != 0)
remainder ^= 0xd5;
}
 
return remainder & 0xff;
}
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
u8 size = 3;
size += (hdr->lct / 2);
return size;
}
 
static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
u8 *buf, int *len)
{
int idx = 0;
int i;
u8 crc4;
buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
for (i = 0; i < (hdr->lct / 2); i++)
buf[idx++] = hdr->rad[i];
buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
(hdr->msg_len & 0x3f);
buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
 
crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
buf[idx - 1] |= (crc4 & 0xf);
 
*len = idx;
}
 
static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
u8 *buf, int buflen, u8 *hdrlen)
{
u8 crc4;
u8 len;
int i;
u8 idx;
if (buf[0] == 0)
return false;
len = 3;
len += ((buf[0] & 0xf0) >> 4) / 2;
if (len > buflen)
return false;
crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
 
if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
return false;
}
 
hdr->lct = (buf[0] & 0xf0) >> 4;
hdr->lcr = (buf[0] & 0xf);
idx = 1;
for (i = 0; i < (hdr->lct / 2); i++)
hdr->rad[i] = buf[idx++];
hdr->broadcast = (buf[idx] >> 7) & 0x1;
hdr->path_msg = (buf[idx] >> 6) & 0x1;
hdr->msg_len = buf[idx] & 0x3f;
idx++;
hdr->somt = (buf[idx] >> 7) & 0x1;
hdr->eomt = (buf[idx] >> 6) & 0x1;
hdr->seqno = (buf[idx] >> 4) & 0x1;
idx++;
*hdrlen = idx;
return true;
}
 
static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
struct drm_dp_sideband_msg_tx *raw)
{
int idx = 0;
int i;
u8 *buf = raw->msg;
buf[idx++] = req->req_type & 0x7f;
 
switch (req->req_type) {
case DP_ENUM_PATH_RESOURCES:
buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
idx++;
break;
case DP_ALLOCATE_PAYLOAD:
buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
(req->u.allocate_payload.number_sdp_streams & 0xf);
idx++;
buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
idx++;
buf[idx] = (req->u.allocate_payload.pbn >> 8);
idx++;
buf[idx] = (req->u.allocate_payload.pbn & 0xff);
idx++;
for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
idx++;
}
if (req->u.allocate_payload.number_sdp_streams & 1) {
i = req->u.allocate_payload.number_sdp_streams - 1;
buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
idx++;
}
break;
case DP_QUERY_PAYLOAD:
buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
idx++;
buf[idx] = (req->u.query_payload.vcpi & 0x7f);
idx++;
break;
case DP_REMOTE_DPCD_READ:
buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
idx++;
buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
idx++;
buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
idx++;
buf[idx] = (req->u.dpcd_read.num_bytes);
idx++;
break;
 
case DP_REMOTE_DPCD_WRITE:
buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
idx++;
buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
idx++;
buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
idx++;
buf[idx] = (req->u.dpcd_write.num_bytes);
idx++;
memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
idx += req->u.dpcd_write.num_bytes;
break;
case DP_REMOTE_I2C_READ:
buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
idx++;
for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
idx++;
buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
idx++;
memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
idx += req->u.i2c_read.transactions[i].num_bytes;
 
buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
idx++;
}
buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
idx++;
buf[idx] = (req->u.i2c_read.num_bytes_read);
idx++;
break;
 
case DP_REMOTE_I2C_WRITE:
buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
idx++;
buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
idx++;
buf[idx] = (req->u.i2c_write.num_bytes);
idx++;
memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
idx += req->u.i2c_write.num_bytes;
break;
}
raw->cur_len = idx;
}
 
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
u8 crc4;
crc4 = drm_dp_msg_data_crc4(msg, len);
msg[len] = crc4;
}
 
static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
struct drm_dp_sideband_msg_tx *raw)
{
int idx = 0;
u8 *buf = raw->msg;
 
buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
 
raw->cur_len = idx;
}
 
/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
u8 *replybuf, u8 replybuflen, bool hdr)
{
int ret;
u8 crc4;
 
if (hdr) {
u8 hdrlen;
struct drm_dp_sideband_msg_hdr recv_hdr;
ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
if (ret == false) {
print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
return false;
}
 
/* get length contained in this portion */
msg->curchunk_len = recv_hdr.msg_len;
msg->curchunk_hdrlen = hdrlen;
 
/* we have already gotten a SOMT (start of message transaction) - don't bother parsing */
if (recv_hdr.somt && msg->have_somt)
return false;
 
if (recv_hdr.somt) {
memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
msg->have_somt = true;
}
if (recv_hdr.eomt)
msg->have_eomt = true;
 
/* copy the bytes for the remainder of this header chunk */
msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
} else {
memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
msg->curchunk_idx += replybuflen;
}
 
if (msg->curchunk_idx >= msg->curchunk_len) {
/* do CRC */
crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
/* copy chunk into bigger msg */
memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
msg->curlen += msg->curchunk_len - 1;
}
return true;
}
 
static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
int idx = 1;
int i;
memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
idx += 16;
repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
idx++;
if (idx > raw->curlen)
goto fail_len;
for (i = 0; i < repmsg->u.link_addr.nports; i++) {
if (raw->msg[idx] & 0x80)
repmsg->u.link_addr.ports[i].input_port = 1;
 
repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
 
idx++;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
if (repmsg->u.link_addr.ports[i].input_port == 0)
repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
idx++;
if (idx > raw->curlen)
goto fail_len;
if (repmsg->u.link_addr.ports[i].input_port == 0) {
repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
idx++;
if (idx > raw->curlen)
goto fail_len;
memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
idx += 16;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
idx++;
 
}
if (idx > raw->curlen)
goto fail_len;
}
 
return true;
fail_len:
DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
return false;
}
 
static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
int idx = 1;
repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
idx++;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
if (idx > raw->curlen)
goto fail_len;
 
memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
return true;
fail_len:
DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
return false;
}
 
static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
int idx = 1;
repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
idx++;
if (idx > raw->curlen)
goto fail_len;
return true;
fail_len:
DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
return false;
}
 
static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
int idx = 1;
 
repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
idx++;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
idx++;
/* TODO check */
memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
return true;
fail_len:
DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
return false;
}
 
static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
int idx = 1;
repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
idx++;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
idx += 2;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
idx += 2;
if (idx > raw->curlen)
goto fail_len;
return true;
fail_len:
DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
return false;
}
 
static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
int idx = 1;
repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
idx++;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.allocate_payload.vcpi = raw->msg[idx];
idx++;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
idx += 2;
if (idx > raw->curlen)
goto fail_len;
return true;
fail_len:
DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
return false;
}
 
static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
int idx = 1;
repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
idx++;
if (idx > raw->curlen)
goto fail_len;
repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
idx += 2;
if (idx > raw->curlen)
goto fail_len;
return true;
fail_len:
DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
return false;
}
 
static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *msg)
{
memset(msg, 0, sizeof(*msg));
msg->reply_type = (raw->msg[0] & 0x80) >> 7;
msg->req_type = (raw->msg[0] & 0x7f);
 
if (msg->reply_type) {
memcpy(msg->u.nak.guid, &raw->msg[1], 16);
msg->u.nak.reason = raw->msg[17];
msg->u.nak.nak_data = raw->msg[18];
return false;
}
 
switch (msg->req_type) {
case DP_LINK_ADDRESS:
return drm_dp_sideband_parse_link_address(raw, msg);
case DP_QUERY_PAYLOAD:
return drm_dp_sideband_parse_query_payload_ack(raw, msg);
case DP_REMOTE_DPCD_READ:
return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
case DP_REMOTE_DPCD_WRITE:
return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
case DP_REMOTE_I2C_READ:
return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
case DP_ENUM_PATH_RESOURCES:
return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
case DP_ALLOCATE_PAYLOAD:
return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
default:
DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
return false;
}
}
 
static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_req_body *msg)
{
int idx = 1;
 
msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
idx++;
if (idx > raw->curlen)
goto fail_len;
 
memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
idx += 16;
if (idx > raw->curlen)
goto fail_len;
 
msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
idx++;
return true;
fail_len:
DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
return false;
}
 
static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_req_body *msg)
{
int idx = 1;
 
msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
idx++;
if (idx > raw->curlen)
goto fail_len;
 
memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
idx += 16;
if (idx > raw->curlen)
goto fail_len;
 
msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
idx++;
return true;
fail_len:
DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
return false;
}
 
static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_req_body *msg)
{
memset(msg, 0, sizeof(*msg));
msg->req_type = (raw->msg[0] & 0x7f);
 
switch (msg->req_type) {
case DP_CONNECTION_STATUS_NOTIFY:
return drm_dp_sideband_parse_connection_status_notify(raw, msg);
case DP_RESOURCE_STATUS_NOTIFY:
return drm_dp_sideband_parse_resource_status_notify(raw, msg);
default:
DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
return false;
}
}
 
static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
struct drm_dp_sideband_msg_req_body req;
 
req.req_type = DP_REMOTE_DPCD_WRITE;
req.u.dpcd_write.port_number = port_num;
req.u.dpcd_write.dpcd_address = offset;
req.u.dpcd_write.num_bytes = num_bytes;
req.u.dpcd_write.bytes = bytes;
drm_dp_encode_sideband_req(&req, msg);
 
return 0;
}
 
static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
struct drm_dp_sideband_msg_req_body req;
 
req.req_type = DP_LINK_ADDRESS;
drm_dp_encode_sideband_req(&req, msg);
return 0;
}
 
static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
struct drm_dp_sideband_msg_req_body req;
 
req.req_type = DP_ENUM_PATH_RESOURCES;
req.u.port_num.port_number = port_num;
drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true;
return 0;
}
 
static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
u8 vcpi, uint16_t pbn)
{
struct drm_dp_sideband_msg_req_body req;
memset(&req, 0, sizeof(req));
req.req_type = DP_ALLOCATE_PAYLOAD;
req.u.allocate_payload.port_number = port_num;
req.u.allocate_payload.vcpi = vcpi;
req.u.allocate_payload.pbn = pbn;
drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true;
return 0;
}
 
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_vcpi *vcpi)
{
int ret;
 
mutex_lock(&mgr->payload_lock);
ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
if (ret > mgr->max_payloads) {
ret = -EINVAL;
DRM_DEBUG_KMS("out of payload ids %d\n", ret);
goto out_unlock;
}
 
set_bit(ret, &mgr->payload_mask);
vcpi->vcpi = ret;
mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
mutex_unlock(&mgr->payload_lock);
return ret;
}
 
static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
int id)
{
if (id == 0)
return;
 
mutex_lock(&mgr->payload_lock);
DRM_DEBUG_KMS("putting payload %d\n", id);
clear_bit(id, &mgr->payload_mask);
mgr->proposed_vcpis[id - 1] = NULL;
mutex_unlock(&mgr->payload_lock);
}
 
static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg)
{
bool ret;
mutex_lock(&mgr->qlock);
ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
mutex_unlock(&mgr->qlock);
return ret;
}
 
static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
struct drm_dp_sideband_msg_tx *txmsg)
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
int ret;
 
ret = wait_event_timeout(mgr->tx_waitq,
check_txmsg_state(mgr, txmsg),
(4 * HZ));
mutex_lock(&mstb->mgr->qlock);
if (ret > 0) {
if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
ret = -EIO;
goto out;
}
} else {
DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
 
/* dump some state */
ret = -EIO;
 
/* remove from q */
if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
list_del(&txmsg->next);
}
 
if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
mstb->tx_slots[txmsg->seqno] = NULL;
}
}
out:
mutex_unlock(&mgr->qlock);
 
return ret;
}
 
static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
struct drm_dp_mst_branch *mstb;
 
mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
if (!mstb)
return NULL;
 
mstb->lct = lct;
if (lct > 1)
memcpy(mstb->rad, rad, lct / 2);
INIT_LIST_HEAD(&mstb->ports);
kref_init(&mstb->kref);
return mstb;
}
 
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
struct drm_dp_mst_port *port, *tmp;
bool wake_tx = false;
 
cancel_work_sync(&mstb->mgr->work);
 
/*
* destroy all ports - don't need lock
* as there are no more references to the mst branch
* device at this point.
*/
list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
list_del(&port->next);
drm_dp_put_port(port);
}
 
/* drop any tx slots msg */
mutex_lock(&mstb->mgr->qlock);
if (mstb->tx_slots[0]) {
mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
mstb->tx_slots[0] = NULL;
wake_tx = true;
}
if (mstb->tx_slots[1]) {
mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
mstb->tx_slots[1] = NULL;
wake_tx = true;
}
mutex_unlock(&mstb->mgr->qlock);
 
// if (wake_tx)
// wake_up(&mstb->mgr->tx_waitq);
kfree(mstb);
}
 
static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
{
kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
}
 
 
static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
switch (old_pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
case DP_PEER_DEVICE_SST_SINK:
/* remove i2c over sideband */
drm_dp_mst_unregister_i2c_bus(&port->aux);
break;
case DP_PEER_DEVICE_MST_BRANCHING:
drm_dp_put_mst_branch_device(port->mstb);
port->mstb = NULL;
break;
}
}
 
static void drm_dp_destroy_port(struct kref *kref)
{
struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
if (!port->input) {
port->vcpi.num_slots = 0;
if (port->connector)
(*port->mgr->cbs->destroy_connector)(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt);
 
if (!port->input && port->vcpi.vcpi > 0)
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
}
kfree(port);
 
(*mgr->cbs->hotplug)(mgr);
}
 
static void drm_dp_put_port(struct drm_dp_mst_port *port)
{
kref_put(&port->kref, drm_dp_destroy_port);
}
 
static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
{
struct drm_dp_mst_port *port;
struct drm_dp_mst_branch *rmstb;
if (to_find == mstb) {
kref_get(&mstb->kref);
return mstb;
}
list_for_each_entry(port, &mstb->ports, next) {
if (port->mstb) {
rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
if (rmstb)
return rmstb;
}
}
return NULL;
}
 
static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_branch *rmstb = NULL;
mutex_lock(&mgr->lock);
if (mgr->mst_primary)
rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
mutex_unlock(&mgr->lock);
return rmstb;
}
 
static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
{
struct drm_dp_mst_port *port, *mport;
 
list_for_each_entry(port, &mstb->ports, next) {
if (port == to_find) {
kref_get(&port->kref);
return port;
}
if (port->mstb) {
mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
if (mport)
return mport;
}
}
return NULL;
}
 
static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
struct drm_dp_mst_port *rport = NULL;
mutex_lock(&mgr->lock);
if (mgr->mst_primary)
rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
mutex_unlock(&mgr->lock);
return rport;
}
 
static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
struct drm_dp_mst_port *port;
 
list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) {
kref_get(&port->kref);
return port;
}
}
 
return NULL;
}
 
/*
* calculate a new RAD for this MST branch device:
* a parent with an LCT of 2 contributes 1 nibble of RAD,
* a parent with an LCT of 3 contributes 2 nibbles, and so on.
*/
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
u8 *rad)
{
int lct = port->parent->lct;
int shift = 4;
int idx = lct / 2;
if (lct > 1) {
memcpy(rad, port->parent->rad, idx);
shift = (lct % 2) ? 4 : 0;
} else
rad[0] = 0;
 
rad[idx] |= port->port_num << shift;
return lct + 1;
}
 
/*
* returns true if a new branch device was created, in which case the caller
* should send it a LINK_ADDRESS request
*/
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
int ret;
u8 rad[6], lct;
bool send_link = false;
switch (port->pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
case DP_PEER_DEVICE_SST_SINK:
/* add i2c over sideband */
ret = drm_dp_mst_register_i2c_bus(&port->aux);
break;
case DP_PEER_DEVICE_MST_BRANCHING:
lct = drm_dp_calculate_rad(port, rad);
 
port->mstb = drm_dp_add_mst_branch_device(lct, rad);
port->mstb->mgr = port->mgr;
port->mstb->port_parent = port;
 
send_link = true;
break;
}
return send_link;
}
 
static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port)
{
int ret;
if (port->dpcd_rev >= 0x12) {
port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
if (!port->guid_valid) {
ret = drm_dp_send_dpcd_write(mstb->mgr,
port,
DP_GUID,
16, port->guid);
port->guid_valid = true;
}
}
}
 
static void build_mst_prop_path(struct drm_dp_mst_port *port,
struct drm_dp_mst_branch *mstb,
char *proppath)
{
int i;
char temp[8];
snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id);
for (i = 0; i < (mstb->lct - 1); i++) {
int shift = (i % 2) ? 0 : 4;
int port_num = mstb->rad[i / 2] >> shift;
snprintf(temp, 8, "-%d", port_num);
strncat(proppath, temp, 255);
}
snprintf(temp, 8, "-%d", port->port_num);
strncat(proppath, temp, 255);
}
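 
/*
 * Worked example (editor addition): with a connector base id of 12, a sink
 * on port 2 of a branch device that itself hangs off port 1 of the primary
 * branch gets the property path "mst:12-1-2" - one "-<port>" element per
 * hop, as built by the loop above. The id 12 is only a placeholder value.
 */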
 
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
struct device *dev,
struct drm_dp_link_addr_reply_port *port_msg)
{
struct drm_dp_mst_port *port;
bool ret;
bool created = false;
int old_pdt = 0;
int old_ddps = 0;
port = drm_dp_get_port(mstb, port_msg->port_number);
if (!port) {
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return;
kref_init(&port->kref);
port->parent = mstb;
port->port_num = port_msg->port_number;
port->mgr = mstb->mgr;
port->aux.name = "DPMST";
port->aux.dev = dev;
created = true;
} else {
old_pdt = port->pdt;
old_ddps = port->ddps;
}
 
port->pdt = port_msg->peer_device_type;
port->input = port_msg->input_port;
port->mcs = port_msg->mcs;
port->ddps = port_msg->ddps;
port->ldps = port_msg->legacy_device_plug_status;
port->dpcd_rev = port_msg->dpcd_revision;
port->num_sdp_streams = port_msg->num_sdp_streams;
port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
memcpy(port->guid, port_msg->peer_guid, 16);
 
/* manage mstb port lists with mgr lock - take a reference
for this list */
if (created) {
mutex_lock(&mstb->mgr->lock);
kref_get(&port->kref);
list_add(&port->next, &mstb->ports);
mutex_unlock(&mstb->mgr->lock);
}
 
if (old_ddps != port->ddps) {
if (port->ddps) {
drm_dp_check_port_guid(mstb, port);
if (!port->input)
drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
} else {
port->guid_valid = false;
port->available_pbn = 0;
}
}
 
if (old_pdt != port->pdt && !port->input) {
drm_dp_port_teardown_pdt(port, old_pdt);
 
ret = drm_dp_port_setup_pdt(port);
if (ret == true) {
drm_dp_send_link_address(mstb->mgr, port->mstb);
port->mstb->link_address_sent = true;
}
}
 
if (created && !port->input) {
char proppath[255];
build_mst_prop_path(port, mstb, proppath);
port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
}
 
/* put reference to this port */
drm_dp_put_port(port);
}
 
static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
struct drm_dp_connection_status_notify *conn_stat)
{
struct drm_dp_mst_port *port;
int old_pdt;
int old_ddps;
bool dowork = false;
port = drm_dp_get_port(mstb, conn_stat->port_number);
if (!port)
return;
 
old_ddps = port->ddps;
old_pdt = port->pdt;
port->pdt = conn_stat->peer_device_type;
port->mcs = conn_stat->message_capability_status;
port->ldps = conn_stat->legacy_device_plug_status;
port->ddps = conn_stat->displayport_device_plug_status;
 
if (old_ddps != port->ddps) {
if (port->ddps) {
drm_dp_check_port_guid(mstb, port);
dowork = true;
} else {
port->guid_valid = false;
port->available_pbn = 0;
}
}
if (old_pdt != port->pdt && !port->input) {
drm_dp_port_teardown_pdt(port, old_pdt);
 
if (drm_dp_port_setup_pdt(port))
dowork = true;
}
 
drm_dp_put_port(port);
// if (dowork)
// queue_work(system_long_wq, &mstb->mgr->work);
 
}
 
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
u8 lct, u8 *rad)
{
struct drm_dp_mst_branch *mstb;
struct drm_dp_mst_port *port;
int i;
/* find the port by iterating down */
mstb = mgr->mst_primary;
 
for (i = 0; i < lct - 1; i++) {
int shift = (i % 2) ? 0 : 4;
int port_num = rad[i / 2] >> shift;
 
list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) {
if (!port->mstb) {
DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
return NULL;
}
 
mstb = port->mstb;
break;
}
}
}
kref_get(&mstb->kref);
return mstb;
}
 
static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_port *port;
 
if (!mstb->link_address_sent) {
drm_dp_send_link_address(mgr, mstb);
mstb->link_address_sent = true;
}
list_for_each_entry(port, &mstb->ports, next) {
if (port->input)
continue;
 
if (!port->ddps)
continue;
 
if (!port->available_pbn)
drm_dp_send_enum_path_resources(mgr, mstb, port);
 
if (port->mstb)
drm_dp_check_and_send_link_address(mgr, port->mstb);
}
}
 
static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
 
drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);
 
}
 
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
u8 *guid)
{
static u8 zero_guid[16];
 
if (!memcmp(guid, zero_guid, 16)) {
u64 salt = get_jiffies_64();
memcpy(&guid[0], &salt, sizeof(u64));
memcpy(&guid[8], &salt, sizeof(u64));
return false;
}
return true;
}
 
#if 0
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
struct drm_dp_sideband_msg_req_body req;
 
req.req_type = DP_REMOTE_DPCD_READ;
req.u.dpcd_read.port_number = port_num;
req.u.dpcd_read.dpcd_address = offset;
req.u.dpcd_read.num_bytes = num_bytes;
drm_dp_encode_sideband_req(&req, msg);
 
return 0;
}
#endif
 
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
bool up, u8 *msg, int len)
{
int ret;
int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
int tosend, total, offset;
int retries = 0;
 
retry:
total = len;
offset = 0;
do {
tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
 
ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
&msg[offset],
tosend);
if (ret != tosend) {
if (ret == -EIO && retries < 5) {
retries++;
goto retry;
}
DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
WARN(1, "fail\n");
 
return -EIO;
}
offset += tosend;
total -= tosend;
} while (total > 0);
return 0;
}
 
static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
struct drm_dp_sideband_msg_tx *txmsg)
{
struct drm_dp_mst_branch *mstb = txmsg->dst;
 
/* no seqno assigned yet - pick a free tx slot, bailing out if both are in use */
if (txmsg->seqno == -1) {
if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
return -EAGAIN;
}
if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
txmsg->seqno = mstb->last_seqno;
mstb->last_seqno ^= 1;
} else if (mstb->tx_slots[0] == NULL)
txmsg->seqno = 0;
else
txmsg->seqno = 1;
mstb->tx_slots[txmsg->seqno] = txmsg;
}
hdr->broadcast = 0;
hdr->path_msg = txmsg->path_msg;
hdr->lct = mstb->lct;
hdr->lcr = mstb->lct - 1;
if (mstb->lct > 1)
memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
hdr->seqno = txmsg->seqno;
return 0;
}
/*
* process a single block of the next message in the sideband queue
*/
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg,
bool up)
{
u8 chunk[48];
struct drm_dp_sideband_msg_hdr hdr;
int len, space, idx, tosend;
int ret;
 
memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
 
if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
txmsg->seqno = -1;
txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
}
 
/* make hdr from dst mst - for replies use seqno
otherwise assign one */
ret = set_hdr_from_dst_qlock(&hdr, txmsg);
if (ret < 0)
return ret;
 
/* amount left to send in this message */
len = txmsg->cur_len - txmsg->cur_offset;
 
/* 48 byte sideband chunk, minus 1 byte for the data CRC and however many bytes the header needs */
space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
 
tosend = min(len, space);
if (len == txmsg->cur_len)
hdr.somt = 1;
if (space >= len)
hdr.eomt = 1;
 
 
hdr.msg_len = tosend + 1;
drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
/* add crc at end */
drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
idx += tosend + 1;
 
ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
if (ret) {
DRM_DEBUG_KMS("sideband msg failed to send\n");
return ret;
}
 
txmsg->cur_offset += tosend;
if (txmsg->cur_offset == txmsg->cur_len) {
txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
return 1;
}
return 0;
}
 
/* must be called holding qlock */
static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
int ret;
 
/* construct a chunk from the first msg in the tx_msg queue */
if (list_empty(&mgr->tx_msg_downq)) {
mgr->tx_down_in_progress = false;
return;
}
mgr->tx_down_in_progress = true;
 
txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
ret = process_single_tx_qlock(mgr, txmsg, false);
if (ret == 1) {
/* txmsg is sent; it should be in the slots now */
list_del(&txmsg->next);
} else if (ret) {
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
list_del(&txmsg->next);
if (txmsg->seqno != -1)
txmsg->dst->tx_slots[txmsg->seqno] = NULL;
txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
// wake_up(&mgr->tx_waitq);
}
if (list_empty(&mgr->tx_msg_downq)) {
mgr->tx_down_in_progress = false;
return;
}
}
 
/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
int ret;
 
/* construct a chunk from the first msg in the tx_msg queue */
if (list_empty(&mgr->tx_msg_upq)) {
mgr->tx_up_in_progress = false;
return;
}
 
txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
ret = process_single_tx_qlock(mgr, txmsg, true);
if (ret == 1) {
/* up txmsgs aren't put in slots - so free the message once it is sent */
list_del(&txmsg->next);
kfree(txmsg);
} else if (ret)
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
mgr->tx_up_in_progress = true;
}
 
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg)
{
mutex_lock(&mgr->qlock);
list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
if (!mgr->tx_down_in_progress)
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
 
static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
int len;
struct drm_dp_sideband_msg_tx *txmsg;
int ret;
 
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
return -ENOMEM;
 
txmsg->dst = mstb;
len = build_link_address(txmsg);
 
drm_dp_queue_down_tx(mgr, txmsg);
 
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
int i;
 
if (txmsg->reply.reply_type == 1)
DRM_DEBUG_KMS("link address nak received\n");
else {
DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
txmsg->reply.u.link_addr.ports[i].input_port,
txmsg->reply.u.link_addr.ports[i].peer_device_type,
txmsg->reply.u.link_addr.ports[i].port_number,
txmsg->reply.u.link_addr.ports[i].dpcd_revision,
txmsg->reply.u.link_addr.ports[i].mcs,
txmsg->reply.u.link_addr.ports[i].ddps,
txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
}
for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
}
(*mgr->cbs->hotplug)(mgr);
}
} else
DRM_DEBUG_KMS("link address failed %d\n", ret);
 
kfree(txmsg);
return 0;
}
 
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port)
{
int len;
struct drm_dp_sideband_msg_tx *txmsg;
int ret;
 
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
return -ENOMEM;
 
txmsg->dst = mstb;
len = build_enum_path_resources(txmsg, port->port_num);
 
drm_dp_queue_down_tx(mgr, txmsg);
 
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
if (txmsg->reply.reply_type == 1)
DRM_DEBUG_KMS("enum path resources nak received\n");
else {
if (port->port_num != txmsg->reply.u.path_resources.port_number)
DRM_ERROR("got incorrect port in response\n");
DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
txmsg->reply.u.path_resources.avail_payload_bw_number);
port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
}
}
 
kfree(txmsg);
return 0;
}
 
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int id,
int pbn)
{
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
int len, ret;
 
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
if (!mstb)
return -EINVAL;
 
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
ret = -ENOMEM;
goto fail_put;
}
 
txmsg->dst = mstb;
len = build_allocate_payload(txmsg, port->port_num,
id,
pbn);
 
drm_dp_queue_down_tx(mgr, txmsg);
 
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
if (txmsg->reply.reply_type == 1) {
ret = -EINVAL;
} else
ret = 0;
}
kfree(txmsg);
fail_put:
drm_dp_put_mst_branch_device(mstb);
return ret;
}
 
static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
int id,
struct drm_dp_payload *payload)
{
int ret;
 
ret = drm_dp_dpcd_write_payload(mgr, id, payload);
if (ret < 0) {
payload->payload_state = 0;
return ret;
}
payload->payload_state = DP_PAYLOAD_LOCAL;
return 0;
}
 
static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int id,
struct drm_dp_payload *payload)
{
int ret;
ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
if (ret < 0)
return ret;
payload->payload_state = DP_PAYLOAD_REMOTE;
return ret;
}
 
static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int id,
struct drm_dp_payload *payload)
{
DRM_DEBUG_KMS("\n");
/* it's okay for these to fail */
if (port) {
drm_dp_payload_send_msg(mgr, port, id, 0);
}
 
drm_dp_dpcd_write_payload(mgr, id, payload);
payload->payload_state = 0;
return 0;
}
 
static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
int id,
struct drm_dp_payload *payload)
{
payload->payload_state = 0;
return 0;
}
 
/**
* drm_dp_update_payload_part1() - Execute payload update part 1
* @mgr: manager to use.
*
* This iterates over all proposed virtual channels, and tries to
* allocate space in the link for them. For 0->slots transitions,
* this step just writes the VCPI to the MST device. For slots->0
* transitions, this writes the updated VCPIs and removes the
* remote VC payloads.
*
* After calling this, the driver should generate ACT and payload
* packets.
*/
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
int i;
int cur_slots = 1;
struct drm_dp_payload req_payload;
struct drm_dp_mst_port *port;
 
mutex_lock(&mgr->payload_lock);
for (i = 0; i < mgr->max_payloads; i++) {
/* solve the current payloads - compare to the hw ones
- update the hw view */
req_payload.start_slot = cur_slots;
if (mgr->proposed_vcpis[i]) {
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
} else {
port = NULL;
req_payload.num_slots = 0;
}
/* work out what is required to happen with this payload */
if (mgr->payloads[i].start_slot != req_payload.start_slot ||
mgr->payloads[i].num_slots != req_payload.num_slots) {
 
/* need to push an update for this payload */
if (req_payload.num_slots) {
drm_dp_create_payload_step1(mgr, i + 1, &req_payload);
mgr->payloads[i].num_slots = req_payload.num_slots;
} else if (mgr->payloads[i].num_slots) {
mgr->payloads[i].num_slots = 0;
drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]);
req_payload.payload_state = mgr->payloads[i].payload_state;
} else
req_payload.payload_state = 0;
 
mgr->payloads[i].start_slot = req_payload.start_slot;
mgr->payloads[i].payload_state = req_payload.payload_state;
}
cur_slots += req_payload.num_slots;
}
mutex_unlock(&mgr->payload_lock);
 
return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part1);
 
/**
* drm_dp_update_payload_part2() - Execute payload update part 2
* @mgr: manager to use.
*
* This iterates over all proposed virtual channels, and tries to
* allocate space in the link for them. For 0->slots transitions,
* this step writes the remote VC payload commands. For slots->0
* this just resets some internal state.
*/
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_mst_port *port;
int i;
int ret = 0;
mutex_lock(&mgr->payload_lock);
for (i = 0; i < mgr->max_payloads; i++) {
 
if (!mgr->proposed_vcpis[i])
continue;
 
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
 
DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]);
} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]);
}
if (ret) {
mutex_unlock(&mgr->payload_lock);
return ret;
}
}
mutex_unlock(&mgr->payload_lock);
return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part2);
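 
/*
 * Illustrative sketch (editor addition), not part of the driver: the
 * ordering a source driver is expected to follow around the two payload
 * update steps above. The helper calls are the ones defined in this file;
 * triggering ACT is hardware specific and only described in a comment.
 */
#if 0
static int example_mst_enable_stream(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret;

	/* step 1: write the updated VC payload table to the branch device */
	ret = drm_dp_update_payload_part1(mgr);
	if (ret)
		return ret;

	/* the driver now programs its own transmitter and triggers ACT
	 * (hardware specific, not shown), then waits for the sink to
	 * report ACT handled */
	ret = drm_dp_check_act_status(mgr);
	if (ret)
		return ret;

	/* step 2: send the remote ALLOCATE_PAYLOAD sideband messages */
	return drm_dp_update_payload_part2(mgr);
}
#endif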
 
#if 0 /* unused as of yet */
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size)
{
int len;
struct drm_dp_sideband_msg_tx *txmsg;
 
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
return -ENOMEM;
 
len = build_dpcd_read(txmsg, port->port_num, 0, 8);
txmsg->dst = port->parent;
 
drm_dp_queue_down_tx(mgr, txmsg);
 
return 0;
}
#endif
 
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes)
{
int len;
int ret;
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
 
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
if (!mstb)
return -EINVAL;
 
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
ret = -ENOMEM;
goto fail_put;
}
 
len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
txmsg->dst = mstb;
 
drm_dp_queue_down_tx(mgr, txmsg);
 
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
if (txmsg->reply.reply_type == 1) {
ret = -EINVAL;
} else
ret = 0;
}
kfree(txmsg);
fail_put:
drm_dp_put_mst_branch_device(mstb);
return ret;
}
 
static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
{
struct drm_dp_sideband_msg_reply_body reply;
 
reply.reply_type = 1;
reply.req_type = req_type;
drm_dp_encode_sideband_reply(&reply, msg);
return 0;
}
 
static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
int req_type, int seqno, bool broadcast)
{
struct drm_dp_sideband_msg_tx *txmsg;
 
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
return -ENOMEM;
 
txmsg->dst = mstb;
txmsg->seqno = seqno;
drm_dp_encode_up_ack_reply(txmsg, req_type);
 
mutex_lock(&mgr->qlock);
list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
if (!mgr->tx_up_in_progress) {
process_single_up_tx_qlock(mgr);
}
mutex_unlock(&mgr->qlock);
return 0;
}
 
static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count)
{
switch (dp_link_bw) {
case DP_LINK_BW_1_62:
return 3 * dp_link_count;
case DP_LINK_BW_2_7:
return 5 * dp_link_count;
case DP_LINK_BW_5_4:
return 10 * dp_link_count;
}
return 0;
}
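 
/*
 * Worked example (editor addition): DP_LINK_BW_5_4 over 4 lanes gives a
 * pbn_div of 10 * 4 = 40 PBN per timeslot, so the 2560 PBN budget set up
 * in drm_dp_mst_topology_mgr_set_mst() below maps to 2560 / 40 = 64 slots.
 */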
 
/**
* drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
* @mgr: manager to set state for
* @mst_state: true to enable MST on this connector - false to disable.
*
* This is called by the driver when it detects an MST capable device plugged
* into a DP MST capable port, or when a DP MST capable device is unplugged.
*/
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
int ret = 0;
struct drm_dp_mst_branch *mstb = NULL;
 
mutex_lock(&mgr->lock);
if (mst_state == mgr->mst_state)
goto out_unlock;
 
mgr->mst_state = mst_state;
/* set the device into MST mode */
if (mst_state) {
WARN_ON(mgr->mst_primary);
 
/* get dpcd info */
ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
if (ret != DP_RECEIVER_CAP_SIZE) {
DRM_DEBUG_KMS("failed to read DPCD\n");
goto out_unlock;
}
 
mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
mgr->total_pbn = 2560;
mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
mgr->avail_slots = mgr->total_slots;
 
/* add initial branch device at LCT 1 */
mstb = drm_dp_add_mst_branch_device(1, NULL);
if (mstb == NULL) {
ret = -ENOMEM;
goto out_unlock;
}
mstb->mgr = mgr;
 
/* give this the main reference */
mgr->mst_primary = mstb;
kref_get(&mgr->mst_primary->kref);
 
{
struct drm_dp_payload reset_pay;
reset_pay.start_slot = 0;
reset_pay.num_slots = 0x3f;
drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
}
 
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
if (ret < 0) {
goto out_unlock;
}
 
 
/* sort out guid */
ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
if (ret != 16) {
DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
goto out_unlock;
}
 
mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
if (!mgr->guid_valid) {
ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
mgr->guid_valid = true;
}
 
// queue_work(system_long_wq, &mgr->work);
 
ret = 0;
} else {
/* disable MST on the device */
mstb = mgr->mst_primary;
mgr->mst_primary = NULL;
/* this can fail if the device is gone */
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
mgr->payload_mask = 0;
set_bit(0, &mgr->payload_mask);
}
 
out_unlock:
mutex_unlock(&mgr->lock);
if (mstb)
drm_dp_put_mst_branch_device(mstb);
return ret;
 
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
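 
/*
 * Illustrative sketch (editor addition), not part of the driver: how a
 * driver might flip MST on or off after probing the sink. Reading
 * DP_MSTM_CAP this way mirrors common source driver practice and is an
 * assumption here; only the two helper calls are defined in this file.
 */
#if 0
static int example_mst_detect(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 mstm_cap;

	if (drm_dp_dpcd_readb(mgr->aux, DP_MSTM_CAP, &mstm_cap) != 1)
		return drm_dp_mst_topology_mgr_set_mst(mgr, false);

	return drm_dp_mst_topology_mgr_set_mst(mgr,
					       (mstm_cap & DP_MST_CAP) != 0);
}
#endif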
 
/**
* drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
* @mgr: manager to suspend
*
* This function tells the MST device that we can't handle UP messages
* anymore. This should stop it from sending any since we are suspended.
*/
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
mutex_lock(&mgr->lock);
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
 
/**
* drm_dp_mst_topology_mgr_resume() - resume the MST manager
* @mgr: manager to resume
*
* This will fetch the DPCD and check whether the device is still there;
* if it is, it rewrites the MSTM control bits and returns 0.
*
* If the device has gone away this returns -1, and the driver should do
* a full MST reprobe, in case we were undocked.
*/
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
 
mutex_lock(&mgr->lock);
 
if (mgr->mst_primary) {
int sret;
sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
if (sret != DP_RECEIVER_CAP_SIZE) {
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
ret = -1;
goto out_unlock;
}
 
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
if (ret < 0) {
DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
ret = -1;
goto out_unlock;
}
ret = 0;
} else
ret = -1;
 
out_unlock:
mutex_unlock(&mgr->lock);
return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
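 
/*
 * Illustrative sketch (editor addition): the resume handling suggested by
 * the kernel-doc above - on failure the driver tears the topology down and
 * re-detects the port; the re-detect itself is driver specific and omitted.
 */
#if 0
static void example_mst_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
		/* the dock or hub may have changed while we were suspended */
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
		/* ...full driver specific reprobe of the port follows... */
	}
}
#endif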
 
static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
int len;
u8 replyblock[32];
int replylen, origlen, curreply;
int ret;
struct drm_dp_sideband_msg_rx *msg;
int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
 
len = min(mgr->max_dpcd_transaction_bytes, 16);
ret = drm_dp_dpcd_read(mgr->aux, basereg,
replyblock, len);
if (ret != len) {
DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
return;
}
ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
if (!ret) {
DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
return;
}
replylen = msg->curchunk_len + msg->curchunk_hdrlen;
 
origlen = replylen;
replylen -= len;
curreply = len;
while (replylen > 0) {
len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
replyblock, len);
if (ret != len) {
DRM_DEBUG_KMS("failed to read a chunk\n");
}
ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
if (ret == false)
DRM_DEBUG_KMS("failed to build sideband msg\n");
curreply += len;
replylen -= len;
}
}
 
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
 
drm_dp_get_one_sb_msg(mgr, false);
 
if (mgr->down_rep_recv.have_eomt) {
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
int slot = -1;
mstb = drm_dp_get_mst_branch_device(mgr,
mgr->down_rep_recv.initial_hdr.lct,
mgr->down_rep_recv.initial_hdr.rad);
 
if (!mstb) {
DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
}
 
/* find the message */
slot = mgr->down_rep_recv.initial_hdr.seqno;
mutex_lock(&mgr->qlock);
txmsg = mstb->tx_slots[slot];
/* the slot itself is cleared further down, once the reply has been parsed */
mutex_unlock(&mgr->qlock);
 
if (!txmsg) {
DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
mstb,
mgr->down_rep_recv.initial_hdr.seqno,
mgr->down_rep_recv.initial_hdr.lct,
mgr->down_rep_recv.initial_hdr.rad[0],
mgr->down_rep_recv.msg[0]);
drm_dp_put_mst_branch_device(mstb);
memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
}
 
drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
if (txmsg->reply.reply_type == 1) {
DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
}
 
memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
drm_dp_put_mst_branch_device(mstb);
 
mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
mstb->tx_slots[slot] = NULL;
mutex_unlock(&mgr->qlock);
 
// wake_up(&mgr->tx_waitq);
}
return ret;
}
 
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
drm_dp_get_one_sb_msg(mgr, true);
 
if (mgr->up_req_recv.have_eomt) {
struct drm_dp_sideband_msg_req_body msg;
struct drm_dp_mst_branch *mstb;
bool seqno;
mstb = drm_dp_get_mst_branch_device(mgr,
mgr->up_req_recv.initial_hdr.lct,
mgr->up_req_recv.initial_hdr.rad);
if (!mstb) {
DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
}
 
seqno = mgr->up_req_recv.initial_hdr.seqno;
drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
 
if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
drm_dp_update_port(mstb, &msg.u.conn_stat);
DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
(*mgr->cbs->hotplug)(mgr);
 
} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
}
 
drm_dp_put_mst_branch_device(mstb);
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
}
return ret;
}
 
/**
* drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
* @mgr: manager to notify irq for.
* @esi: 4 bytes from SINK_COUNT_ESI
*
* This should be called from the driver when it detects a short IRQ,
* along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
* topology manager will process the sideband messages received as a result
* of this.
*/
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
{
int ret = 0;
int sc;
*handled = false;
sc = esi[0] & 0x3f;
 
if (sc != mgr->sink_count) {
mgr->sink_count = sc;
*handled = true;
}
 
if (esi[1] & DP_DOWN_REP_MSG_RDY) {
ret = drm_dp_mst_handle_down_rep(mgr);
*handled = true;
}
 
if (esi[1] & DP_UP_REQ_MSG_RDY) {
ret |= drm_dp_mst_handle_up_req(mgr);
*handled = true;
}
 
drm_dp_mst_kick_tx(mgr);
return ret;
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
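 
/*
 * Illustrative sketch (editor addition): how a driver's short-pulse HPD
 * handler might feed drm_dp_mst_hpd_irq(). The 4 byte ESI read and the
 * write-back ack mirror common driver practice and are assumptions here,
 * not an API defined in this file.
 */
#if 0
static void example_mst_short_pulse(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[4];
	bool handled;

	if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 4) != 4)
		return;

	drm_dp_mst_hpd_irq(mgr, esi, &handled);

	if (handled)
		/* ack the serviced IRQ bits back to the sink */
		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
}
#endif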
 
/**
* drm_dp_mst_detect_port() - get connection status for an MST port
* @mgr: manager for this port
* @port: unverified pointer to a port
*
* This returns the current connection state for a port. It validates that
* the port pointer still exists so the caller doesn't require a reference.
*/
enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
enum drm_connector_status status = connector_status_disconnected;
 
/* we need to search for the port in the mgr in case it's gone */
port = drm_dp_get_validated_port_ref(mgr, port);
if (!port)
return connector_status_disconnected;
 
if (!port->ddps)
goto out;
 
switch (port->pdt) {
case DP_PEER_DEVICE_NONE:
case DP_PEER_DEVICE_MST_BRANCHING:
break;
 
case DP_PEER_DEVICE_SST_SINK:
status = connector_status_connected;
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
if (port->ldps)
status = connector_status_connected;
break;
}
out:
drm_dp_put_port(port);
return status;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);
 
/**
* drm_dp_mst_get_edid() - get EDID for an MST port
* @connector: toplevel connector to get EDID for
* @mgr: manager for this port
* @port: unverified pointer to a port.
*
* This returns an EDID for the port connected to a connector.
* It validates that the pointer still exists so the caller doesn't require
* a reference.
*/
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
struct edid *edid = NULL;
 
/* we need to search for the port in the mgr in case it's gone */
port = drm_dp_get_validated_port_ref(mgr, port);
if (!port)
return NULL;
 
edid = drm_get_edid(connector, &port->aux.ddc);
drm_dp_put_port(port);
return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);
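 
/*
 * Illustrative sketch (editor addition): a typical MST connector
 * ->get_modes() built on the two helpers above. How the connector resolves
 * its mgr/port pair is driver specific and assumed here;
 * drm_add_edid_modes() and drm_mode_connector_update_edid_property() come
 * from other DRM helpers, not from this file.
 */
#if 0
static int example_mst_get_modes(struct drm_connector *connector,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port)
{
	struct edid *edid;
	int ret = 0;

	if (drm_dp_mst_detect_port(mgr, port) != connector_status_connected)
		return 0;

	edid = drm_dp_mst_get_edid(connector, mgr, port);
	if (edid) {
		drm_mode_connector_update_edid_property(connector, edid);
		ret = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}
	return ret;
}
#endif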
 
/**
* drm_dp_find_vcpi_slots() - find slots for this PBN value
* @mgr: manager to use
* @pbn: payload bandwidth to convert into slots.
*/
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
int pbn)
{
int num_slots;
 
num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
 
if (num_slots > mgr->avail_slots)
return -ENOSPC;
return num_slots;
}
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
 
static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_vcpi *vcpi, int pbn)
{
int num_slots;
int ret;
 
num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
 
if (num_slots > mgr->avail_slots)
return -ENOSPC;
 
vcpi->pbn = pbn;
vcpi->aligned_pbn = num_slots * mgr->pbn_div;
vcpi->num_slots = num_slots;
 
ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
if (ret < 0)
return ret;
return 0;
}
 
/**
* drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
* @mgr: manager for this port
* @port: port to allocate a virtual channel for.
* @pbn: payload bandwidth number to request
* @slots: returned number of slots for this PBN.
*/
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
{
int ret;
 
port = drm_dp_get_validated_port_ref(mgr, port);
if (!port)
return false;
 
if (port->vcpi.vcpi > 0) {
DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
if (pbn == port->vcpi.pbn) {
*slots = port->vcpi.num_slots;
return true;
}
}
 
ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
if (ret) {
DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
goto out;
}
DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
*slots = port->vcpi.num_slots;
 
drm_dp_put_port(port);
return true;
out:
return false;
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
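 
/*
 * Illustrative sketch (editor addition): converting a mode into a VC
 * payload with the helpers in this file. The dot clock and bpp would
 * normally come from the driver's encoder state; the values below are
 * only placeholders.
 */
#if 0
static bool example_mst_compute_config(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	int pbn, slots;

	/* e.g. a 148.5 MHz dot clock at 24 bpp */
	pbn = drm_dp_calc_pbn_mode(148500, 24);
	if (drm_dp_find_vcpi_slots(mgr, pbn) < 0)
		return false;

	return drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots);
}
#endif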
 
/**
* drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
* @mgr: manager for this port
* @port: unverified pointer to a port.
*
* This just resets the number of slots for the port's VCPI for later programming.
*/
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
port = drm_dp_get_validated_port_ref(mgr, port);
if (!port)
return;
port->vcpi.num_slots = 0;
drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
 
/**
* drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
* @mgr: manager for this port
* @port: unverified port to deallocate vcpi for
*/
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
port = drm_dp_get_validated_port_ref(mgr, port);
if (!port)
return;
 
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
port->vcpi.num_slots = 0;
port->vcpi.pbn = 0;
port->vcpi.aligned_pbn = 0;
port->vcpi.vcpi = 0;
drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
 
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
int id, struct drm_dp_payload *payload)
{
u8 payload_alloc[3], status;
int ret;
int retries = 0;
 
drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
DP_PAYLOAD_TABLE_UPDATED);
 
payload_alloc[0] = id;
payload_alloc[1] = payload->start_slot;
payload_alloc[2] = payload->num_slots;
 
ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
if (ret != 3) {
DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
goto fail;
}
 
retry:
ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
if (ret < 0) {
DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
goto fail;
}
 
if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
retries++;
if (retries < 20) {
usleep_range(10000, 20000);
goto retry;
}
DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
ret = -EINVAL;
goto fail;
}
ret = 0;
fail:
return ret;
}
 
 
/**
* drm_dp_check_act_status() - Check ACT handled status.
* @mgr: manager to use
*
* Check the payload status bits in the DPCD for ACT handled completion.
*/
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
u8 status;
int ret;
int count = 0;
 
do {
ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
 
if (ret < 0) {
DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
goto fail;
}
 
if (status & DP_PAYLOAD_ACT_HANDLED)
break;
count++;
udelay(100);
 
} while (count < 30);
 
if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
ret = -EINVAL;
goto fail;
}
return 0;
fail:
return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);
 
/**
* drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
* @clock: dot clock for the mode
* @bpp: bpp for the mode.
*
* This uses the formula in the spec to calculate the PBN value for a mode.
*/
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
fixed20_12 pix_bw;
fixed20_12 fbpp;
fixed20_12 result;
fixed20_12 margin, tmp;
u32 res;
 
pix_bw.full = dfixed_const(clock);
fbpp.full = dfixed_const(bpp);
tmp.full = dfixed_const(8);
fbpp.full = dfixed_div(fbpp, tmp);
 
result.full = dfixed_mul(pix_bw, fbpp);
margin.full = dfixed_const(54);
tmp.full = dfixed_const(64);
margin.full = dfixed_div(margin, tmp);
result.full = dfixed_div(result, margin);
 
margin.full = dfixed_const(1006);
tmp.full = dfixed_const(1000);
margin.full = dfixed_div(margin, tmp);
result.full = dfixed_mul(result, margin);
 
result.full = dfixed_div(result, tmp);
result.full = dfixed_ceil(result);
res = dfixed_trunc(result);
return res;
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
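 
/*
 * Worked example (editor addition): for a 154 MHz dot clock at 30 bpp the
 * fixed point math above evaluates to
 *   154000 * (30 / 8) * (64 / 54) * 1.006 / 1000 ~= 688.6,
 * which rounds up to the 689 PBN checked by test_calc_pbn_mode() below.
 */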
 
static int test_calc_pbn_mode(void)
{
int ret;
ret = drm_dp_calc_pbn_mode(154000, 30);
if (ret != 689)
return -EINVAL;
ret = drm_dp_calc_pbn_mode(234000, 30);
if (ret != 1047)
return -EINVAL;
return 0;
}
 
/* we want to kick the TX after we've acked the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
// queue_work(system_long_wq, &mgr->tx_work);
}
 
static void drm_dp_mst_dump_mstb(struct seq_file *m,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_port *port;
int tabs = mstb->lct;
char prefix[10];
int i;
 
for (i = 0; i < tabs; i++)
prefix[i] = '\t';
prefix[i] = '\0';
 
// seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
// list_for_each_entry(port, &mstb->ports, next) {
// seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
// if (port->mstb)
// drm_dp_mst_dump_mstb(m, port->mstb);
// }
}
 
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
char *buf)
{
int ret;
int i;
for (i = 0; i < 4; i++) {
ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
if (ret != 16)
break;
}
if (i == 4)
return true;
return false;
}
 
/**
* drm_dp_mst_dump_topology(): dump topology to seq file.
* @m: seq_file to dump output to
* @mgr: manager to dump current topology for.
*
* helper to dump MST topology to a seq file for debugfs.
*/
void drm_dp_mst_dump_topology(struct seq_file *m,
struct drm_dp_mst_topology_mgr *mgr)
{
int i;
struct drm_dp_mst_port *port;
mutex_lock(&mgr->lock);
if (mgr->mst_primary)
drm_dp_mst_dump_mstb(m, mgr->mst_primary);
 
/* dump VCPIs */
mutex_unlock(&mgr->lock);
 
 
 
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);
 
static void drm_dp_tx_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
 
mutex_lock(&mgr->qlock);
if (mgr->tx_down_in_progress)
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
 
/**
* drm_dp_mst_topology_mgr_init - initialise a topology manager
* @mgr: manager struct to initialise
* @dev: device providing this structure - for i2c addition.
* @aux: DP helper aux channel to talk to this device
* @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
* @max_payloads: maximum number of payloads this GPU can source
* @conn_base_id: the connector object ID the MST device is connected to.
*
* Return 0 for success, or negative error code on failure
*/
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
struct device *dev, struct drm_dp_aux *aux,
int max_dpcd_transaction_bytes,
int max_payloads, int conn_base_id)
{
mutex_init(&mgr->lock);
mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock);
INIT_LIST_HEAD(&mgr->tx_msg_upq);
INIT_LIST_HEAD(&mgr->tx_msg_downq);
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
// init_waitqueue_head(&mgr->tx_waitq);
mgr->dev = dev;
mgr->aux = aux;
mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
mgr->max_payloads = max_payloads;
mgr->conn_base_id = conn_base_id;
mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
if (!mgr->payloads)
return -ENOMEM;
mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
if (!mgr->proposed_vcpis)
return -ENOMEM;
set_bit(0, &mgr->payload_mask);
test_calc_pbn_mode();
return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
 
/**
* drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
* @mgr: manager to destroy
*/
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
mutex_lock(&mgr->payload_lock);
kfree(mgr->payloads);
mgr->payloads = NULL;
kfree(mgr->proposed_vcpis);
mgr->proposed_vcpis = NULL;
mutex_unlock(&mgr->payload_lock);
mgr->dev = NULL;
mgr->aux = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
 
/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
int num)
{
struct drm_dp_aux *aux = adapter->algo_data;
struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
struct drm_dp_mst_branch *mstb;
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
unsigned int i;
bool reading = false;
struct drm_dp_sideband_msg_req_body msg;
struct drm_dp_sideband_msg_tx *txmsg = NULL;
int ret;
 
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
if (!mstb)
return -EREMOTEIO;
 
/* construct i2c msg */
/* see if last msg is a read */
if (msgs[num - 1].flags & I2C_M_RD)
reading = true;
 
if (!reading) {
DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
ret = -EIO;
goto out;
}
 
msg.req_type = DP_REMOTE_I2C_READ;
msg.u.i2c_read.num_transactions = num - 1;
msg.u.i2c_read.port_number = port->port_num;
for (i = 0; i < num - 1; i++) {
msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
}
msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
 
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
ret = -ENOMEM;
goto out;
}
 
txmsg->dst = mstb;
drm_dp_encode_sideband_req(&msg, txmsg);
 
drm_dp_queue_down_tx(mgr, txmsg);
 
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
 
if (txmsg->reply.reply_type == 1) { /* got a NAK back */
ret = -EREMOTEIO;
goto out;
}
if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
ret = -EIO;
goto out;
}
memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
ret = num;
}
out:
kfree(txmsg);
drm_dp_put_mst_branch_device(mstb);
return ret;
}
 
static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
I2C_FUNC_10BIT_ADDR;
}
 
static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
.functionality = drm_dp_mst_i2c_functionality,
.master_xfer = drm_dp_mst_i2c_xfer,
};
 
/**
* drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
* @aux: DisplayPort AUX channel
*
* Returns 0 on success or a negative error code on failure.
*/
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
aux->ddc.algo = &drm_dp_mst_i2c_algo;
aux->ddc.algo_data = aux;
aux->ddc.retries = 3;
 
aux->ddc.class = I2C_CLASS_DDC;
aux->ddc.owner = THIS_MODULE;
aux->ddc.dev.parent = aux->dev;
 
return i2c_add_adapter(&aux->ddc);
}
 
/**
* drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
* @aux: DisplayPort AUX channel
*/
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
i2c_del_adapter(&aux->ddc);
}
/drivers/video/drm/drm_drv.c
1,31 → 1,11
/**
* \file drm_drv.c
* Generic driver template
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*
* To use this template, you must at least define the following (samples
* given for the MGA driver):
*
* \code
* #define DRIVER_AUTHOR "VA Linux Systems, Inc."
*
* #define DRIVER_NAME "mga"
* #define DRIVER_DESC "Matrox G200/G400"
* #define DRIVER_DATE "20001127"
*
* #define drm_x mga_##x
* \endcode
*/
 
/*
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
* Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Author Rickard E. (Rik) Faith <faith@valinux.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
40,10 → 20,10
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
#include <linux/slab.h>
/drivers/video/drm/drm_edid.c
70,6 → 70,8
#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
/* Force 8bpc */
#define EDID_QUIRK_FORCE_8BPC (1 << 8)
/* Force 12bpc */
#define EDID_QUIRK_FORCE_12BPC (1 << 9)
 
struct detailed_mode_closure {
struct drm_connector *connector;
125,6 → 127,9
{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
 
/* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
{ "SNY", 0x2541, EDID_QUIRK_FORCE_12BPC },
 
/* ViewSonic VA2026w */
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
 
984,9 → 989,13
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
 
/*
* Sanity check the header of the base EDID block. Return 8 if the header
* is perfect, down to 0 if it's totally wrong.
/**
* drm_edid_header_is_valid - sanity check the header of the base EDID block
* @raw_edid: pointer to raw base EDID block
*
* Sanity check the header of the base EDID block.
*
* Return: 8 if the header is perfect, down to 0 if it's totally wrong.
*/
int drm_edid_header_is_valid(const u8 *raw_edid)
{
1005,9 → 1014,16
MODULE_PARM_DESC(edid_fixup,
"Minimum number of valid EDID header bytes (0-8, default 6)");
 
/*
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
/**
* drm_edid_block_valid - Sanity check the EDID block (base or extension)
* @raw_edid: pointer to raw EDID block
* @block: type of block to validate (0 for base, extension otherwise)
* @print_bad_edid: if true, dump bad EDID blocks to the console
*
* Validate a base or extension EDID block and optionally dump bad blocks to
* the console.
*
* Return: True if the block is valid, false otherwise.
*/
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
{
1077,6 → 1093,8
* @edid: EDID data
*
* Sanity-check an entire EDID record (including extensions)
*
* Return: True if the EDID data is valid, false otherwise.
*/
bool drm_edid_is_valid(struct edid *edid)
{
1096,14 → 1114,15
 
#define DDC_SEGMENT_ADDR 0x30
/**
* Get EDID information via I2C.
* drm_do_probe_ddc_edid() - get EDID information via I2C
* @adapter: I2C device adaptor
* @buf: EDID data buffer to be filled
* @block: 128 byte EDID block to start fetching from
* @len: EDID data buffer length to fetch
*
* \param adapter : i2c device adaptor
* \param buf : EDID data buffer to be filled
* \param len : EDID data buffer length
* \return 0 on success or -1 on failure.
* Try to fetch EDID information by calling I2C driver functions.
*
* Try to fetch EDID information by calling i2c driver function.
* Return: 0 on success or -1 on failure.
*/
static int
drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
1114,7 → 1133,8
unsigned char xfers = segment ? 3 : 2;
int ret, retries = 5;
 
/* The core i2c driver will automatically retry the transfer if the
/*
* The core I2C driver will automatically retry the transfer if the
* adapter reports EAGAIN. However, we find that bit-banging transfers
* are susceptible to errors under a heavily loaded machine and
* generate spurious NAKs and timeouts. Retrying the transfer
1141,8 → 1161,8
};
 
/*
* Avoid sending the segment addr to not upset non-compliant ddc
* monitors.
* Avoid sending the segment addr to not upset non-compliant
* DDC monitors.
*/
ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
 
1212,7 → 1232,7
if (i == 4 && print_bad_edid) {
dev_warn(connector->dev->dev,
"%s: Ignoring invalid EDID block %d.\n",
drm_get_connector_name(connector), j);
connector->name, j);
 
connector->bad_edid_counter++;
}
1232,7 → 1252,7
carp:
if (print_bad_edid) {
dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
drm_get_connector_name(connector), j);
connector->name, j);
}
connector->bad_edid_counter++;
 
1242,10 → 1262,10
}
 
/**
* Probe DDC presence.
* drm_probe_ddc() - probe DDC presence
* @adapter: I2C adapter to probe
*
* \param adapter : i2c device adaptor
* \return 1 on success
* Return: True on success, false on failure.
*/
bool
drm_probe_ddc(struct i2c_adapter *adapter)
1259,12 → 1279,12
/**
* drm_get_edid - get EDID data, if available
* @connector: connector we're probing
* @adapter: i2c adapter to use for DDC
* @adapter: I2C adapter to use for DDC
*
* Poke the given i2c channel to grab EDID data if possible. If found,
* Poke the given I2C channel to grab EDID data if possible. If found,
* attach it to the connector.
*
* Return edid data or NULL if we couldn't find any.
* Return: Pointer to valid EDID or NULL if we couldn't find any.
*/
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter)
1282,7 → 1302,7
* drm_edid_duplicate - duplicate an EDID and the extensions
* @edid: EDID to duplicate
*
* Return duplicate edid or NULL on allocation failure.
* Return: Pointer to duplicated EDID or NULL on allocation failure.
*/
struct edid *drm_edid_duplicate(const struct edid *edid)
{
1405,7 → 1425,8
* @rb: Mode reduced-blanking-ness
*
* Walk the DMT mode list looking for a match for the given parameters.
* Return a newly allocated copy of the mode, or NULL if not found.
*
* Return: A newly allocated copy of the mode, or NULL if not found.
*/
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,
1586,8 → 1607,9
 
/**
* drm_mode_std - convert standard mode info (width, height, refresh) into mode
* @connector: connector of for the EDID block
* @edid: EDID block to scan
* @t: standard timing params
* @timing_level: standard timing level
*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT/GTF/DMT.
1594,7 → 1616,7
*/
static struct drm_display_mode *
drm_mode_std(struct drm_connector *connector, struct edid *edid,
struct std_timing *t, int revision)
struct std_timing *t)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *m, *mode = NULL;
1615,7 → 1637,7
vrefresh_rate = vfreq + 60;
/* the vdisplay is calculated based on the aspect ratio */
if (aspect_ratio == 0) {
if (revision < 3)
if (edid->revision < 3)
vsize = hsize;
else
vsize = (hsize * 10) / 16;
2132,6 → 2154,7
 
/**
* add_established_modes - get est. modes from EDID and add them
* @connector: connector to add mode(s) to
* @edid: EDID block to scan
*
* Each EDID block contains a bitmap of the supported "established modes" list
2182,8 → 2205,7
struct drm_display_mode *newmode;
 
std = &data->data.timings[i];
newmode = drm_mode_std(connector, edid, std,
edid->revision);
newmode = drm_mode_std(connector, edid, std);
if (newmode) {
drm_mode_probed_add(connector, newmode);
closure->modes++;
2194,6 → 2216,7
 
/**
* add_standard_modes - get std. modes from EDID and add them
* @connector: connector to add mode(s) to
* @edid: EDID block to scan
*
* Standard modes can be calculated using the appropriate standard (DMT,
2211,8 → 2234,7
struct drm_display_mode *newmode;
 
newmode = drm_mode_std(connector, edid,
&edid->standard_timings[i],
edid->revision);
&edid->standard_timings[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
2415,7 → 2437,7
* drm_match_cea_mode - look for a CEA mode matching given mode
* @to_match: display mode
*
* Returns the CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
* Return: The CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
* mode.
*/
u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
2442,6 → 2464,22
}
EXPORT_SYMBOL(drm_match_cea_mode);
 
/**
* drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to
* the input VIC from the CEA mode list
* @video_code: ID given to each of the CEA modes
*
* Return: The picture aspect ratio.
*/
enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
{
/* return picture aspect ratio for video_code - 1 to access the
* right array element
*/
return edid_cea_modes[video_code-1].picture_aspect_ratio;
}
EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
 
/*
* Calculate the alternate clock for HDMI modes (those from the HDMI vendor
* specific block).
2580,6 → 2618,9
return NULL;
 
newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
if (!newmode)
return NULL;
 
newmode->vrefresh = 0;
 
return newmode;
3010,11 → 3051,9
* @connector: connector corresponding to the HDMI/DP sink
* @edid: EDID to parse
*
* Fill the ELD (EDID-Like Data) buffer for passing to the audio driver.
* Some ELD fields are left to the graphics driver caller:
* - Conn_Type
* - HDCP
* - Port_ID
* Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The
* Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to
* fill in.
*/
void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
{
3098,9 → 3137,10
* @sads: pointer that will be set to the extracted SADs
*
* Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it.
* Note: returned pointer needs to be kfreed
*
* Return number of found SADs or negative number on error.
* Note: The returned pointer needs to be freed using kfree().
*
* Return: The number of found SADs or negative number on error.
*/
int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
{
3157,9 → 3197,11
* @sadb: pointer to the speaker block
*
* Looks for CEA EDID block and extracts the Speaker Allocation Data Block from it.
* Note: returned pointer needs to be kfreed
*
* Return number of found Speaker Allocation Blocks or negative number on error.
* Note: The returned pointer needs to be freed using kfree().
*
* Return: The number of found Speaker Allocation Blocks or negative number on
* error.
*/
int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
{
3191,10 → 3233,9
 
/* Speaker Allocation Data Block */
if (dbl == 3) {
*sadb = kmalloc(dbl, GFP_KERNEL);
*sadb = kmemdup(&db[1], dbl, GFP_KERNEL);
if (!*sadb)
return -ENOMEM;
memcpy(*sadb, &db[1], dbl);
count = dbl;
break;
}
3206,9 → 3247,12
EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
 
/**
* drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
* drm_av_sync_delay - compute the HDMI/DP sink audio-video sync delay
* @connector: connector associated with the HDMI/DP sink
* @mode: the display mode
*
* Return: The HDMI/DP sink's audio-video sync delay in milliseconds or 0 if
* the sink doesn't support audio or video.
*/
int drm_av_sync_delay(struct drm_connector *connector,
struct drm_display_mode *mode)
3250,6 → 3294,9
*
* It's possible for one encoder to be associated with multiple HDMI/DP sinks.
* The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
*
* Return: The connector associated with the first HDMI/DP sink that has ELD
* attached to it.
*/
struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode)
3257,6 → 3304,9
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
 
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder && connector->eld[0])
return connector;
3266,11 → 3316,12
EXPORT_SYMBOL(drm_select_eld);
 
/**
* drm_detect_hdmi_monitor - detect whether monitor is hdmi.
* drm_detect_hdmi_monitor - detect whether monitor is HDMI
* @edid: monitor EDID information
*
* Parse the CEA extension according to CEA-861-B.
* Return true if HDMI, false if not or unknown.
*
* Return: True if the monitor is HDMI, false if not or unknown.
*/
bool drm_detect_hdmi_monitor(struct edid *edid)
{
3300,6 → 3351,7
 
/**
* drm_detect_monitor_audio - check monitor audio capability
* @edid: EDID block to scan
*
* Monitor should have CEA extension block.
* If monitor has 'basic audio', but no CEA audio blocks, it's 'basic
3307,6 → 3359,7
* audio format, assume at least 'basic audio' support, even if 'basic
* audio' is not defined in EDID.
*
* Return: True if the monitor supports audio, false otherwise.
*/
bool drm_detect_monitor_audio(struct edid *edid)
{
3345,10 → 3398,13
 
/**
* drm_rgb_quant_range_selectable - is RGB quantization range selectable?
* @edid: EDID block to scan
*
* Check whether the monitor reports the RGB quantization range selection
* as supported. The AVI infoframe can then be used to inform the monitor
* which quantization range (full or limited) is used.
*
* Return: True if the RGB quantization range is selectable, false otherwise.
*/
bool drm_rgb_quant_range_selectable(struct edid *edid)
{
3375,9 → 3431,111
EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
 
/**
* drm_assign_hdmi_deep_color_info - detect whether monitor supports
* hdmi deep color modes and update drm_display_info if so.
*
* @edid: monitor EDID information
* @info: Updated with maximum supported deep color bpc and color format
* if deep color supported.
* @connector: connector corresponding to the HDMI sink, used for debug output
*
* Parse the CEA extension according to CEA-861-B.
* Return true if HDMI deep color supported, false if not or unknown.
*/
static bool drm_assign_hdmi_deep_color_info(struct edid *edid,
struct drm_display_info *info,
struct drm_connector *connector)
{
u8 *edid_ext, *hdmi;
int i;
int start_offset, end_offset;
unsigned int dc_bpc = 0;
 
edid_ext = drm_find_cea_extension(edid);
if (!edid_ext)
return false;
 
if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
return false;
 
/*
* Because HDMI identifier is in Vendor Specific Block,
* search it from all data blocks of CEA extension.
*/
for_each_cea_db(edid_ext, i, start_offset, end_offset) {
if (cea_db_is_hdmi_vsdb(&edid_ext[i])) {
/* HDMI supports at least 8 bpc */
info->bpc = 8;
 
hdmi = &edid_ext[i];
if (cea_db_payload_len(hdmi) < 6)
return false;
 
if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
dc_bpc = 10;
info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_30;
DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
connector->name);
}
 
if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
dc_bpc = 12;
info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_36;
DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
connector->name);
}
 
if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
dc_bpc = 16;
info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_48;
DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
connector->name);
}
 
if (dc_bpc > 0) {
DRM_DEBUG("%s: Assigning HDMI sink color depth as %d bpc.\n",
connector->name, dc_bpc);
info->bpc = dc_bpc;
 
/*
* Deep color support mandates RGB444 support for all video
* modes and forbids YCRCB422 support for all video modes per
* HDMI 1.3 spec.
*/
info->color_formats = DRM_COLOR_FORMAT_RGB444;
 
/* YCRCB444 is optional according to spec. */
if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
connector->name);
}
 
/*
* Spec says that if any deep color mode is supported at all,
* then deep color 36 bit must be supported.
*/
if (!(hdmi[6] & DRM_EDID_HDMI_DC_36)) {
DRM_DEBUG("%s: HDMI sink should do DC_36, but does not!\n",
connector->name);
}
 
return true;
}
else {
DRM_DEBUG("%s: No deep color support on this HDMI sink.\n",
connector->name);
}
}
}
 
return false;
}
 
/**
* drm_add_display_info - pull display info out if present
* @edid: EDID data
* @info: display info (attached to connector)
* @connector: connector whose edid is used to build display info
*
* Grab any available display info and stuff it into the drm_display_info
* structure that's part of the connector. Useful for tracking bpp and
3384,7 → 3542,8
* color spaces.
*/
static void drm_add_display_info(struct edid *edid,
struct drm_display_info *info)
struct drm_display_info *info,
struct drm_connector *connector)
{
u8 *edid_ext;
 
3414,6 → 3573,9
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
 
/* HDMI deep color modes supported? Assign to info, if so */
drm_assign_hdmi_deep_color_info(edid, info, connector);
 
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
return;
3443,6 → 3605,9
break;
}
 
DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
connector->name, info->bpc);
 
info->color_formats |= DRM_COLOR_FORMAT_RGB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
3453,11 → 3618,11
/**
* drm_add_edid_modes - add modes from EDID data, if available
* @connector: connector we're probing
* @edid: edid data
* @edid: EDID data
*
* Add the specified modes to the connector's mode list.
*
* Return number of modes added or 0 if we couldn't find any.
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
{
3469,7 → 3634,7
}
if (!drm_edid_is_valid(edid)) {
dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
drm_get_connector_name(connector));
connector->name);
return 0;
}
 
3501,11 → 3666,14
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
 
drm_add_display_info(edid, &connector->display_info);
drm_add_display_info(edid, &connector->display_info, connector);
 
if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;
 
if (quirks & EDID_QUIRK_FORCE_12BPC)
connector->display_info.bpc = 12;
 
return num_modes;
}
EXPORT_SYMBOL(drm_add_edid_modes);
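 
/*
 * Illustrative sketch (not part of this file): a typical connector
 * ->get_modes() implementation built on drm_get_edid() and
 * drm_add_edid_modes(). The example_connector wrapper and its ddc
 * i2c_adapter member are assumptions made for the example.
 */
#if 0
static int example_connector_get_modes(struct drm_connector *connector)
{
	struct example_connector *ec =
		container_of(connector, struct example_connector, base);
	struct edid *edid;
	int count = 0;

	edid = drm_get_edid(connector, &ec->ddc);
	if (edid) {
		drm_mode_connector_update_edid_property(connector, edid);
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}
	return count;
}
#endif
 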
3519,7 → 3687,7
* Add the specified modes to the connector's mode list. Only when the
* hdisplay/vdisplay is not beyond the given limit, it will be added.
*
* Return number of modes added or 0 if we couldn't find any.
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay)
3558,6 → 3726,15
}
EXPORT_SYMBOL(drm_add_modes_noedid);
 
/**
* drm_set_preferred_mode - Sets the preferred mode of a connector
* @connector: connector whose mode list should be processed
* @hpref: horizontal resolution of preferred mode
* @vpref: vertical resolution of preferred mode
*
* Marks a mode as preferred if it matches the resolution specified by @hpref
* and @vpref.
*/
void drm_set_preferred_mode(struct drm_connector *connector,
int hpref, int vpref)
{
3564,8 → 3741,8
struct drm_display_mode *mode;
 
list_for_each_entry(mode, &connector->probed_modes, head) {
if (drm_mode_width(mode) == hpref &&
drm_mode_height(mode) == vpref)
if (mode->hdisplay == hpref &&
mode->vdisplay == vpref)
mode->type |= DRM_MODE_TYPE_PREFERRED;
}
}
3577,7 → 3754,7
* @frame: HDMI AVI infoframe
* @mode: DRM display mode
*
* Returns 0 on success or a negative error code on failure.
* Return: 0 on success or a negative error code on failure.
*/
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
3598,7 → 3775,20
frame->video_code = drm_match_cea_mode(mode);
 
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
 
/*
* Populate picture aspect ratio from either
* user input (if specified) or from the CEA mode list.
*/
if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 ||
mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9)
frame->picture_aspect = mode->picture_aspect_ratio;
else if (frame->video_code > 0)
frame->picture_aspect = drm_get_cea_aspect_ratio(
frame->video_code);
 
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
 
return 0;
}
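 
/*
 * Usage sketch (assumption, not taken from a driver): building and packing an
 * AVI infoframe for the mode being set. The example_encoder type and the
 * write_infoframe() call are placeholders for whatever hardware-specific
 * mechanism a driver uses to send the packed bytes to its HDMI encoder.
 */
#if 0
static void example_set_avi_infoframe(struct example_encoder *enc,
				      struct drm_display_mode *mode)
{
	struct hdmi_avi_infoframe frame;
	u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
	ssize_t len;

	if (drm_hdmi_avi_infoframe_from_display_mode(&frame, mode) < 0)
		return;

	len = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
	if (len > 0)
		write_infoframe(enc, buf, len);	/* hypothetical helper */
}
#endif
 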
3641,7 → 3831,7
* 4k or stereoscopic 3D mode. So when giving any other mode as input this
* function will return -EINVAL, error that can be safely ignored.
*
* Returns 0 on success or a negative error code on failure.
* Return: 0 on success or a negative error code on failure.
*/
int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
/drivers/video/drm/drm_fb_helper.c
45,14 → 45,15
* DOC: fbdev helpers
*
* The fb helper functions are useful to provide an fbdev on top of a drm kernel
* mode setting driver. They can be used mostly independantely from the crtc
* mode setting driver. They can be used mostly independently from the crtc
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
*
* Initialization is done as a three-step process with drm_fb_helper_init(),
* drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
* Drivers with fancier requirements than the default beheviour can override the
* second step with their own code. Teardown is done with drm_fb_helper_fini().
* Initialization is done as a four-step process with drm_fb_helper_prepare(),
* drm_fb_helper_init(), drm_fb_helper_single_add_all_connectors() and
* drm_fb_helper_initial_config(). Drivers with fancier requirements than the
* default behaviour can override the third step with their own code.
* Teardown is done with drm_fb_helper_fini().
*
* At runtime drivers should restore the fbdev console by calling
* drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
59,10 → 60,23
* should also notify the fb helper code from updates to the output
* configuration by calling drm_fb_helper_hotplug_event(). For easier
* integration with the output polling code in drm_crtc_helper.c the modeset
* code proves a ->output_poll_changed callback.
* code provides a ->output_poll_changed callback.
*
* All other functions exported by the fb helper library can be used to
* implement the fbdev driver interface by the driver.
*
* It is possible, though perhaps somewhat tricky, to implement race-free
* hotplug detection using the fbdev helpers. The drm_fb_helper_prepare()
* helper must be called first to initialize the minimum required to make
* hotplug detection work. Drivers also need to make sure to properly set up
* the dev->mode_config.funcs member. After calling drm_kms_helper_poll_init()
* it is safe to enable interrupts and start processing hotplug events. At the
* same time, drivers should initialize all modeset objects such as CRTCs,
* encoders and connectors. To finish up the fbdev helper initialization, the
* drm_fb_helper_init() function is called. To probe for all attached displays
* and set up an initial configuration using the detected hardware, drivers
* should call drm_fb_helper_single_add_all_connectors() followed by
* drm_fb_helper_initial_config().
*/
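 
/*
 * Sketch of the init sequence described above (illustrative only; the
 * example_fbdev wrapper, its helper member, the example_fb_helper_funcs
 * table and the chosen bpp are assumptions, not requirements of the API):
 */
#if 0
struct example_fbdev {
	struct drm_fb_helper helper;
};

static int example_fbdev_init(struct drm_device *dev,
			      struct example_fbdev *fbdev)
{
	int ret;

	drm_fb_helper_prepare(dev, &fbdev->helper, &example_fb_helper_funcs);

	ret = drm_fb_helper_init(dev, &fbdev->helper,
				 dev->mode_config.num_crtc, 4);
	if (ret)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(&fbdev->helper);
	if (ret)
		goto err_fini;

	/* probe connected displays and set up the initial configuration */
	drm_fb_helper_initial_config(&fbdev->helper, 32);
	return 0;

err_fini:
	drm_fb_helper_fini(&fbdev->helper);
	return ret;
}
#endif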
 
/**
105,6 → 119,57
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
 
int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector)
{
struct drm_fb_helper_connector **temp;
struct drm_fb_helper_connector *fb_helper_connector;
 
WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector) * (fb_helper->connector_count + 1), GFP_KERNEL);
if (!temp)
return -ENOMEM;
 
fb_helper->connector_info_alloc_count = fb_helper->connector_count + 1;
fb_helper->connector_info = temp;
}
 
 
fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
if (!fb_helper_connector)
return -ENOMEM;
 
fb_helper_connector->connector = connector;
fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
 
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
struct drm_fb_helper_connector *fb_helper_connector;
int i, j;
 
WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
 
for (i = 0; i < fb_helper->connector_count; i++) {
if (fb_helper->connector_info[i]->connector == connector)
break;
}
 
if (i == fb_helper->connector_count)
return -EINVAL;
fb_helper_connector = fb_helper->connector_info[i];
 
for (j = i + 1; j < fb_helper->connector_count; j++) {
fb_helper->connector_info[j - 1] = fb_helper->connector_info[j];
}
fb_helper->connector_count--;
kfree(fb_helper_connector);
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
{
uint16_t *r_base, *g_base, *b_base;
136,6 → 201,55
}
 
 
static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct drm_plane *plane;
bool error = false;
int i;
 
drm_warn_on_modeset_not_all_locked(dev);
 
list_for_each_entry(plane, &dev->mode_config.plane_list, head)
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
 
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
struct drm_crtc *crtc = mode_set->crtc;
int ret;
 
if (crtc->funcs->cursor_set) {
ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
if (ret)
error = true;
}
 
ret = drm_mode_set_config_internal(mode_set);
if (ret)
error = true;
}
return error;
}
/**
* drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
* @fb_helper: fbcon to restore
*
* This should be called from the driver's drm ->lastclose callback
* when implementing an fbcon on top of kms using this helper. This ensures that
* the user isn't greeted with a black screen when e.g. X dies.
*/
bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
bool ret;
drm_modeset_lock_all(dev);
ret = restore_fbdev_mode(fb_helper);
drm_modeset_unlock_all(dev);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
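 
/*
 * Sketch of the ->lastclose usage mentioned in the kerneldoc above
 * (example_drm_private and its fbdev member are assumptions):
 */
#if 0
static void example_driver_lastclose(struct drm_device *dev)
{
	struct example_drm_private *priv = dev->dev_private;

	/* switch back to the fbdev framebuffer when the last DRM client exits */
	drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fbdev->helper);
}
#endif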
 
static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
143,9 → 257,9
int bound = 0, crtcs_bound = 0;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->fb)
if (crtc->primary->fb)
crtcs_bound++;
if (crtc->fb == fb_helper->fb)
if (crtc->primary->fb == fb_helper->fb)
bound++;
}
 
268,6 → 382,24
}
 
/**
* drm_fb_helper_prepare - setup a drm_fb_helper structure
* @dev: DRM device
* @helper: driver-allocated fbdev helper structure to set up
* @funcs: pointer to structure of functions associated with this helper
*
* Sets up the bare minimum to make the framebuffer helper usable. This is
* useful to implement race-free initialization of the polling helpers.
*/
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs)
{
INIT_LIST_HEAD(&helper->kernel_fb_list);
helper->funcs = funcs;
helper->dev = dev;
}
EXPORT_SYMBOL(drm_fb_helper_prepare);
 
/**
* drm_fb_helper_init - initialize a drm_fb_helper structure
* @dev: drm device
* @fb_helper: driver-allocated fbdev helper structure to initialize
279,8 → 411,7
* nor register the fbdev. This is only done in drm_fb_helper_initial_config()
* to give drivers more control over the exact init sequence.
*
* Drivers must set fb_helper->funcs before calling
* drm_fb_helper_initial_config().
* Drivers must call drm_fb_helper_prepare() before calling this function.
*
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
292,10 → 423,9
struct drm_crtc *crtc;
int i;
 
fb_helper->dev = dev;
if (!max_conn_count)
return -EINVAL;
 
INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
 
fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
if (!fb_helper->crtc_info)
return -ENOMEM;
306,6 → 436,7
kfree(fb_helper->crtc_info);
return -ENOMEM;
}
fb_helper->connector_info_alloc_count = dev->mode_config.num_connector;
fb_helper->connector_count = 0;
 
for (i = 0; i < crtc_count; i++) {
566,10 → 697,7
int drm_fb_helper_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct fb_var_screeninfo *var = &info->var;
int ret;
int i;
 
if (var->pixclock != 0) {
DRM_ERROR("PIXEL CLOCK SET\n");
576,15 → 704,7
return -EINVAL;
}
 
drm_modeset_lock_all(dev);
for (i = 0; i < fb_helper->crtc_count; i++) {
ret = drm_mode_set_config_internal(&fb_helper->crtc_info[i].mode_set);
if (ret) {
drm_modeset_unlock_all(dev);
return ret;
}
}
drm_modeset_unlock_all(dev);
drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
 
if (fb_helper->delayed_hotplug) {
fb_helper->delayed_hotplug = false;
770,7 → 890,6
info->fix.ypanstep = 1; /* doing it in hw */
info->fix.ywrapstep = 0;
info->fix.accel = FB_ACCEL_NONE;
info->fix.type_aux = 0;
 
info->fix.line_length = pitch;
return;
881,13 → 1000,13
return count;
}
 
static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
{
struct drm_display_mode *mode;
 
list_for_each_entry(mode, &fb_connector->connector->modes, head) {
if (drm_mode_width(mode) > width ||
drm_mode_height(mode) > height)
if (mode->hdisplay > width ||
mode->vdisplay > height)
continue;
if (mode->type & DRM_MODE_TYPE_PREFERRED)
return mode;
894,6 → 1013,7
}
return NULL;
}
EXPORT_SYMBOL(drm_has_preferred_mode);
 
static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
{
902,11 → 1022,12
return cmdline_mode->specified;
}
 
static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
int width, int height)
{
struct drm_cmdline_mode *cmdline_mode;
struct drm_display_mode *mode = NULL;
bool prefer_non_interlace;
 
return NULL;
 
920,6 → 1041,8
if (cmdline_mode->rb || cmdline_mode->margins)
goto create_mode;
 
prefer_non_interlace = !cmdline_mode->interlace;
again:
list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
/* check width/height */
if (mode->hdisplay != cmdline_mode->xres ||
934,10 → 1057,18
if (cmdline_mode->interlace) {
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
} else if (prefer_non_interlace) {
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
continue;
}
return mode;
}
 
if (prefer_non_interlace) {
prefer_non_interlace = false;
goto again;
}
 
create_mode:
mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
cmdline_mode);
944,6 → 1075,7
list_add(&mode->head, &fb_helper_conn->connector->modes);
return mode;
}
EXPORT_SYMBOL(drm_pick_cmdline_mode);
 
static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
1286,9 → 1418,11
 
// drm_fb_helper_parse_command_line(fb_helper);
 
mutex_lock(&dev->mode_config.mutex);
count = drm_fb_helper_probe_connector_modes(fb_helper,
dev->mode_config.max_width,
dev->mode_config.max_height);
mutex_unlock(&dev->mode_config.mutex);
/*
* we shouldn't end up with no modes here.
*/
1314,8 → 1448,10
* either the output polling work or a work item launched from the driver's
* hotplug interrupt).
*
* Note that the driver must ensure that this is only called _after_ the fb has
* been fully set up, i.e. after the call to drm_fb_helper_initial_config.
* Note that drivers may call this even before calling
* drm_fb_helper_initial_config but only after drm_fb_helper_init. This allows
* for a race-free fbcon setup and will make sure that the fbdev emulation will
* not miss any hotplug events.
*
* RETURNS:
* 0 on success and a non-zero error code otherwise.
1325,11 → 1461,8
struct drm_device *dev = fb_helper->dev;
u32 max_width, max_height;
 
if (!fb_helper->fb)
return 0;
 
mutex_lock(&fb_helper->dev->mode_config.mutex);
if (!drm_fb_helper_is_bound(fb_helper)) {
if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
mutex_unlock(&fb_helper->dev->mode_config.mutex);
return 0;
/drivers/video/drm/drm_gem.c
82,9 → 82,9
#endif
 
/**
* Initialize the GEM device fields
* drm_gem_init - Initialize the GEM device fields
* @dev: drm_device structure to initialize
*/
 
int
drm_gem_init(struct drm_device *dev)
{
117,6 → 117,11
}
 
/**
* drm_gem_object_init - initialize an allocated shmem-backed GEM object
* @dev: drm_device the object should be initialized for
* @obj: drm_gem_object to initialize
* @size: object size
*
* Initialize an already allocated GEM object of the specified size with
* shmfs backing store.
*/
138,6 → 143,11
EXPORT_SYMBOL(drm_gem_object_init);
 
/**
* drm_gem_private_object_init - initialize an allocated private GEM object
* @dev: drm_device the object should be initialized for
* @obj: drm_gem_object to initialize
* @size: object size
*
* Initialize an already allocated GEM object of the specified size with
* no GEM provided backing store. Instead the caller is responsible for
* backing the object and handling it.
158,6 → 168,9
EXPORT_SYMBOL(drm_gem_private_object_init);
 
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
*
* Called after the last handle to the object has been closed
*
* Removes any name for the object. Note that this must be
198,7 → 211,12
}
 
/**
* Removes the mapping from handle to filp for this object.
* drm_gem_handle_delete - deletes the given file-private handle
* @filp: drm file-private structure to use for the handle look up
* @handle: userspace handle to delete
*
* Removes the GEM handle from the @filp lookup table and if this is the last
* handle also cleans up linked resources like GEM names.
*/
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
241,6 → 259,9
 
/**
* drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
* @file: drm file-private structure to remove the dumb handle from
* @dev: corresponding drm_device
* @handle: the dumb handle to remove
*
* This implements the ->dumb_destroy kms driver callback for drivers which use
* gem to manage their backing storage.
255,6 → 276,9
 
/**
* drm_gem_handle_create_tail - internal function to create a handle
* @file_priv: drm file-private structure to register the handle for
* @obj: object to register
* @handlep: pointer to return the created handle to the caller
*
* This expects the dev->object_name_lock to be held already and will drop it
* before returning. Used to avoid races in establishing new handles when
307,6 → 331,11
}
 
/**
* drm_gem_handle_create - create a gem handle for an object
* @file_priv: drm file-private structure to register the handle for
* @obj: object to register
* @handlep: pointer to return the created handle to the caller
*
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
383,18 → 412,31
* drm_gem_get_pages - helper to allocate backing pages for a GEM object
* from shmem
* @obj: obj in question
* @gfpmask: gfp mask of requested pages
*
* This reads the page-array of the shmem-backing storage of the given gem
* object. An array of pages is returned. If a page is not allocated or
* swapped-out, this will allocate/swap-in the required pages. Note that the
* whole object is covered by the page-array and pinned in memory.
*
* Use drm_gem_put_pages() to release the array and unpin all pages.
*
* This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
* If you require other GFP-masks, you have to do those allocations yourself.
*
* Note that you are not allowed to change gfp-zones during runtime. That is,
* shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
* set during initialization. If you have special zone constraints, set them
* after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
* to keep pages in the required zone during swap-in.
*/
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
struct inode *inode;
struct address_space *mapping;
struct page *p, **pages;
int i, npages;
 
/* This is the shared memory object that backs the GEM resource */
inode = file_inode(obj->filp);
mapping = inode->i_mapping;
mapping = file_inode(obj->filp)->i_mapping;
 
/* We already BUG_ON() for non-page-aligned sizes in
* drm_gem_object_init(), so we should never hit this unless
408,31 → 450,18
if (pages == NULL)
return ERR_PTR(-ENOMEM);
 
gfpmask |= mapping_gfp_mask(mapping);
 
for (i = 0; i < npages; i++) {
p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
p = shmem_read_mapping_page(mapping, i);
if (IS_ERR(p))
goto fail;
pages[i] = p;
 
/* There is a hypothetical issue w/ drivers that require
* buffer memory in the low 4GB.. if the pages are un-
* pinned, and swapped out, they can end up swapped back
* in above 4GB. If pages are already in memory, then
* shmem_read_mapping_page_gfp will ignore the gfpmask,
* even if the already in-memory page disobeys the mask.
*
* It is only a theoretical issue today, because none of
* the devices with this limitation can be populated with
* enough memory to trigger the issue. But this BUG_ON()
* is here as a reminder in case the problem with
* shmem_read_mapping_page_gfp() isn't solved by the time
* it does become a real issue.
*
* See this thread: http://lkml.org/lkml/2011/7/11/238
/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
* correct region during swapin. Note that this requires
* __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
* so shmem can relocate pages during swapin if required.
*/
BUG_ON((gfpmask & __GFP_DMA32) &&
BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
(page_to_pfn(p) >= 0x00100000UL));
}
 
508,6 → 537,11
EXPORT_SYMBOL(drm_gem_object_lookup);
 
/**
* drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
* @dev: drm_device
* @data: ioctl data
* @file_priv: drm file-private structure
*
* Releases the handle to an mm object.
*/
int
517,6 → 551,9
struct drm_gem_close *args = data;
int ret;
 
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
 
ret = drm_gem_handle_delete(file_priv, args->handle);
 
return ret;
523,6 → 560,11
}
 
/**
* drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
* @dev: drm_device
* @data: ioctl data
* @file_priv: drm file-private structure
*
* Create a global name for an object, returning the name.
*
* Note that the name does not hold a reference; when the object
570,6 → 612,11
}
 
/**
* drm_gem_open - implementation of the GEM_OPEN ioctl
* @dev: drm_device
* @data: ioctl data
* @file_priv: drm file-private structure
*
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
610,6 → 657,10
 
#if 0
/**
* drm_gem_open - initializes GEM file-private structures at devnode open time
* @dev: drm_device which is being opened by userspace
* @file_private: drm file-private structure to set up
*
* Called at device open time, sets up the structure for handling refcounting
* of mm objects.
*/
620,7 → 671,7
spin_lock_init(&file_private->table_lock);
}
 
/**
/*
* Called at device close to release the file's
* handle references on objects.
*/
642,6 → 693,10
}
 
/**
* drm_gem_release - release file-private GEM resources
* @dev: drm_device which is being closed by userspace
* @file_private: drm file-private structure to clean up
*
* Called at close time when the filp is going away.
*
* Releases any remaining references on objects by this filp.
658,6 → 713,8
void
drm_gem_object_release(struct drm_gem_object *obj)
{
WARN_ON(obj->dma_buf);
 
if (obj->filp)
free(obj->filp);
}
664,6 → 721,9
EXPORT_SYMBOL(drm_gem_object_release);
 
/**
* drm_gem_object_free - free a GEM object
* @kref: kref of the object to free
*
* Called after the last reference to the object has been lost.
* Must be called holding struct_mutex
*
/drivers/video/drm/drm_hashtab.c
131,7 → 131,7
parent = &entry->head;
}
if (parent) {
hlist_add_after_rcu(parent, &item->head);
hlist_add_behind_rcu(&item->head, parent);
} else {
hlist_add_head_rcu(&item->head, h_list);
}
/drivers/video/drm/drm_irq.c
1,6 → 1,5
/**
* \file drm_irq.c
* IRQ support
/*
* drm_irq.c IRQ and vblank support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
57,7 → 56,63
*/
#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
 
/*
* Clear vblank timestamp buffer for a crtc.
*/
 
 
#if 0
/**
* drm_vblank_init - initialize vblank support
* @dev: drm_device
* @num_crtcs: number of crtcs supported by @dev
*
* This function initializes vblank support for @num_crtcs display pipelines.
*
* Returns:
* Zero on success or a negative error code on failure.
*/
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
int i, ret = -ENOMEM;
 
spin_lock_init(&dev->vbl_lock);
spin_lock_init(&dev->vblank_time_lock);
 
dev->num_crtcs = num_crtcs;
 
dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
if (!dev->vblank)
goto err;
 
for (i = 0; i < num_crtcs; i++) {
dev->vblank[i].dev = dev;
dev->vblank[i].crtc = i;
init_waitqueue_head(&dev->vblank[i].queue);
setup_timer(&dev->vblank[i].disable_timer, vblank_disable_fn,
(unsigned long)&dev->vblank[i]);
}
 
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
 
/* Driver specific high-precision vblank timestamping supported? */
if (dev->driver->get_vblank_timestamp)
DRM_INFO("Driver supports precise vblank timestamp query.\n");
else
DRM_INFO("No driver support for vblank timestamp query.\n");
 
dev->vblank_disable_allowed = false;
 
return 0;
 
err:
drm_vblank_cleanup(dev);
return ret;
}
EXPORT_SYMBOL(drm_vblank_init);
 
#endif
 
irqreturn_t device_irq_handler(struct drm_device *dev)
{
 
69,48 → 124,48
}
 
/**
* Install IRQ handler.
* drm_irq_install - install IRQ handler
* @dev: DRM device
* @irq: IRQ number to install the handler for
*
* \param dev DRM device.
* Initializes the IRQ related data. Installs the handler, calling the driver
* irq_preinstall() and irq_postinstall() functions before and after the
* installation.
*
* Initializes the IRQ related data. Installs the handler, calling the driver
* \c irq_preinstall() and \c irq_postinstall() functions
* before and after the installation.
* This is the simplified helper interface provided for drivers with no special
* needs. Drivers which need to install interrupt handlers for multiple
* interrupts must instead set drm_device->irq_enabled to signal the DRM core
* that vblank interrupts are available.
*
* Returns:
* Zero on success or a negative error code on failure.
*/
int drm_irq_install(struct drm_device *dev)
int drm_irq_install(struct drm_device *dev, int irq)
{
int ret;
unsigned long sh_flags = 0;
char *irqname;
 
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
 
if (drm_dev_to_irq(dev) == 0)
if (irq == 0)
return -EINVAL;
 
mutex_lock(&dev->struct_mutex);
 
/* Driver must have been initialized */
if (!dev->dev_private) {
mutex_unlock(&dev->struct_mutex);
if (!dev->dev_private)
return -EINVAL;
}
 
if (dev->irq_enabled) {
mutex_unlock(&dev->struct_mutex);
if (dev->irq_enabled)
return -EBUSY;
}
dev->irq_enabled = true;
mutex_unlock(&dev->struct_mutex);
 
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
DRM_DEBUG("irq=%d\n", irq);
 
/* Before installing handler */
if (dev->driver->irq_preinstall)
dev->driver->irq_preinstall(dev);
 
ret = !AttachIntHandler(drm_dev_to_irq(dev), device_irq_handler, (u32)dev);
ret = !AttachIntHandler(irq, device_irq_handler, (u32)dev);
 
/* After installing handler */
if (dev->driver->irq_postinstall)
117,8 → 172,10
ret = dev->driver->irq_postinstall(dev);
 
if (ret < 0) {
dev->irq_enabled = 0;
dev->irq_enabled = false;
DRM_ERROR(__FUNCTION__);
} else {
dev->irq = irq;
}
 
u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);
149,15 → 206,14
}
 
/**
* drm_calc_timestamping_constants - Calculate vblank timestamp constants
* drm_calc_timestamping_constants - calculate vblank timestamp constants
* @crtc: drm_crtc whose timestamp constants should be updated.
* @mode: display mode containing the scanout timings
*
* @crtc drm_crtc whose timestamp constants should be updated.
* @mode display mode containing the scanout timings
*
* Calculate and store various constants which are later
* needed by vblank and swap-completion timestamping, e.g,
* by drm_calc_vbltimestamp_from_scanoutpos(). They are
* derived from crtc's true scanout timing, so they take
* derived from CRTC's true scanout timing, so they take
* things like panel scaling or other adjustments into account.
*/
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
202,12 → 258,23
EXPORT_SYMBOL(drm_calc_timestamping_constants);
 
/**
* drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
* drivers. Implements calculation of exact vblank timestamps from
* given drm_display_mode timings and current video scanout position
* of a crtc. This can be called from within get_vblank_timestamp()
* implementation of a kms driver to implement the actual timestamping.
* drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
* @dev: DRM device
* @crtc: Which CRTC's vblank timestamp to retrieve
* @max_error: Desired maximum allowable error in timestamps (nanosecs)
* On return contains true maximum error of timestamp
* @vblank_time: Pointer to struct timeval which should receive the timestamp
* @flags: Flags to pass to driver:
* 0 = Default,
* DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
* @refcrtc: CRTC which defines scanout timing
* @mode: mode which defines the scanout timings
*
* Implements calculation of exact vblank timestamps from given drm_display_mode
* timings and current video scanout position of a CRTC. This can be called from
* within get_vblank_timestamp() implementation of a kms driver to implement the
* actual timestamping.
*
* Should return timestamps conforming to the OML_sync_control OpenML
* extension specification. The timestamp corresponds to the end of
* the vblank interval, aka start of scanout of topmost-leftmost display
221,21 → 288,11
* returns as no operation if a doublescan or interlaced video mode is
* active. Higher level code is expected to handle this.
*
* @dev: DRM device.
* @crtc: Which crtc's vblank timestamp to retrieve.
* @max_error: Desired maximum allowable error in timestamps (nanosecs).
* On return contains true maximum error of timestamp.
* @vblank_time: Pointer to struct timeval which should receive the timestamp.
* @flags: Flags to pass to driver:
* 0 = Default.
* DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
* @refcrtc: drm_crtc* of crtc which defines scanout timing.
* @mode: mode which defines the scanout timings
*
* Returns negative value on error, failure or if not supported in current
* Returns:
* Negative value on error, failure or if not supported in current
* video mode:
*
* -EINVAL - Invalid crtc.
* -EINVAL - Invalid CRTC.
* -EAGAIN - Temporary unavailable, e.g., called before initial modeset.
* -ENOTSUPP - Function not supported in current display mode.
* -EIO - Failed, e.g., due to failed scanout position query.
287,8 → 344,87
}
EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
 
/**
* drm_vblank_off - disable vblank events on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
*
* Drivers can use this function to shut down the vblank interrupt handling when
* disabling a crtc. This function ensures that the latest vblank frame count is
* stored so that drm_vblank_on() can restore it again.
*
* Drivers must use this function when the hardware vblank counter can get
* reset, e.g. when suspending.
*
* This is the legacy version of drm_crtc_vblank_off().
*/
void drm_vblank_off(struct drm_device *dev, int crtc)
{
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long irqflags;
unsigned int seq;
 
 
}
EXPORT_SYMBOL(drm_vblank_off);
 
/**
* drm_crtc_vblank_off - disable vblank events on a CRTC
* @crtc: CRTC in question
*
* Drivers can use this function to shut down the vblank interrupt handling when
* disabling a crtc. This function ensures that the latest vblank frame count is
* stored so that drm_vblank_on can restore it again.
*
* Drivers must use this function when the hardware vblank counter can get
* reset, e.g. when suspending.
*
* This is the native kms version of drm_vblank_off().
*/
void drm_crtc_vblank_off(struct drm_crtc *crtc)
{
drm_vblank_off(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_off);
 
/**
* drm_vblank_on - enable vblank events on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
*
* This function restores the vblank interrupt state captured with
* drm_vblank_off() again. Note that calls to drm_vblank_on() and
* drm_vblank_off() can be unbalanced and so can also be unconditionally called
* in driver load code to reflect the current hardware state of the crtc.
*
* This is the legacy version of drm_crtc_vblank_on().
*/
void drm_vblank_on(struct drm_device *dev, int crtc)
{
unsigned long irqflags;
 
}
EXPORT_SYMBOL(drm_vblank_on);
 
/**
* drm_crtc_vblank_on - enable vblank events on a CRTC
* @crtc: CRTC in question
*
* This function restores the vblank interrupt state captured with
* drm_vblank_off() again. Note that calls to drm_vblank_on() and
* drm_vblank_off() can be unbalanced and so can also be unconditionally called
* in driver load code to reflect the current hardware state of the crtc.
*
* This is the native kms version of drm_vblank_on().
*/
void drm_crtc_vblank_on(struct drm_crtc *crtc)
{
drm_vblank_on(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_on);
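 
/*
 * Sketch (illustrative assumption): pairing drm_crtc_vblank_off()/on() in a
 * driver's CRTC disable/enable hooks, as the kerneldoc above recommends, so
 * the software frame counter survives a hardware counter reset.
 */
#if 0
static void example_crtc_disable(struct drm_crtc *crtc)
{
	drm_crtc_vblank_off(crtc);
	/* ... disable planes and turn the pipe off ... */
}

static void example_crtc_enable(struct drm_crtc *crtc)
{
	/* ... program the pipe and turn it on ... */
	drm_crtc_vblank_on(crtc);
}
#endif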
 
/**
* drm_vblank_pre_modeset - account for vblanks across mode sets
* @dev: DRM device
* @crtc: CRTC in question
295,6 → 431,21
*
* Account for vblank events across mode setting events, which will likely
* reset the hardware frame counter.
*
* This is done by grabbing a temporary vblank reference to ensure that the
* vblank interrupt keeps running across the modeset sequence. With this the
* software-side vblank frame counting will ensure that there are no jumps or
* discontinuities.
*
* Unfortunately this approach is racy and also doesn't work when the vblank
* interrupt stops running, e.g. across system suspend resume. It is therefore
* highly recommended that drivers use the newer drm_vblank_off() and
* drm_vblank_on() instead. drm_vblank_pre_modeset() only works correctly when
* using "cooked" software vblank frame counters and not relying on any hardware
* counters.
*
* Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
* again.
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
318,6 → 469,14
}
EXPORT_SYMBOL(drm_vblank_pre_modeset);
 
/**
* drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
* @dev: DRM device
* @crtc: CRTC in question
*
* This function again drops the temporary vblank reference acquired in
* drm_vblank_pre_modeset.
*/
void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{
#if 0
/drivers/video/drm/drm_mm.c
47,7 → 47,48
#include <linux/seq_file.h>
#include <linux/export.h>
 
#define MM_UNUSED_TARGET 4
/**
* DOC: Overview
*
* drm_mm provides a simple range allocator. The drivers are free to use the
* resource allocator from the linux core if it suits them, the upside of drm_mm
* is that it's in the DRM core. Which means that it's easier to extend for
* some of the crazier special purpose needs of gpus.
*
* The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
* Drivers are free to embed either of them into their own suitable
* datastructures. drm_mm itself will not do any allocations of its own, so if
* drivers choose not to embed nodes they need to still allocate them
* themselves.
*
* The range allocator also supports reservation of preallocated blocks. This is
* useful for taking over initial mode setting configurations from the firmware,
* where an object needs to be created which exactly matches the firmware's
* scanout target. As long as the range is still free it can be inserted anytime
* after the allocator is initialized, which helps with avoiding looped
* dependencies in the driver load sequence.
*
* drm_mm maintains a stack of most recently freed holes, which of all
* simplistic datastructures seems to be a fairly decent approach to clustering
* allocations and avoiding too much fragmentation. This means free space
* searches are O(num_holes). Given all the fancy features drm_mm supports,
* something better would be fairly complex, and since gfx thrashing is a fairly
* steep cliff, this is not a real concern. Removing a node again is O(1).
*
* drm_mm supports a few features: Alignment and range restrictions can be
* supplied. Furthermore, every &drm_mm_node has a color value (which is just an
* opaque unsigned long) which in conjunction with a driver callback can be used
* to implement sophisticated placement restrictions. The i915 DRM driver uses
* this to implement guard pages between incompatible caching domains in the
* graphics TT.
*
* Two behaviors are supported for searching and allocating: bottom-up and top-down.
* The default is bottom-up. Top-down allocation can be used if the memory area
* has different restrictions, or just to reduce fragmentation.
*
* Finally iteration helpers to walk all nodes and all holes are provided as are
* some basic allocator dumpers for debugging.
*/
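 
/*
 * A minimal sketch of the basic allocate/free cycle described above (not part
 * of this file). The my_obj structure and my_* function names are hypothetical
 * driver code; the drm_mm calls are the ones implemented below.
 */
#if 0
struct my_obj {
	struct drm_mm_node node;	/* embedded, no separate allocation */
};

static int my_alloc_range(struct drm_mm *mm, struct my_obj *obj,
			  unsigned long size)
{
	/* the node must be cleared to 0 before insertion */
	memset(&obj->node, 0, sizeof(obj->node));
	return drm_mm_insert_node_generic(mm, &obj->node, size,
					  0 /* alignment */, 0 /* color */,
					  DRM_MM_SEARCH_DEFAULT,
					  DRM_MM_CREATE_DEFAULT);
}

static void my_free_range(struct my_obj *obj)
{
	drm_mm_remove_node(&obj->node);
}
#endif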
 
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
unsigned long size,
65,7 → 106,8
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long color)
unsigned long color,
enum drm_mm_allocator_flags flags)
{
struct drm_mm *mm = hole_node->mm;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
78,12 → 120,22
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
if (flags & DRM_MM_CREATE_TOP)
adj_start = adj_end - size;
 
if (alignment) {
unsigned tmp = adj_start % alignment;
if (tmp)
if (tmp) {
if (flags & DRM_MM_CREATE_TOP)
adj_start -= tmp;
else
adj_start += alignment - tmp;
}
}
 
BUG_ON(adj_start < hole_start);
BUG_ON(adj_end > hole_end);
 
if (adj_start == hole_start) {
hole_node->hole_follows = 0;
list_del(&hole_node->hole_stack);
107,6 → 159,20
}
}
 
/**
* drm_mm_reserve_node - insert a pre-initialized node
* @mm: drm_mm allocator to insert @node into
* @node: drm_mm_node to insert
*
* This function inserts an already set-up drm_mm_node into the allocator,
* meaning that start, size and color must be set by the caller. This is useful
* to initialize the allocator with preallocated objects which must be set-up
* before the range allocator can be set-up, e.g. when taking over a firmware
* framebuffer.
*
* Returns:
* 0 on success, -ENOSPC if there's no hole where @node is.
*/
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
struct drm_mm_node *hole;
141,30 → 207,39
return 0;
}
 
WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
node->start, node->size);
return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
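 
/*
 * A minimal sketch of the reservation use-case mentioned above (not part of
 * this file): taking over a firmware framebuffer range before the allocator is
 * in general use. The my_* name and the fb_start/fb_size values are
 * hypothetical; only drm_mm_reserve_node() is the real API.
 */
#if 0
static int my_reserve_firmware_fb(struct drm_mm *mm, struct drm_mm_node *node,
				  unsigned long fb_start, unsigned long fb_size)
{
	/* start, size and color must be set up by the caller */
	memset(node, 0, sizeof(*node));
	node->start = fb_start;
	node->size = fb_size;

	/* fails with -ENOSPC if the range is no longer free */
	return drm_mm_reserve_node(mm, node);
}
#endif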
 
/**
* Search for free space and insert a preallocated memory node. Returns
* -ENOSPC if no suitable free area is available. The preallocated memory node
* must be cleared.
* drm_mm_insert_node_generic - search for space and insert @node
* @mm: drm_mm to allocate from
* @node: preallocated node to insert
* @size: size of the allocation
* @alignment: alignment of the allocation
* @color: opaque tag value to use for this node
* @sflags: flags to fine-tune the allocation search
* @aflags: flags to fine-tune the allocation behavior
*
* The preallocated node must be cleared to 0.
*
* Returns:
* 0 on success, -ENOSPC if there's no suitable hole.
*/
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long color,
enum drm_mm_search_flags flags)
enum drm_mm_search_flags sflags,
enum drm_mm_allocator_flags aflags)
{
struct drm_mm_node *hole_node;
 
hole_node = drm_mm_search_free_generic(mm, size, alignment,
color, flags);
color, sflags);
if (!hole_node)
return -ENOSPC;
 
drm_mm_insert_helper(hole_node, node, size, alignment, color);
drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
173,7 → 248,8
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long color,
unsigned long start, unsigned long end)
unsigned long start, unsigned long end,
enum drm_mm_allocator_flags flags)
{
struct drm_mm *mm = hole_node->mm;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
188,14 → 264,21
if (adj_end > end)
adj_end = end;
 
if (flags & DRM_MM_CREATE_TOP)
adj_start = adj_end - size;
 
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
if (alignment) {
unsigned tmp = adj_start % alignment;
if (tmp)
if (tmp) {
if (flags & DRM_MM_CREATE_TOP)
adj_start -= tmp;
else
adj_start += alignment - tmp;
}
}
 
if (adj_start == hole_start) {
hole_node->hole_follows = 0;
211,6 → 294,8
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
 
BUG_ON(node->start < start);
BUG_ON(node->start < adj_start);
BUG_ON(node->start + node->size > adj_end);
BUG_ON(node->start + node->size > end);
 
222,32 → 307,51
}
 
/**
* Search for free space and insert a preallocated memory node. Returns
* -ENOSPC if no suitable free area is available. This is for range
* restricted allocations. The preallocated memory node must be cleared.
* drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
* @mm: drm_mm to allocate from
* @node: preallocated node to insert
* @size: size of the allocation
* @alignment: alignment of the allocation
* @color: opaque tag value to use for this node
* @start: start of the allowed range for this node
* @end: end of the allowed range for this node
* @sflags: flags to fine-tune the allocation search
* @aflags: flags to fine-tune the allocation behavior
*
* The preallocated node must be cleared to 0.
*
* Returns:
* 0 on success, -ENOSPC if there's no suitable hole.
*/
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment, unsigned long color,
unsigned long size, unsigned alignment,
unsigned long color,
unsigned long start, unsigned long end,
enum drm_mm_search_flags flags)
enum drm_mm_search_flags sflags,
enum drm_mm_allocator_flags aflags)
{
struct drm_mm_node *hole_node;
 
hole_node = drm_mm_search_free_in_range_generic(mm,
size, alignment, color,
start, end, flags);
start, end, sflags);
if (!hole_node)
return -ENOSPC;
 
drm_mm_insert_helper_range(hole_node, node,
size, alignment, color,
start, end);
start, end, aflags);
return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
 
/**
* Remove a memory node from the allocator.
* drm_mm_remove_node - Remove a memory node from the allocator.
* @node: drm_mm_node to remove
*
* This just removes a node from its drm_mm allocator. The node does not need to
* be cleared again before it can be re-inserted into this or any other drm_mm
* allocator. It is a bug to call this function on an unallocated node.
*/
void drm_mm_remove_node(struct drm_mm_node *node)
{
315,7 → 419,10
best = NULL;
best_size = ~0UL;
 
drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
flags & DRM_MM_SEARCH_BELOW) {
unsigned long hole_size = adj_end - adj_start;
 
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
if (adj_end <= adj_start)
328,9 → 435,9
if (!(flags & DRM_MM_SEARCH_BEST))
return entry;
 
if (entry->size < best_size) {
if (hole_size < best_size) {
best = entry;
best_size = entry->size;
best_size = hole_size;
}
}
 
356,7 → 463,10
best = NULL;
best_size = ~0UL;
 
drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
flags & DRM_MM_SEARCH_BELOW) {
unsigned long hole_size = adj_end - adj_start;
 
if (adj_start < start)
adj_start = start;
if (adj_end > end)
374,9 → 484,9
if (!(flags & DRM_MM_SEARCH_BEST))
return entry;
 
if (entry->size < best_size) {
if (hole_size < best_size) {
best = entry;
best_size = entry->size;
best_size = hole_size;
}
}
 
384,7 → 494,13
}
 
/**
* Moves an allocation. To be used with embedded struct drm_mm_node.
* drm_mm_replace_node - move an allocation from @old to @new
* @old: drm_mm_node to remove from the allocator
* @new: drm_mm_node which should inherit @old's allocation
*
* This is useful for when drivers embed the drm_mm_node structure and hence
* can't move allocations by reassigning pointers. It's a combination of remove
* and insert with the guarantee that the allocation start will match.
*/
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
402,12 → 518,46
EXPORT_SYMBOL(drm_mm_replace_node);
 
/**
* Initializa lru scanning.
* DOC: lru scan roster
*
* Very often GPUs need to have contiguous allocations for a given object. When
* evicting objects to make space for a new one it is therefore not the most
* efficient to simply select all objects from the tail of an LRU
* until there's a suitable hole: Especially for big objects or nodes that
* otherwise have special allocation constraints there's a good chance we evict
* lots of (smaller) objects unnecessarily.
*
* The DRM range allocator supports this use-case through the scanning
* interfaces. First a scan operation needs to be initialized with
* drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
* objects to the roster (probably by walking an LRU list, but this can be
* freely implemented) until a suitable hole is found or there's no further
* evictable object.
*
* Then the driver must walk through all objects again in exactly the reverse
* order to restore the allocator state. Note that while the allocator is used
* in the scan mode no other operation is allowed.
*
* Finally the driver evicts all objects selected in the scan. Adding and
* removing an object is O(1), and since freeing a node is also O(1) the overall
* complexity is O(scanned_objects). So, like the free stack which needs to be
* walked before a scan operation even begins, this is linear in the number of
* objects. It doesn't seem to hurt badly.
*/
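 
/*
 * A rough sketch of the eviction scan loop described above (not part of this
 * file). The my_obj structure, its lru_link/scan_link list members and the
 * surrounding function are hypothetical driver state; the drm_mm_* calls are
 * the ones implemented below.
 */
#if 0
struct my_obj {
	struct drm_mm_node node;
	struct list_head lru_link;	/* on the driver's LRU */
	struct list_head scan_link;	/* temporary link for the scan */
};

static bool my_evict_something(struct drm_mm *mm, struct list_head *lru,
			       unsigned long size, unsigned alignment)
{
	struct my_obj *obj, *tmp;
	LIST_HEAD(scan_list);
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_init_scan(mm, size, alignment, 0 /* color */);

	/* add objects in LRU order until a suitable hole is found */
	list_for_each_entry(obj, lru, lru_link) {
		list_add(&obj->scan_link, &scan_list);
		if (drm_mm_scan_add_block(&obj->node)) {
			found = true;
			break;
		}
	}

	/* walk back in exactly the reverse order to restore the allocator state */
	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
		list_del(&obj->scan_link);
		if (drm_mm_scan_remove_block(&obj->node))
			list_add(&obj->scan_link, &evict_list);	/* selected for eviction */
	}

	/* only now, with the scan finished, actually evict the selected nodes */
	list_for_each_entry_safe(obj, tmp, &evict_list, scan_link) {
		list_del(&obj->scan_link);
		drm_mm_remove_node(&obj->node);
	}

	return found;
}
#endif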
 
/**
* drm_mm_init_scan - initialize lru scanning
* @mm: drm_mm to scan
* @size: size of the allocation
* @alignment: alignment of the allocation
* @color: opaque tag value to use for the allocation
*
* This simply sets up the scanning routines with the parameters for the desired
* hole.
* hole. Note that there's no need to specify allocation flags, since they only
* change the place a node is allocated from within a suitable hole.
*
* Warning: As long as the scan list is non-empty, no other operations than
* Warning:
* As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan(struct drm_mm *mm,
427,12 → 577,20
EXPORT_SYMBOL(drm_mm_init_scan);
 
/**
* Initializa lru scanning.
* drm_mm_init_scan_with_range - initialize range-restricted lru scanning
* @mm: drm_mm to scan
* @size: size of the allocation
* @alignment: alignment of the allocation
* @color: opaque tag value to use for the allocation
* @start: start of the allowed range for the allocation
* @end: end of the allowed range for the allocation
*
* This simply sets up the scanning routines with the parameters for the desired
* hole. This version is for range-restricted scans.
* hole. Note that there's no need to specify allocation flags, since they only
* change the place a node is allocated from within a suitable hole.
*
* Warning: As long as the scan list is non-empty, no other operations than
* Warning:
* As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan_with_range(struct drm_mm *mm,
456,12 → 614,16
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
 
/**
* drm_mm_scan_add_block - add a node to the scan list
* @node: drm_mm_node to add
*
* Add a node to the scan list that might be freed to make space for the desired
* hole.
*
* Returns non-zero, if a hole has been found, zero otherwise.
* Returns:
* True if a hole has been found, false otherwise.
*/
int drm_mm_scan_add_block(struct drm_mm_node *node)
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
501,15 → 663,16
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = hole_start;
mm->scan_hit_end = hole_end;
return 1;
return true;
}
 
return 0;
return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
 
/**
* Remove a node from the scan list.
* drm_mm_scan_remove_block - remove a node from the scan list
* @node: drm_mm_node to remove
*
* Nodes _must_ be removed in the exact same order from the scan list as they
* have been added, otherwise the internal state of the memory manager will be
519,10 → 682,11
* immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
* return the just freed block (because it's at the top of the free_stack list).
*
* Returns one if this block should be evicted, zero otherwise. Will always
* return zero when no hole has been found.
* Returns:
* True if this block should be evicted, false otherwise. Will always
* return false when no hole has been found.
*/
int drm_mm_scan_remove_block(struct drm_mm_node *node)
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
543,7 → 707,15
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
 
int drm_mm_clean(struct drm_mm * mm)
/**
* drm_mm_clean - checks whether an allocator is clean
* @mm: drm_mm allocator to check
*
* Returns:
* True if the allocator is completely free, false if there's still a node
* allocated in it.
*/
bool drm_mm_clean(struct drm_mm * mm)
{
struct list_head *head = &mm->head_node.node_list;
 
551,6 → 723,14
}
EXPORT_SYMBOL(drm_mm_clean);
 
/**
* drm_mm_init - initialize a drm-mm allocator
* @mm: the drm_mm structure to initialize
* @start: start of the range managed by @mm
* @size: size of the range managed by @mm
*
* Note that @mm must be cleared to 0 before calling this function.
*/
void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->hole_stack);
572,6 → 752,13
}
EXPORT_SYMBOL(drm_mm_init);
 
/**
* drm_mm_takedown - clean up a drm_mm allocator
* @mm: drm_mm allocator to clean up
*
* Note that it is a bug to call this function on an allocator which is not
* clean.
*/
void drm_mm_takedown(struct drm_mm * mm)
{
WARN(!list_empty(&mm->head_node.node_list),
597,6 → 784,11
return 0;
}
 
/**
* drm_mm_debug_table - dump allocator state to dmesg
* @mm: drm_mm allocator to dump
* @prefix: prefix to use for dumping to dmesg
*/
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
struct drm_mm_node *entry;
635,6 → 827,11
return 0;
}
 
/**
* drm_mm_dump_table - dump allocator state to a seq_file
* @m: seq_file to dump to
* @mm: drm_mm allocator to dump
*/
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
struct drm_mm_node *entry;
/drivers/video/drm/drm_modes.c
35,15 → 35,14
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modes.h>
 
#include "drm_crtc_internal.h"
 
/**
* drm_mode_debug_printmodeline - debug print a mode
* @dev: DRM device
* drm_mode_debug_printmodeline - print a mode to dmesg
* @mode: mode to print
*
* LOCKING:
* None.
*
* Describe @mode using DRM_DEBUG.
*/
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
59,19 → 58,78
EXPORT_SYMBOL(drm_mode_debug_printmodeline);
 
/**
* drm_cvt_mode -create a modeline based on CVT algorithm
* drm_mode_create - create a new display mode
* @dev: DRM device
*
* Create a new, cleared drm_display_mode with kzalloc, allocate an ID for it
* and return it.
*
* Returns:
* Pointer to new mode on success, NULL on error.
*/
struct drm_display_mode *drm_mode_create(struct drm_device *dev)
{
struct drm_display_mode *nmode;
 
nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
if (!nmode)
return NULL;
 
if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
kfree(nmode);
return NULL;
}
 
return nmode;
}
EXPORT_SYMBOL(drm_mode_create);
 
/**
* drm_mode_destroy - remove a mode
* @dev: DRM device
* @mode: mode to remove
*
* Release @mode's unique ID, then free the @mode structure itself using kfree.
*/
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
{
if (!mode)
return;
 
drm_mode_object_put(dev, &mode->base);
 
kfree(mode);
}
EXPORT_SYMBOL(drm_mode_destroy);
 
/**
* drm_mode_probed_add - add a mode to a connector's probed_mode list
* @connector: connector the new mode is added to
* @mode: mode data
*
* Add @mode to @connector's probed_modes list for later use. This list should
* then in a second step get filtered and all the modes actually supported by
* the hardware moved to the @connector's modes list.
*/
void drm_mode_probed_add(struct drm_connector *connector,
struct drm_display_mode *mode)
{
WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
 
list_add_tail(&mode->head, &connector->probed_modes);
}
EXPORT_SYMBOL(drm_mode_probed_add);
 
/**
* drm_cvt_mode - create a modeline based on the CVT algorithm
* @dev: drm device
* @hdisplay: hdisplay size
* @vdisplay: vdisplay size
* @vrefresh : vrefresh rate
* @reduced : Whether the GTF calculation is simplified
* @interlaced:Whether the interlace is supported
* @reduced: whether to use reduced blanking
* @interlaced: whether to compute an interlaced mode
* @margins: whether to add margins (borders)
*
* LOCKING:
* none.
*
* return the modeline based on CVT algorithm
*
* This function is called to generate the modeline based on the CVT algorithm
* according to the hdisplay, vdisplay and vrefresh.
* It is based on the VESA(TM) Coordinated Video Timing Generator by
80,12 → 138,17
*
* And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c.
* What I have done is to translate it by using integer calculation.
*
* Returns:
* The modeline based on the CVT algorithm stored in a drm_display_mode object.
* The display mode object is allocated with drm_mode_create(). Returns NULL
* when no mode could be allocated.
*/
#define HV_FACTOR 1000
struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
int vdisplay, int vrefresh,
bool reduced, bool interlaced, bool margins)
{
#define HV_FACTOR 1000
/* 1) top/bottom margin size (% of height) - default: 1.8, */
#define CVT_MARGIN_PERCENTAGE 18
/* 2) character cell horizontal granularity (pixels) - default 8 */
279,23 → 342,25
EXPORT_SYMBOL(drm_cvt_mode);
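 
/*
 * A minimal usage sketch (not part of this file): generating a reduced-blanking
 * CVT mode for a 1920x1080@60 panel from a connector's ->get_modes() hook. The
 * surrounding function is hypothetical; only drm_cvt_mode() and
 * drm_mode_probed_add() are the real API calls.
 */
#if 0
static int my_connector_get_modes(struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_cvt_mode(connector->dev, 1920, 1080, 60,
			    true /* reduced blanking */,
			    false /* not interlaced */,
			    false /* no margins */);
	if (!mode)
		return 0;

	mode->type |= DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, mode);

	return 1;
}
#endif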
 
/**
* drm_gtf_mode_complex - create the modeline based on full GTF algorithm
*
* drm_gtf_mode_complex - create the modeline based on the full GTF algorithm
* @dev :drm device
* @hdisplay :hdisplay size
* @vdisplay :vdisplay size
* @vrefresh :vrefresh rate.
* @interlaced :whether the interlace is supported
* @margins :desired margin size
* @GTF_[MCKJ] :extended GTF formula parameters
* @interlaced: whether to compute an interlaced mode
* @margins: desired margin (borders) size
* @GTF_M: extended GTF formula parameters
* @GTF_2C: extended GTF formula parameters
* @GTF_K: extended GTF formula parameters
* @GTF_2J: extended GTF formula parameters
*
* LOCKING.
* none.
*
* return the modeline based on full GTF algorithm.
*
* GTF feature blocks specify C and J in multiples of 0.5, so we pass them
* in here multiplied by two. For a C of 40, pass in 80.
*
* Returns:
* The modeline based on the full GTF algorithm stored in a drm_display_mode object.
* The display mode object is allocated with drm_mode_create(). Returns NULL
* when no mode could be allocated.
*/
struct drm_display_mode *
drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
465,18 → 530,14
EXPORT_SYMBOL(drm_gtf_mode_complex);
 
/**
* drm_gtf_mode - create the modeline based on GTF algorithm
*
* drm_gtf_mode - create the modeline based on the GTF algorithm
* @dev :drm device
* @hdisplay :hdisplay size
* @vdisplay :vdisplay size
* @vrefresh :vrefresh rate.
* @interlaced :whether the interlace is supported
* @margins :whether the margin is supported
* @interlaced: whether to compute an interlaced mode
* @margins: desired margin (borders) size
*
* LOCKING.
* none.
*
* return the modeline based on GTF algorithm
*
* This function is to create the modeline based on the GTF algorithm.
494,18 → 555,31
* C = 40
* K = 128
* J = 20
*
* Returns:
* The modeline based on the GTF algorithm stored in a drm_display_mode object.
* The display mode object is allocated with drm_mode_create(). Returns NULL
* when no mode could be allocated.
*/
struct drm_display_mode *
drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
bool lace, int margins)
bool interlaced, int margins)
{
return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
margins, 600, 40 * 2, 128, 20 * 2);
return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh,
interlaced, margins,
600, 40 * 2, 128, 20 * 2);
}
EXPORT_SYMBOL(drm_gtf_mode);
 
#ifdef CONFIG_VIDEOMODE_HELPERS
int drm_display_mode_from_videomode(const struct videomode *vm,
/**
* drm_display_mode_from_videomode - fill in @dmode using @vm
* @vm: videomode structure to use as source
* @dmode: drm_display_mode structure to use as destination
*
* Fills out @dmode using the display mode specified in @vm.
*/
void drm_display_mode_from_videomode(const struct videomode *vm,
struct drm_display_mode *dmode)
{
dmode->hdisplay = vm->hactive;
536,8 → 610,6
if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
dmode->flags |= DRM_MODE_FLAG_DBLCLK;
drm_mode_set_name(dmode);
 
return 0;
}
EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
 
551,6 → 623,9
* This function is expensive and should only be used, if only one mode is to be
* read from DT. To get multiple modes start with of_get_display_timings and
* work with that instead.
*
* Returns:
* 0 on success, a negative errno code when no OF videomode node was found.
*/
int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode, int index)
578,10 → 653,8
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
*
* LOCKING:
* None.
*
* Set the name of @mode to a standard format.
* Set the name of @mode to a standard format which is <hdisplay>x<vdisplay>
* with an optional 'i' suffix for interlaced modes.
*/
void drm_mode_set_name(struct drm_display_mode *mode)
{
593,54 → 666,12
}
EXPORT_SYMBOL(drm_mode_set_name);
 
/**
* drm_mode_width - get the width of a mode
* @mode: mode
*
* LOCKING:
* None.
*
* Return @mode's width (hdisplay) value.
*
* FIXME: is this needed?
*
* RETURNS:
* @mode->hdisplay
*/
int drm_mode_width(const struct drm_display_mode *mode)
{
return mode->hdisplay;
 
}
EXPORT_SYMBOL(drm_mode_width);
 
/**
* drm_mode_height - get the height of a mode
* @mode: mode
*
* LOCKING:
* None.
*
* Return @mode's height (vdisplay) value.
*
* FIXME: is this needed?
*
* RETURNS:
* @mode->vdisplay
*/
int drm_mode_height(const struct drm_display_mode *mode)
{
return mode->vdisplay;
}
EXPORT_SYMBOL(drm_mode_height);
 
/** drm_mode_hsync - get the hsync of a mode
* @mode: mode
*
* LOCKING:
* None.
*
* Return @modes's hsync rate in kHz, rounded to the nearest int.
* Returns:
* @mode's hsync rate in kHz, rounded to the nearest integer. Calculates the
* value first if it is not yet set.
*/
int drm_mode_hsync(const struct drm_display_mode *mode)
{
664,17 → 695,9
* drm_mode_vrefresh - get the vrefresh of a mode
* @mode: mode
*
* LOCKING:
* None.
*
* Return @mode's vrefresh rate in Hz or calculate it if necessary.
*
* FIXME: why is this needed? shouldn't vrefresh be set already?
*
* RETURNS:
* Vertical refresh rate. It will be the result of actual value plus 0.5.
* If it is 70.288, it will return 70Hz.
* If it is 59.6, it will return 60Hz.
* Returns:
* @mode's vrefresh rate in Hz, rounded to the nearest integer. Calculates the
* value first if it is not yet set.
*/
int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
703,15 → 726,12
EXPORT_SYMBOL(drm_mode_vrefresh);
 
/**
* drm_mode_set_crtcinfo - set CRTC modesetting parameters
* drm_mode_set_crtcinfo - set CRTC modesetting timing parameters
* @p: mode
* @adjust_flags: a combination of adjustment flags
*
* LOCKING:
* None.
* Setup the CRTC modesetting timing parameters for @p, adjusting if necessary.
*
* Setup the CRTC modesetting parameters for @p, adjusting if necessary.
*
* - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
* interlaced modes.
* - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
778,15 → 798,11
}
EXPORT_SYMBOL(drm_mode_set_crtcinfo);
 
 
/**
* drm_mode_copy - copy the mode
* @dst: mode to overwrite
* @src: mode to copy
*
* LOCKING:
* None.
*
* Copy an existing mode into another mode, preserving the object id and
* list head of the destination mode.
*/
803,13 → 819,14
 
/**
* drm_mode_duplicate - allocate and duplicate an existing mode
* @m: mode to duplicate
* @dev: drm_device to allocate the duplicated mode for
* @mode: mode to duplicate
*
* LOCKING:
* None.
*
* Just allocate a new mode, copy the existing mode into it, and return
* a pointer to it. Used to create new instances of established modes.
*
* Returns:
* Pointer to duplicated mode on success, NULL on error.
*/
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode)
831,12 → 848,9
* @mode1: first mode
* @mode2: second mode
*
* LOCKING:
* None.
*
* Check to see if @mode1 and @mode2 are equivalent.
*
* RETURNS:
* Returns:
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
862,13 → 876,10
* @mode1: first mode
* @mode2: second mode
*
* LOCKING:
* None.
*
* Check to see if @mode1 and @mode2 are equivalent, but
* don't check the pixel clocks or the stereo layout.
*
* RETURNS:
* Returns:
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
898,25 → 909,19
* @mode_list: list of modes to check
* @maxX: maximum width
* @maxY: maximum height
* @maxPitch: max pitch
*
* LOCKING:
* Caller must hold a lock protecting @mode_list.
*
* The DRM device (@dev) has size and pitch limits. Here we validate the
* modes we probed for @dev against those limits and set their status as
* necessary.
* This function is a helper which can be used to validate modes against size
* limitations of the DRM device/connector. If a mode is too big its status
* member is updated with the appropriate validation failure code. The list
* itself is not changed.
*/
void drm_mode_validate_size(struct drm_device *dev,
struct list_head *mode_list,
int maxX, int maxY, int maxPitch)
int maxX, int maxY)
{
struct drm_display_mode *mode;
 
list_for_each_entry(mode, mode_list, head) {
if (maxPitch > 0 && mode->hdisplay > maxPitch)
mode->status = MODE_BAD_WIDTH;
 
if (maxX > 0 && mode->hdisplay > maxX)
mode->status = MODE_VIRTUAL_X;
 
932,12 → 937,10
* @mode_list: list of modes to check
* @verbose: be verbose about it
*
* LOCKING:
* Caller must hold a lock protecting @mode_list.
*
* Once mode list generation is complete, a caller can use this routine to
* remove invalid modes from a mode list. If any of the modes have a
* status other than %MODE_OK, they are removed from @mode_list and freed.
* This helper function can be used to prune a display mode list after
* validation has been completed. All modes whose status is not MODE_OK will be
* removed from the list, and if @verbose the status code and mode name are also
* printed to dmesg.
*/
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose)
964,13 → 967,10
* @lh_a: list_head for first mode
* @lh_b: list_head for second mode
*
* LOCKING:
* None.
*
* Compare two modes, given by @lh_a and @lh_b, returning a value indicating
* which is better.
*
* RETURNS:
* Returns:
* Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
* positive if @lh_b is better than @lh_a.
*/
998,12 → 998,9
 
/**
* drm_mode_sort - sort mode list
* @mode_list: list to sort
* @mode_list: list of drm_display_mode structures to sort
*
* LOCKING:
* Caller must hold a lock protecting @mode_list.
*
* Sort @mode_list by favorability, putting good modes first.
* Sort @mode_list by favorability, moving good modes to the head of the list.
*/
void drm_mode_sort(struct list_head *mode_list)
{
1014,21 → 1011,24
/**
* drm_mode_connector_list_update - update the mode list for the connector
* @connector: the connector to update
* @merge_type_bits: whether to merge or overwrite type bits.
*
* LOCKING:
* Caller must hold a lock protecting @mode_list.
*
* This moves the modes from the @connector probed_modes list
* to the actual mode list. It compares the probed mode against the current
* list and only adds different modes. All modes unverified after this point
* will be removed by the prune invalid modes.
* list and only adds different/new modes.
*
* This is just a helper function; it doesn't validate any modes itself and also
* doesn't prune any invalid modes. Callers need to do that themselves.
*/
void drm_mode_connector_list_update(struct drm_connector *connector)
void drm_mode_connector_list_update(struct drm_connector *connector,
bool merge_type_bits)
{
struct drm_display_mode *mode;
struct drm_display_mode *pmode, *pt;
int found_it;
 
WARN_ON(!mutex_is_locked(&connector->dev->mode_config.mutex));
 
list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
head) {
found_it = 0;
1039,7 → 1039,10
/* if equal delete the probed mode */
mode->status = pmode->status;
/* Merge type bits together */
if (merge_type_bits)
mode->type |= pmode->type;
else
mode->type = pmode->type;
list_del(&pmode->head);
drm_mode_destroy(connector->dev, pmode);
break;
/drivers/video/drm/drm_modeset_lock.c
0,0 → 1,248
/*
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
 
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_lock.h>
 
/**
* DOC: kms locking
*
* As KMS moves toward more fine grained locking, and atomic ioctl where
* userspace can indirectly control locking order, it becomes necessary
* to use ww_mutex and acquire-contexts to avoid deadlocks. But because
* the locking is more distributed around the driver code, we want a bit
* of extra utility/tracking out of our acquire-ctx. This is provided
* by drm_modeset_lock / drm_modeset_acquire_ctx.
*
* For basic principles of ww_mutex, see: Documentation/ww-mutex-design.txt
*
* The basic usage pattern is to:
*
* drm_modeset_acquire_init(&ctx)
* retry:
* foreach (lock in random_ordered_set_of_locks) {
* ret = drm_modeset_lock(lock, &ctx)
* if (ret == -EDEADLK) {
* drm_modeset_backoff(&ctx);
* goto retry;
* }
* }
*
* ... do stuff ...
*
* drm_modeset_drop_locks(&ctx);
* drm_modeset_acquire_fini(&ctx);
*/
 
 
/**
* drm_modeset_acquire_init - initialize acquire context
* @ctx: the acquire context
* @flags: for future use
*/
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
uint32_t flags)
{
memset(ctx, 0, sizeof(*ctx));
ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
INIT_LIST_HEAD(&ctx->locked);
}
EXPORT_SYMBOL(drm_modeset_acquire_init);
 
/**
* drm_modeset_acquire_fini - cleanup acquire context
* @ctx: the acquire context
*/
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
ww_acquire_fini(&ctx->ww_ctx);
}
EXPORT_SYMBOL(drm_modeset_acquire_fini);
 
/**
* drm_modeset_drop_locks - drop all locks
* @ctx: the acquire context
*
* Drop all locks currently held against this acquire context.
*/
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
{
WARN_ON(ctx->contended);
while (!list_empty(&ctx->locked)) {
struct drm_modeset_lock *lock;
 
lock = list_first_entry(&ctx->locked,
struct drm_modeset_lock, head);
 
drm_modeset_unlock(lock);
}
}
EXPORT_SYMBOL(drm_modeset_drop_locks);
 
static inline int modeset_lock(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx,
bool interruptible, bool slow)
{
int ret;
 
WARN_ON(ctx->contended);
 
if (interruptible && slow) {
ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
} else if (interruptible) {
ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
} else if (slow) {
ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
ret = 0;
} else {
ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
}
if (!ret) {
WARN_ON(!list_empty(&lock->head));
list_add(&lock->head, &ctx->locked);
} else if (ret == -EALREADY) {
/* we already hold the lock.. this is fine. For atomic
* we will need to be able to drm_modeset_lock() things
* without having to keep track of what is already locked
* or not.
*/
ret = 0;
} else if (ret == -EDEADLK) {
ctx->contended = lock;
}
 
return ret;
}
 
static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
bool interruptible)
{
struct drm_modeset_lock *contended = ctx->contended;
 
ctx->contended = NULL;
 
if (WARN_ON(!contended))
return 0;
 
drm_modeset_drop_locks(ctx);
 
return modeset_lock(contended, ctx, interruptible, true);
}
 
/**
* drm_modeset_backoff - deadlock avoidance backoff
* @ctx: the acquire context
*
* If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
* you must call this function to drop all currently held locks and
* block until the contended lock becomes available.
*/
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
modeset_backoff(ctx, false);
}
EXPORT_SYMBOL(drm_modeset_backoff);
 
/**
* drm_modeset_backoff_interruptible - deadlock avoidance backoff
* @ctx: the acquire context
*
* Interruptible version of drm_modeset_backoff()
*/
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
{
return modeset_backoff(ctx, true);
}
EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
 
/**
* drm_modeset_lock - take modeset lock
* @lock: lock to take
* @ctx: acquire ctx
*
* If ctx is not NULL, then its ww acquire context is used and the
* lock will be tracked by the context and can be released by calling
* drm_modeset_drop_locks(). If -EDEADLK is returned, this means a
* deadlock scenario has been detected and it is an error to attempt
* to take any more locks without first calling drm_modeset_backoff().
*/
int drm_modeset_lock(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx)
{
if (ctx)
return modeset_lock(lock, ctx, false, false);
 
ww_mutex_lock(&lock->mutex, NULL);
return 0;
}
EXPORT_SYMBOL(drm_modeset_lock);
 
/**
* drm_modeset_lock_interruptible - take modeset lock
* @lock: lock to take
* @ctx: acquire ctx
*
* Interruptible version of drm_modeset_lock()
*/
int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx)
{
if (ctx)
return modeset_lock(lock, ctx, true, false);
 
return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
EXPORT_SYMBOL(drm_modeset_lock_interruptible);
 
/**
* drm_modeset_unlock - drop modeset lock
* @lock: lock to release
*/
void drm_modeset_unlock(struct drm_modeset_lock *lock)
{
list_del_init(&lock->head);
ww_mutex_unlock(&lock->mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock);
 
/* Temporary.. until we have sufficiently fine grained locking, there
* are a couple scenarios where it is convenient to grab all crtc locks.
* It is planned to remove this:
*/
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_crtc *crtc;
int ret = 0;
 
list_for_each_entry(crtc, &config->crtc_list, head) {
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
return ret;
}
 
return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
/drivers/video/drm/drm_pci.c
1,17 → 1,3
/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
/**
* \file drm_pci.c
* \brief Functions and ioctls to manage PCI memory
*
* \warning These interfaces aren't stable yet.
*
* \todo Implement the remaining ioctl's for the PCI pools.
* \todo The wrappers here are so thin that they would be better off inlined..
*
* \author José Fonseca <jrfonseca@tungstengraphics.com>
* \author Leif Delgass <ldelgass@retinalburn.net>
*/
 
/*
* Copyright 2003 José Fonseca.
* Copyright 2003 Leif Delgass.
38,26 → 24,25
 
//#include <linux/pci.h>
//#include <linux/slab.h>
//#include <linux/dma-mapping.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>
 
#include <syscall.h>
 
/**********************************************************************/
/** \name PCI memory */
/*@{*/
 
/**
* \brief Allocate a PCI consistent memory block, for DMA.
* drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
* @dev: DRM device
* @size: size of block to allocate
* @align: alignment of block
*
* Return: A handle to the allocated memory block on success or NULL on
* failure.
*/
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
#if 1
unsigned long addr;
size_t sz;
#endif
 
/* pci_alloc_consistent only guarantees alignment to the smallest
* PAGE_SIZE order which is greater than or equal to the requested size.
110,7 → 95,9
}
 
/**
* \brief Free a PCI consistent memory block
* drm_pci_free - Free a PCI consistent memory block
* @dev: DRM device
* @dmah: handle to memory block
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
245,7 → 232,6
return ret;
}
 
 
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
336,14 → 322,10
 
 
dev->pdev = pdev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
 
#ifdef __alpha__
dev->hose = pdev->sysdata;
#endif
 
// mutex_lock(&drm_global_mutex);
 
if ((ret = drm_fill_in_dev(dev, ent, driver))) {
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
/drivers/video/drm/drm_plane_helper.c
0,0 → 1,371
/*
* Copyright (C) 2014 Intel Corporation
*
* DRM universal plane helper functions
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
 
#include <linux/list.h>
#include <drm/drmP.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_plane_helper.h>
 
#define SUBPIXEL_MASK 0xffff
 
/*
* This is the minimal list of formats that seem to be safe for modeset use
* with all current DRM drivers. Most hardware can actually support more
* formats than this and drivers may specify a more accurate list when
* creating the primary plane. However drivers that still call
* drm_plane_init() will use this minimal format list as the default.
*/
static const uint32_t safe_modeset_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
};
 
/*
* Returns the connectors currently associated with a CRTC. This function
* should be called twice: once with a NULL connector list to retrieve
* the list size, and once with the properly allocated list to be filled in.
*/
static int get_connectors_for_crtc(struct drm_crtc *crtc,
struct drm_connector **connector_list,
int num_connectors)
{
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
int count = 0;
 
/*
* Note: Once we change the plane hooks to more fine-grained locking we
* need to grab the connection_mutex here to be able to make these
* checks.
*/
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder && connector->encoder->crtc == crtc) {
if (connector_list != NULL && count < num_connectors)
*(connector_list++) = connector;
 
count++;
}
 
return count;
}
 
/**
* drm_plane_helper_check_update() - Check plane update for validity
* @plane: plane object to update
* @crtc: owning CRTC of @plane
* @fb: framebuffer to flip onto plane
* @src: source coordinates in 16.16 fixed point
* @dest: integer destination coordinates
* @clip: integer clipping coordinates
* @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
* @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
* @can_position: is it legal to position the plane such that it
* doesn't cover the entire crtc? This will generally
* only be false for primary planes.
* @can_update_disabled: can the plane be updated while the crtc
* is disabled?
* @visible: output parameter indicating whether plane is still visible after
* clipping
*
* Checks that a desired plane update is valid. Drivers that provide
* their own plane handling rather than helper-provided implementations may
* still wish to call this function to avoid duplication of error checking
* code.
*
* RETURNS:
* Zero if update appears valid, error code on failure
*/
int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_rect *src,
struct drm_rect *dest,
const struct drm_rect *clip,
int min_scale,
int max_scale,
bool can_position,
bool can_update_disabled,
bool *visible)
{
int hscale, vscale;
 
if (!crtc->enabled && !can_update_disabled) {
DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
return -EINVAL;
}
 
/* Check scaling */
hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale);
vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
if (hscale < 0 || vscale < 0) {
DRM_DEBUG_KMS("Invalid scaling of plane\n");
return -ERANGE;
}
 
*visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
if (!*visible)
/*
* Plane isn't visible; some drivers can handle this
* so we just return success here. Drivers that can't
* (including those that use the primary plane helper's
* update function) will return an error from their
* update_plane handler.
*/
return 0;
 
if (!can_position && !drm_rect_equals(dest, clip)) {
DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
return -EINVAL;
}
 
return 0;
}
EXPORT_SYMBOL(drm_plane_helper_check_update);
 
/**
* drm_primary_helper_update() - Helper for primary plane update
* @plane: plane object to update
* @crtc: owning CRTC of @plane
* @fb: framebuffer to flip onto plane
* @crtc_x: x offset of primary plane on crtc
* @crtc_y: y offset of primary plane on crtc
* @crtc_w: width of primary plane rectangle on crtc
* @crtc_h: height of primary plane rectangle on crtc
* @src_x: x offset of @fb for panning
* @src_y: y offset of @fb for panning
* @src_w: width of source rectangle in @fb
* @src_h: height of source rectangle in @fb
*
* Provides a default plane update handler for primary planes. This handler
* is called in response to a userspace SetPlane operation on the plane with a
* non-NULL framebuffer. We call the driver's modeset handler to update the
* framebuffer.
*
* SetPlane() on a primary plane of a disabled CRTC is not supported, and will
* return an error.
*
* Note that we make some assumptions about hardware limitations that may not be
* true for all hardware --
* 1) Primary plane cannot be repositioned.
* 2) Primary plane cannot be scaled.
* 3) Primary plane must cover the entire CRTC.
* 4) Subpixel positioning is not supported.
* Drivers for hardware that don't have these restrictions can provide their
* own implementation rather than using this helper.
*
* RETURNS:
* Zero on success, error code on failure
*/
int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct drm_mode_set set = {
.crtc = crtc,
.fb = fb,
.mode = &crtc->mode,
.x = src_x >> 16,
.y = src_y >> 16,
};
struct drm_rect src = {
.x1 = src_x,
.y1 = src_y,
.x2 = src_x + src_w,
.y2 = src_y + src_h,
};
struct drm_rect dest = {
.x1 = crtc_x,
.y1 = crtc_y,
.x2 = crtc_x + crtc_w,
.y2 = crtc_y + crtc_h,
};
const struct drm_rect clip = {
.x2 = crtc->mode.hdisplay,
.y2 = crtc->mode.vdisplay,
};
struct drm_connector **connector_list;
int num_connectors, ret;
bool visible;
 
ret = drm_plane_helper_check_update(plane, crtc, fb,
&src, &dest, &clip,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, false, &visible);
if (ret)
return ret;
 
if (!visible)
/*
* Primary plane isn't visible. Note that unless a driver
* provides their own disable function, this will just
* wind up returning -EINVAL to userspace.
*/
return plane->funcs->disable_plane(plane);
 
/* Find current connectors for CRTC */
num_connectors = get_connectors_for_crtc(crtc, NULL, 0);
BUG_ON(num_connectors == 0);
connector_list = kzalloc(num_connectors * sizeof(*connector_list),
GFP_KERNEL);
if (!connector_list)
return -ENOMEM;
get_connectors_for_crtc(crtc, connector_list, num_connectors);
 
set.connectors = connector_list;
set.num_connectors = num_connectors;
 
/*
* We call set_config() directly here rather than using
* drm_mode_set_config_internal. We're reprogramming the same
* connectors that were already in use, so we shouldn't need the extra
* cross-CRTC fb refcounting to accommodate stealing connectors.
* drm_mode_setplane() already handles the basic refcounting for the
* framebuffers involved in this operation.
*/
ret = crtc->funcs->set_config(&set);
 
kfree(connector_list);
return ret;
}
EXPORT_SYMBOL(drm_primary_helper_update);
 
/**
* drm_primary_helper_disable() - Helper for primary plane disable
* @plane: plane to disable
*
* Provides a default plane disable handler for primary planes. This handler
* is called in response to a userspace SetPlane operation on the plane with a
* NULL framebuffer parameter. It unconditionally fails the disable call with
* -EINVAL because the only way to disable the primary plane without driver
* support is to disable the entire CRTC, which does not match the plane
* ->disable hook.
*
* Note that some hardware may be able to disable the primary plane without
* disabling the whole CRTC. Drivers for such hardware should provide their
* own disable handler that disables just the primary plane (and they'll likely
* need to provide their own update handler as well to properly re-enable a
* disabled primary plane).
*
* RETURNS:
* Unconditionally returns -EINVAL.
*/
int drm_primary_helper_disable(struct drm_plane *plane)
{
return -EINVAL;
}
EXPORT_SYMBOL(drm_primary_helper_disable);
 
/**
* drm_primary_helper_destroy() - Helper for primary plane destruction
* @plane: plane to destroy
*
* Provides a default plane destroy handler for primary planes. This handler
* is called during CRTC destruction. We disable the primary plane, remove
* it from the DRM plane list, and deallocate the plane structure.
*/
void drm_primary_helper_destroy(struct drm_plane *plane)
{
drm_plane_cleanup(plane);
kfree(plane);
}
EXPORT_SYMBOL(drm_primary_helper_destroy);
 
const struct drm_plane_funcs drm_primary_helper_funcs = {
.update_plane = drm_primary_helper_update,
.disable_plane = drm_primary_helper_disable,
.destroy = drm_primary_helper_destroy,
};
EXPORT_SYMBOL(drm_primary_helper_funcs);
 
/**
* drm_primary_helper_create_plane() - Create a generic primary plane
* @dev: drm device
* @formats: pixel formats supported, or NULL for a default safe list
* @num_formats: size of @formats; ignored if @formats is NULL
*
* Allocates and initializes a primary plane that can be used with the primary
* plane helpers. Drivers that wish to use driver-specific plane structures or
* provide custom handler functions may perform their own allocation and
* initialization rather than calling this function.
*/
struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
const uint32_t *formats,
int num_formats)
{
struct drm_plane *primary;
int ret;
 
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
if (primary == NULL) {
DRM_DEBUG_KMS("Failed to allocate primary plane\n");
return NULL;
}
 
if (formats == NULL) {
formats = safe_modeset_formats;
num_formats = ARRAY_SIZE(safe_modeset_formats);
}
 
/* possible_crtc's will be filled in later by crtc_init */
ret = drm_universal_plane_init(dev, primary, 0,
&drm_primary_helper_funcs,
formats, num_formats,
DRM_PLANE_TYPE_PRIMARY);
if (ret) {
kfree(primary);
primary = NULL;
}
 
return primary;
}
EXPORT_SYMBOL(drm_primary_helper_create_plane);
 
/**
* drm_crtc_init - Legacy CRTC initialization function
* @dev: DRM device
* @crtc: CRTC object to init
* @funcs: callbacks for the new CRTC
*
* Initialize a CRTC object with a default helper-provided primary plane and no
* cursor plane.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs)
{
struct drm_plane *primary;
 
primary = drm_primary_helper_create_plane(dev, NULL, 0);
return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
}
EXPORT_SYMBOL(drm_crtc_init);
/drivers/video/drm/drm_probe_helper.c
0,0 → 1,455
/*
* Copyright (c) 2006-2008 Intel Corporation
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
*
* DRM core CRTC related functions
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*
* Authors:
* Keith Packard
* Eric Anholt <eric@anholt.net>
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
 
#include <linux/export.h>
#include <linux/moduleparam.h>
 
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
 
/**
* DOC: output probing helper overview
*
* This library provides some helper code for output probing. It provides an
* implementation of the core connector->fill_modes interface with
* drm_helper_probe_single_connector_modes.
*
* It also provides support for polling connectors with a work item and for
* generic hotplug interrupt handling where the driver doesn't or cannot keep
* track of a per-connector hpd interrupt.
*
* This helper library can be used independently of the modeset helper library.
* Drivers can also override different parts, e.g. use their own hotplug
* handling code to avoid probing unrelated outputs.
*/
 
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);
 
static void drm_mode_validate_flag(struct drm_connector *connector,
int flags)
{
struct drm_display_mode *mode;
 
if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
DRM_MODE_FLAG_3D_MASK))
return;
 
list_for_each_entry(mode, &connector->modes, head) {
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
!(flags & DRM_MODE_FLAG_INTERLACE))
mode->status = MODE_NO_INTERLACE;
if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
!(flags & DRM_MODE_FLAG_DBLSCAN))
mode->status = MODE_NO_DBLESCAN;
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
!(flags & DRM_MODE_FLAG_3D_MASK))
mode->status = MODE_NO_STEREO;
}
 
return;
}
 
static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY, bool merge_type_bits)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
int count = 0;
int mode_flags = 0;
bool verbose_prune = true;
 
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
connector->name);
/* set all modes to the unverified state */
list_for_each_entry(mode, &connector->modes, head)
mode->status = MODE_UNVERIFIED;
 
if (connector->force) {
if (connector->force == DRM_FORCE_ON)
connector->status = connector_status_connected;
else
connector->status = connector_status_disconnected;
if (connector->funcs->force)
connector->funcs->force(connector);
} else {
connector->status = connector->funcs->detect(connector, true);
}
 
/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
drm_kms_helper_poll_enable(dev);
 
dev->mode_config.poll_running = drm_kms_helper_poll;
 
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, connector->name);
drm_mode_connector_update_edid_property(connector, NULL);
verbose_prune = false;
goto prune;
}
 
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
count = drm_load_edid_firmware(connector);
if (count == 0)
#endif
{
if (connector->override_edid) {
struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
 
count = drm_add_edid_modes(connector, edid);
} else
count = (*connector_funcs->get_modes)(connector);
}
 
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
if (count == 0)
goto prune;
 
drm_mode_connector_list_update(connector, merge_type_bits);
 
if (maxX && maxY)
drm_mode_validate_size(dev, &connector->modes, maxX, maxY);
 
if (connector->interlace_allowed)
mode_flags |= DRM_MODE_FLAG_INTERLACE;
if (connector->doublescan_allowed)
mode_flags |= DRM_MODE_FLAG_DBLSCAN;
if (connector->stereo_allowed)
mode_flags |= DRM_MODE_FLAG_3D_MASK;
drm_mode_validate_flag(connector, mode_flags);
 
list_for_each_entry(mode, &connector->modes, head) {
if (mode->status == MODE_OK && connector_funcs->mode_valid)
mode->status = connector_funcs->mode_valid(connector,
mode);
}
 
prune:
drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
 
if (list_empty(&connector->modes))
return 0;
 
list_for_each_entry(mode, &connector->modes, head)
mode->vrefresh = drm_mode_vrefresh(mode);
 
drm_mode_sort(&connector->modes);
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
connector->name);
list_for_each_entry(mode, &connector->modes, head) {
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
drm_mode_debug_printmodeline(mode);
}
 
return count;
}
 
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
* @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* Based on the helper callbacks implemented by @connector try to detect all
* valid modes. Modes will first be added to the connector's probed_modes list,
* then culled (based on validity and the @maxX, @maxY parameters) and put into
* the normal modes list.
*
* Intended to be used as a generic implementation of the ->fill_modes()
* @connector vfunc for drivers that use the crtc helpers for output mode
* filtering and detection.
*
* Returns:
* The number of modes found on @connector.
*/
int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, true);
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
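
/*
 * Illustrative sketch, not taken from this file: a connector funcs table
 * that plugs the probe helper in as its ->fill_modes() implementation.
 * example_connector_detect is a hypothetical driver callback; the other
 * entries are the standard helpers.
 */
static enum drm_connector_status
example_connector_detect(struct drm_connector *connector, bool force)
{
	/* hypothetical: a real driver would probe the hardware here */
	return connector_status_connected;
}

static const struct drm_connector_funcs example_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = example_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
};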
 
/**
* drm_helper_probe_single_connector_modes_nomerge - get complete set of display modes
* @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* This operates like drm_helper_probe_single_connector_modes() except that it
* replaces the mode bits instead of merging them for preferred modes.
*/
int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, false);
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes_nomerge);
 
/**
* drm_kms_helper_hotplug_event - fire off KMS hotplug events
* @dev: drm_device whose connector state changed
*
* This function fires off the uevent for userspace and also calls the
* output_poll_changed function, which is most commonly used to inform the fbdev
* emulation code and allow it to update the fbcon output configuration.
*
* Drivers should call this from their hotplug handling code when a change is
* detected. Note that this function does not do any output detection of its
* own, like drm_helper_hpd_irq_event() does - this is assumed to be done by the
* driver already.
*
* This function must be called from process context with no mode
* setting locks held.
*/
void drm_kms_helper_hotplug_event(struct drm_device *dev)
{
/* send a uevent + call fbdev */
// drm_sysfs_hotplug_event(dev);
// if (dev->mode_config.funcs->output_poll_changed)
// dev->mode_config.funcs->output_poll_changed(dev);
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
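
/*
 * Illustrative sketch, not part of this file: a driver-side hotplug work
 * function that defers to the helper above once the driver's interrupt
 * handler has re-detected the affected outputs. struct example_drm_private
 * and its members are hypothetical stand-ins for a driver's private data.
 */
struct example_drm_private {
	struct drm_device *dev;
	struct work_struct hotplug_work;
};

static void example_hotplug_work_func(struct work_struct *work)
{
	struct example_drm_private *priv =
		container_of(work, struct example_drm_private, hotplug_work);

	/* runs in process context with no modeset locks held, as required */
	drm_kms_helper_hotplug_event(priv->dev);
}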
 
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
struct drm_connector *connector;
enum drm_connector_status old_status;
bool repoll = false, changed = false;
 
if (!drm_kms_helper_poll)
return;
 
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
/* Ignore forced connectors. */
if (connector->force)
continue;
 
/* Ignore HPD capable connectors and connectors where we don't
* want any hotplug detection at all for polling. */
if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
continue;
 
repoll = true;
 
old_status = connector->status;
/* if we are connected and don't want to poll for disconnect
skip it */
if (old_status == connector_status_connected &&
!(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
continue;
 
connector->status = connector->funcs->detect(connector, false);
if (old_status != connector->status) {
const char *old, *new;
 
old = drm_get_connector_status_name(old_status);
new = drm_get_connector_status_name(connector->status);
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
"status updated from %s to %s\n",
connector->base.id,
connector->name,
old, new);
 
changed = true;
}
}
 
mutex_unlock(&dev->mode_config.mutex);
 
// if (changed)
// drm_kms_helper_hotplug_event(dev);
 
// if (repoll)
// schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
}
 
/**
* drm_kms_helper_poll_disable - disable output polling
* @dev: drm_device
*
* This function disables the output polling work.
*
* Drivers can call this helper from their device suspend implementation. It is
* not an error to call this even when output polling isn't enabled or already
* disabled.
*/
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
return;
// cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 
/**
* drm_kms_helper_poll_enable - re-enable output polling.
* @dev: drm_device
*
* This function re-enables the output polling work.
*
* Drivers can call this helper from their device resume implementation. It is
* an error to call this when the output polling support has not yet been set
* up.
*/
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
bool poll = false;
struct drm_connector *connector;
 
if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
return;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT))
poll = true;
}
 
// if (poll)
// schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
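
/*
 * Illustrative sketch, not from this file: pairing the poll disable/enable
 * helpers in a driver's suspend and resume paths. example_suspend and
 * example_resume are hypothetical driver entry points.
 */
static int example_suspend(struct drm_device *dev)
{
	drm_kms_helper_poll_disable(dev);
	/* ... save state and power the hardware down ... */
	return 0;
}

static int example_resume(struct drm_device *dev)
{
	/* ... power the hardware up and restore state ... */
	drm_kms_helper_poll_enable(dev);
	return 0;
}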
 
/**
* drm_kms_helper_poll_init - initialize and enable output polling
* @dev: drm_device
*
* This function initializes and then also enables output polling support for
* @dev. Drivers which do not have reliable hotplug support in hardware can use
* this helper infrastructure to regularly poll such connectors for changes in
* their connection state.
*
* Drivers can control which connectors are polled by setting the
* DRM_CONNECTOR_POLL_CONNECT and DRM_CONNECTOR_POLL_DISCONNECT flags. On
* connectors where probing live outputs can result in visual distortion, drivers
* should not set the DRM_CONNECTOR_POLL_DISCONNECT flag.
* Connectors which have no flag or only DRM_CONNECTOR_POLL_HPD set are
* completely ignored by the polling logic.
*
* Note that a connector can be both polled and probed from the hotplug handler,
* in case the hotplug interrupt is known to be unreliable.
*/
void drm_kms_helper_poll_init(struct drm_device *dev)
{
INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
dev->mode_config.poll_enabled = true;
 
drm_kms_helper_poll_enable(dev);
}
EXPORT_SYMBOL(drm_kms_helper_poll_init);
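
/*
 * Illustrative sketch, not from this file: a driver marking a connector
 * without a usable hotplug line for full polling before enabling the poll
 * work. example_output_init is a hypothetical driver function.
 */
static void example_output_init(struct drm_device *dev,
				struct drm_connector *connector)
{
	/* no reliable HPD on this output: poll for both plug and unplug */
	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
			    DRM_CONNECTOR_POLL_DISCONNECT;

	/* typically called once, after all connectors have been created */
	drm_kms_helper_poll_init(dev);
}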
 
/**
* drm_kms_helper_poll_fini - disable output polling and clean it up
* @dev: drm_device
*/
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
drm_kms_helper_poll_disable(dev);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
 
/**
* drm_helper_hpd_irq_event - hotplug processing
* @dev: drm_device
*
* Drivers can use this helper function to run a detect cycle on all connectors
* which have the DRM_CONNECTOR_POLL_HPD flag set in their &polled member. All
* other connectors are ignored, which is useful to avoid reprobing fixed
* panels.
*
* This helper function is useful for drivers which can't or don't track hotplug
* interrupts for each connector.
*
* Drivers which support hotplug interrupts for each connector individually and
* which have a more fine-grained detect logic should bypass this code and
* directly call drm_kms_helper_hotplug_event() in case the connector state
* changed.
*
* This function must be called from process context with no mode
* setting locks held.
*
* Note that a connector can be both polled and probed from the hotplug handler,
* in case the hotplug interrupt is known to be unreliable.
*/
bool drm_helper_hpd_irq_event(struct drm_device *dev)
{
struct drm_connector *connector;
enum drm_connector_status old_status;
bool changed = false;
 
if (!dev->mode_config.poll_enabled)
return false;
 
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
/* Only handle HPD capable connectors. */
if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
 
old_status = connector->status;
 
connector->status = connector->funcs->detect(connector, false);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
connector->name,
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));
if (old_status != connector->status)
changed = true;
}
 
mutex_unlock(&dev->mode_config.mutex);
 
if (changed)
drm_kms_helper_hotplug_event(dev);
 
return changed;
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
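
/*
 * Illustrative sketch, not from this file: a driver with a single, coarse
 * hotplug interrupt can hand re-detection to the helper above from its
 * bottom half. example_hotplug_bottom_half is a hypothetical name.
 */
static void example_hotplug_bottom_half(struct drm_device *dev)
{
	/* re-detects every DRM_CONNECTOR_POLL_HPD connector and fires the
	 * hotplug event only if a connector actually changed state */
	if (drm_helper_hpd_irq_event(dev))
		DRM_DEBUG_KMS("hotplug handled, connector state changed\n");
}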
/drivers/video/drm/drm_rect.c
293,3 → 293,143
DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
}
EXPORT_SYMBOL(drm_rect_debug_print);
 
/**
* drm_rect_rotate - Rotate the rectangle
* @r: rectangle to be rotated
* @width: Width of the coordinate space
* @height: Height of the coordinate space
* @rotation: Transformation to be applied
*
* Apply @rotation to the coordinates of rectangle @r.
*
* @width and @height combined with @rotation define
* the location of the new origin.
*
* @width corresponds to the horizontal and @height
* to the vertical axis of the untransformed coordinate
* space.
*/
void drm_rect_rotate(struct drm_rect *r,
int width, int height,
unsigned int rotation)
{
struct drm_rect tmp;
 
if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
tmp = *r;
 
if (rotation & BIT(DRM_REFLECT_X)) {
r->x1 = width - tmp.x2;
r->x2 = width - tmp.x1;
}
 
if (rotation & BIT(DRM_REFLECT_Y)) {
r->y1 = height - tmp.y2;
r->y2 = height - tmp.y1;
}
}
 
switch (rotation & 0xf) {
case BIT(DRM_ROTATE_0):
break;
case BIT(DRM_ROTATE_90):
tmp = *r;
r->x1 = tmp.y1;
r->x2 = tmp.y2;
r->y1 = width - tmp.x2;
r->y2 = width - tmp.x1;
break;
case BIT(DRM_ROTATE_180):
tmp = *r;
r->x1 = width - tmp.x2;
r->x2 = width - tmp.x1;
r->y1 = height - tmp.y2;
r->y2 = height - tmp.y1;
break;
case BIT(DRM_ROTATE_270):
tmp = *r;
r->x1 = height - tmp.y2;
r->x2 = height - tmp.y1;
r->y1 = tmp.x1;
r->y2 = tmp.x2;
break;
default:
break;
}
}
EXPORT_SYMBOL(drm_rect_rotate);
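
/*
 * Illustrative sketch, not from this file: rotating a 100x50 rectangle at
 * (10,20) by 90 degrees inside a 640x480 source coordinate space.
 */
static void example_rotate_rect(void)
{
	struct drm_rect r = { .x1 = 10, .y1 = 20, .x2 = 110, .y2 = 70 };

	drm_rect_rotate(&r, 640, 480, BIT(DRM_ROTATE_90));
	/* r is now { .x1 = 20, .y1 = 530, .x2 = 70, .y2 = 630 } */
}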
 
/**
* drm_rect_rotate_inv - Inverse rotate the rectangle
* @r: rectangle to be rotated
* @width: Width of the coordinate space
* @height: Height of the coordinate space
* @rotation: Transformation whose inverse is to be applied
*
* Apply the inverse of @rotation to the coordinates
* of rectangle @r.
*
* @width and @height combined with @rotation define
* the location of the new origin.
*
* @width corresponds to the horizontal and @height
* to the vertical axis of the original untransformed
* coordinate space, so that you never have to flip
* them when doing a rotation and its inverse.
* That is, if you do:
*
* drm_rect_rotate(&r, width, height, rotation);
* drm_rect_rotate_inv(&r, width, height, rotation);
*
* you will always get back the original rectangle.
*/
void drm_rect_rotate_inv(struct drm_rect *r,
int width, int height,
unsigned int rotation)
{
struct drm_rect tmp;
 
switch (rotation & 0xf) {
case BIT(DRM_ROTATE_0):
break;
case BIT(DRM_ROTATE_90):
tmp = *r;
r->x1 = width - tmp.y2;
r->x2 = width - tmp.y1;
r->y1 = tmp.x1;
r->y2 = tmp.x2;
break;
case BIT(DRM_ROTATE_180):
tmp = *r;
r->x1 = width - tmp.x2;
r->x2 = width - tmp.x1;
r->y1 = height - tmp.y2;
r->y2 = height - tmp.y1;
break;
case BIT(DRM_ROTATE_270):
tmp = *r;
r->x1 = tmp.y1;
r->x2 = tmp.y2;
r->y1 = height - tmp.x2;
r->y2 = height - tmp.x1;
break;
default:
break;
}
 
if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
tmp = *r;
 
if (rotation & BIT(DRM_REFLECT_X)) {
r->x1 = width - tmp.x2;
r->x2 = width - tmp.x1;
}
 
if (rotation & BIT(DRM_REFLECT_Y)) {
r->y1 = height - tmp.y2;
r->y2 = height - tmp.y1;
}
}
}
EXPORT_SYMBOL(drm_rect_rotate_inv);
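
/*
 * Illustrative sketch, not from this file: applying a transformation and
 * then its inverse with the same untransformed @width/@height restores the
 * original rectangle, as the comment above describes.
 */
static void example_rotate_roundtrip(struct drm_rect *r,
				     int width, int height,
				     unsigned int rotation)
{
	drm_rect_rotate(r, width, height, rotation);
	/* ... work with the rectangle in the transformed space ... */
	drm_rect_rotate_inv(r, width, height, rotation);
	/* *r now holds its original coordinates again */
}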
/drivers/video/drm/drm_stub.c
1,10 → 1,3
/**
* \file drm_stub.h
* Stub support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
*/
 
/*
* Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
*
11,6 → 4,8
* Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Author Rickard E. (Rik) Faith <faith@valinux.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
31,6 → 26,7
* DEALINGS IN THE SOFTWARE.
*/
 
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
48,6 → 44,10
unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */
EXPORT_SYMBOL(drm_rnodes);
 
/* 1 to allow user space to request universal planes (experimental) */
unsigned int drm_universal_planes = 0;
EXPORT_SYMBOL(drm_universal_planes);
 
unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);
 
54,6 → 54,12
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
 
/*
* Default to use monotonic timestamps for wait-for-vblank and page-flip
* complete events.
*/
unsigned int drm_timestamp_monotonic = 1;
 
struct idr drm_minors_idr;
int drm_err(const char *func, const char *format, ...)
{
74,10 → 80,7
}
EXPORT_SYMBOL(drm_err);
 
void drm_ut_debug_printk(unsigned int request_level,
const char *prefix,
const char *function_name,
const char *format, ...)
void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
struct va_format vaf;
va_list args;
92,11 → 95,415
}
EXPORT_SYMBOL(drm_ut_debug_printk);
 
#if 0
struct drm_master *drm_master_create(struct drm_minor *minor)
{
struct drm_master *master;
 
master = kzalloc(sizeof(*master), GFP_KERNEL);
if (!master)
return NULL;
 
kref_init(&master->refcount);
spin_lock_init(&master->lock.spinlock);
init_waitqueue_head(&master->lock.lock_queue);
if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
kfree(master);
return NULL;
}
INIT_LIST_HEAD(&master->magicfree);
master->minor = minor;
 
return master;
}
 
struct drm_master *drm_master_get(struct drm_master *master)
{
kref_get(&master->refcount);
return master;
}
EXPORT_SYMBOL(drm_master_get);
 
static void drm_master_destroy(struct kref *kref)
{
struct drm_master *master = container_of(kref, struct drm_master, refcount);
struct drm_magic_entry *pt, *next;
struct drm_device *dev = master->minor->dev;
struct drm_map_list *r_list, *list_temp;
 
mutex_lock(&dev->struct_mutex);
if (dev->driver->master_destroy)
dev->driver->master_destroy(dev, master);
 
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
if (r_list->master == master) {
drm_rmmap_locked(dev, r_list->map);
r_list = NULL;
}
}
 
if (master->unique) {
kfree(master->unique);
master->unique = NULL;
master->unique_len = 0;
}
 
list_for_each_entry_safe(pt, next, &master->magicfree, head) {
list_del(&pt->head);
drm_ht_remove_item(&master->magiclist, &pt->hash_item);
kfree(pt);
}
 
drm_ht_remove(&master->magiclist);
 
mutex_unlock(&dev->struct_mutex);
kfree(master);
}
 
void drm_master_put(struct drm_master **master)
{
kref_put(&(*master)->refcount, drm_master_destroy);
*master = NULL;
}
EXPORT_SYMBOL(drm_master_put);
 
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret = 0;
 
mutex_lock(&dev->master_mutex);
if (file_priv->is_master)
goto out_unlock;
 
if (file_priv->minor->master) {
ret = -EINVAL;
goto out_unlock;
}
 
if (!file_priv->master) {
ret = -EINVAL;
goto out_unlock;
}
 
file_priv->minor->master = drm_master_get(file_priv->master);
file_priv->is_master = 1;
if (dev->driver->master_set) {
ret = dev->driver->master_set(dev, file_priv, false);
if (unlikely(ret != 0)) {
file_priv->is_master = 0;
drm_master_put(&file_priv->minor->master);
}
}
 
out_unlock:
mutex_unlock(&dev->master_mutex);
return ret;
}
 
int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret = -EINVAL;
 
mutex_lock(&dev->master_mutex);
if (!file_priv->is_master)
goto out_unlock;
 
if (!file_priv->minor->master)
goto out_unlock;
 
ret = 0;
if (dev->driver->master_drop)
dev->driver->master_drop(dev, file_priv, false);
drm_master_put(&file_priv->minor->master);
file_priv->is_master = 0;
 
out_unlock:
mutex_unlock(&dev->master_mutex);
return ret;
}
 
/*
* DRM Minors
* A DRM device can provide several char-dev interfaces on the DRM-Major. Each
* of them is represented by a drm_minor object. Depending on the capabilities
* of the device-driver, different interfaces are registered.
*
* Minors can be accessed via dev->$minor_name. This pointer is either
* NULL or a valid drm_minor pointer and stays valid as long as the device is
* valid. This means DRM minors have the same lifetime as the underlying
* device. However, this doesn't mean that the minor is active. Minors are
* registered and unregistered dynamically according to device-state.
*/
 
static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
unsigned int type)
{
switch (type) {
case DRM_MINOR_LEGACY:
return &dev->primary;
case DRM_MINOR_RENDER:
return &dev->render;
case DRM_MINOR_CONTROL:
return &dev->control;
default:
return NULL;
}
}
 
static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
struct drm_minor *minor;
 
minor = kzalloc(sizeof(*minor), GFP_KERNEL);
if (!minor)
return -ENOMEM;
 
minor->type = type;
minor->dev = dev;
 
*drm_minor_get_slot(dev, type) = minor;
return 0;
}
 
static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
struct drm_minor **slot;
 
slot = drm_minor_get_slot(dev, type);
if (*slot) {
drm_mode_group_destroy(&(*slot)->mode_group);
kfree(*slot);
*slot = NULL;
}
}
 
static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
struct drm_minor *new_minor;
unsigned long flags;
int ret;
int minor_id;
 
DRM_DEBUG("\n");
 
new_minor = *drm_minor_get_slot(dev, type);
if (!new_minor)
return 0;
 
idr_preload(GFP_KERNEL);
spin_lock_irqsave(&drm_minor_lock, flags);
minor_id = idr_alloc(&drm_minors_idr,
NULL,
64 * type,
64 * (type + 1),
GFP_NOWAIT);
spin_unlock_irqrestore(&drm_minor_lock, flags);
idr_preload_end();
 
if (minor_id < 0)
return minor_id;
 
new_minor->index = minor_id;
 
ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
goto err_id;
}
 
ret = drm_sysfs_device_add(new_minor);
if (ret) {
DRM_ERROR("DRM: Error sysfs_device_add.\n");
goto err_debugfs;
}
 
/* replace NULL with @minor so lookups will succeed from now on */
spin_lock_irqsave(&drm_minor_lock, flags);
idr_replace(&drm_minors_idr, new_minor, new_minor->index);
spin_unlock_irqrestore(&drm_minor_lock, flags);
 
DRM_DEBUG("new minor assigned %d\n", minor_id);
return 0;
 
err_debugfs:
drm_debugfs_cleanup(new_minor);
err_id:
spin_lock_irqsave(&drm_minor_lock, flags);
idr_remove(&drm_minors_idr, minor_id);
spin_unlock_irqrestore(&drm_minor_lock, flags);
new_minor->index = 0;
return ret;
}
 
static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
struct drm_minor *minor;
unsigned long flags;
 
minor = *drm_minor_get_slot(dev, type);
if (!minor || !minor->kdev)
return;
 
spin_lock_irqsave(&drm_minor_lock, flags);
idr_remove(&drm_minors_idr, minor->index);
spin_unlock_irqrestore(&drm_minor_lock, flags);
minor->index = 0;
 
drm_debugfs_cleanup(minor);
drm_sysfs_device_remove(minor);
}
 
/**
* drm_minor_acquire - Acquire a DRM minor
* @minor_id: Minor ID of the DRM-minor
*
* Looks up the given minor-ID and returns the respective DRM-minor object. The
* reference count of the underlying device is increased, so you must release this
* object with drm_minor_release().
*
* As long as you hold this minor, it is guaranteed that the object and the
* minor->dev pointer will stay valid! However, the device may get unplugged and
* unregistered while you hold the minor.
*
* Returns:
* Pointer to minor-object with increased device-refcount, or PTR_ERR on
* failure.
*/
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
struct drm_minor *minor;
unsigned long flags;
 
spin_lock_irqsave(&drm_minor_lock, flags);
minor = idr_find(&drm_minors_idr, minor_id);
if (minor)
drm_dev_ref(minor->dev);
spin_unlock_irqrestore(&drm_minor_lock, flags);
 
if (!minor) {
return ERR_PTR(-ENODEV);
} else if (drm_device_is_unplugged(minor->dev)) {
drm_dev_unref(minor->dev);
return ERR_PTR(-ENODEV);
}
 
return minor;
}
 
/**
* drm_minor_release - Release DRM minor
* @minor: Pointer to DRM minor object
*
* Release a minor that was previously acquired via drm_minor_acquire().
*/
void drm_minor_release(struct drm_minor *minor)
{
drm_dev_unref(minor->dev);
}
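
/*
 * Illustrative sketch, not from this file: the acquire/release pairing
 * described above. The minor id would normally come from the character
 * device node that userspace opened; example_use_minor is hypothetical.
 */
static int example_use_minor(unsigned int minor_id)
{
	struct drm_minor *minor;

	minor = drm_minor_acquire(minor_id);
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	/* minor and minor->dev stay valid while the reference is held */

	drm_minor_release(minor);
	return 0;
}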
 
/**
* drm_put_dev - Unregister and release a DRM device
* @dev: DRM device
*
* Called at module unload time or when a PCI device is unplugged.
*
* Use of this function is discouraged. It will eventually go away completely.
* Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
*
* Cleans up the DRM device, calling drm_lastclose().
*/
void drm_put_dev(struct drm_device *dev)
{
DRM_DEBUG("\n");
 
if (!dev) {
DRM_ERROR("cleanup called no dev\n");
return;
}
 
drm_dev_unregister(dev);
drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);
 
void drm_unplug_dev(struct drm_device *dev)
{
/* for a USB device */
drm_minor_unregister(dev, DRM_MINOR_LEGACY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
drm_minor_unregister(dev, DRM_MINOR_CONTROL);
 
mutex_lock(&drm_global_mutex);
 
drm_device_set_unplugged(dev);
 
if (dev->open_count == 0) {
drm_put_dev(dev);
}
mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);
 
/*
* DRM internal mount
* We want to be able to allocate our own "struct address_space" to control
* memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
* stand-alone address_space objects, so we need an underlying inode. As there
* is no way to allocate an independent inode easily, we need a fake internal
* VFS mount-point.
*
* The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
* frees it again. You are allowed to use iget() and iput() to get references to
* the inode. But each drm_fs_inode_new() call must be paired with exactly one
* drm_fs_inode_free() call (which does not have to be the last iput()).
* We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
* between multiple inode-users. You could, technically, call
* iget() + drm_fs_inode_free() directly after alloc and sometime later do an
* iput(), but this way you'd end up with a new vfsmount for each inode.
*/
 
static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;
 
static const struct dentry_operations drm_fs_dops = {
.d_dname = simple_dname,
};
 
static const struct super_operations drm_fs_sops = {
.statfs = simple_statfs,
};
 
static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data)
{
return mount_pseudo(fs_type,
"drm:",
&drm_fs_sops,
&drm_fs_dops,
0x010203ff);
}
 
static struct file_system_type drm_fs_type = {
.name = "drm",
.owner = THIS_MODULE,
.mount = drm_fs_mount,
.kill_sb = kill_anon_super,
};
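
/*
 * Illustrative sketch, assuming the drm_fs_inode_new()/drm_fs_inode_free()
 * pair described in the comment above (those functions are not shown in
 * this hunk). The example_anon_mapping_* names are hypothetical.
 */
static struct inode *example_anon_mapping_inode;

static int example_anon_mapping_init(void)
{
	example_anon_mapping_inode = drm_fs_inode_new();
	if (IS_ERR(example_anon_mapping_inode))
		return PTR_ERR(example_anon_mapping_inode);
	return 0;
}

static void example_anon_mapping_fini(void)
{
	/* exactly one free call per successful drm_fs_inode_new() */
	drm_fs_inode_free(example_anon_mapping_inode);
}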
 
#endif
 
 
 
 
 
int drm_fill_in_dev(struct drm_device *dev,
const struct pci_device_id *ent,
struct drm_driver *driver)
{
int retcode;
int ret;
dev->driver = driver;
 
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->ctxlist);
104,7 → 511,7
INIT_LIST_HEAD(&dev->maplist);
INIT_LIST_HEAD(&dev->vblank_event_list);
 
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->buf_lock);
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
113,22 → 520,21
// return -ENOMEM;
// }
 
dev->driver = driver;
 
 
if (driver->driver_features & DRIVER_GEM) {
retcode = drm_gem_init(dev);
if (retcode) {
DRM_ERROR("Cannot initialize graphics execution "
"manager (GEM)\n");
goto error_out_unreg;
ret = drm_gem_init(dev);
if (ret) {
DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
goto err_ctxbitmap;
}
}
 
return 0;
 
error_out_unreg:
err_ctxbitmap:
// drm_lastclose(dev);
return retcode;
return ret;
}
EXPORT_SYMBOL(drm_fill_in_dev);
/**
160,15 → 566,16
asm volatile("clflush %0" : "+m" (*(volatile char*)__p));
}
 
void
drm_clflush_virt_range(char *addr, unsigned long length)
void drm_clflush_virt_range(void *addr, unsigned long length)
{
char *end = addr + length;
char *tmp = addr;
char *end = tmp + length;
mb();
for (; addr < end; addr += x86_clflush_size)
clflush(addr);
for (; tmp < end; tmp += x86_clflush_size)
clflush(tmp);
clflush(end - 1);
mb();
return;
}
 
 
/drivers/video/drm/i915/Gtt/agp.h
247,6 → 247,7
 
/* Chipset independent registers (from AGP Spec) */
#define AGP_APBASE 0x10
#define AGP_APERTURE_BAR 0
 
#define AGPSTAT 0x4
#define AGPCMD 0x8
/drivers/video/drm/i915/Gtt/intel-agp.h
55,8 → 55,8
#define INTEL_I860_ERRSTS 0xc8
 
/* Intel i810 registers */
#define I810_GMADDR 0x10
#define I810_MMADDR 0x14
#define I810_GMADR_BAR 0
#define I810_MMADR_BAR 1
#define I810_PTE_BASE 0x10000
#define I810_PTE_MAIN_UNCACHED 0x00000000
#define I810_PTE_LOCAL 0x00000002
113,9 → 113,9
#define INTEL_I850_ERRSTS 0xc8
 
/* intel 915G registers */
#define I915_GMADDR 0x18
#define I915_MMADDR 0x10
#define I915_PTEADDR 0x1C
#define I915_GMADR_BAR 2
#define I915_MMADR_BAR 0
#define I915_PTE_BAR 3
#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
/drivers/video/drm/i915/Gtt/intel-gtt.c
91,7 → 91,7
struct pci_dev *pcidev; /* device one */
struct pci_dev *bridge_dev;
u8 __iomem *registers;
phys_addr_t gtt_bus_addr;
phys_addr_t gtt_phys_addr;
u32 PGETBL_save;
u32 __iomem *gtt; /* I915G */
bool clear_fake_agp; /* on first access via agp, fill with scratch */
399,9 → 399,8
 
static int intel_gtt_init(void)
{
u32 gma_addr;
u32 gtt_map_size;
int ret;
int ret, bar;
 
ret = intel_private.driver->setup();
if (ret != 0)
427,7 → 426,7
 
intel_private.gtt = NULL;
if (intel_private.gtt == NULL)
intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
gtt_map_size);
if (intel_private.gtt == NULL) {
intel_private.driver->cleanup();
435,7 → 434,9
return -ENOMEM;
}
 
asm volatile("wbinvd":::"memory");
#if IS_ENABLED(CONFIG_AGP_INTEL)
global_cache_flush(); /* FIXME: ? */
#endif
 
intel_private.stolen_size = intel_gtt_stolen_size();
 
448,18 → 449,15
}
 
if (INTEL_GTT_GEN <= 2)
pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
&gma_addr);
bar = I810_GMADR_BAR;
else
pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
&gma_addr);
bar = I915_GMADR_BAR;
 
intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
 
 
intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
return 0;
}
 
 
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
552,6 → 550,7
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
 
#if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gtt_insert_pages(unsigned int first_entry,
unsigned int num_entries,
struct page **pages,
567,7 → 566,58
readl(intel_private.gtt+j-1);
}
 
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
off_t pg_start, int type)
{
int ret = -EINVAL;
 
if (intel_private.clear_fake_agp) {
int start = intel_private.stolen_size / PAGE_SIZE;
int end = intel_private.gtt_mappable_entries;
intel_gtt_clear_range(start, end - start);
intel_private.clear_fake_agp = false;
}
 
if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
return i810_insert_dcache_entries(mem, pg_start, type);
 
if (mem->page_count == 0)
goto out;
 
if (pg_start + mem->page_count > intel_private.gtt_total_entries)
goto out_err;
 
if (type != mem->type)
goto out_err;
 
if (!intel_private.driver->check_flags(type))
goto out_err;
 
if (!mem->is_flushed)
global_cache_flush();
 
if (intel_private.needs_dmar) {
struct sg_table st;
 
ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
if (ret != 0)
return ret;
 
intel_gtt_insert_sg_entries(&st, pg_start, type);
mem->sg_list = st.sgl;
mem->num_sg = st.nents;
} else
intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
type);
 
out:
ret = 0;
out_err:
mem->is_flushed = true;
return ret;
}
#endif
 
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
unsigned int i;
693,13 → 743,11
 
static int i9xx_setup(void)
{
u32 reg_addr, gtt_addr;
phys_addr_t reg_addr;
int size = KB(512);
 
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);
 
reg_addr &= 0xfff80000;
 
intel_private.registers = ioremap(reg_addr, size);
if (!intel_private.registers)
return -ENOMEM;
706,15 → 754,14
 
switch (INTEL_GTT_GEN) {
case 3:
pci_read_config_dword(intel_private.pcidev,
I915_PTEADDR, &gtt_addr);
intel_private.gtt_bus_addr = gtt_addr;
intel_private.gtt_phys_addr =
pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
break;
case 5:
intel_private.gtt_bus_addr = reg_addr + MB(2);
intel_private.gtt_phys_addr = reg_addr + MB(2);
break;
default:
intel_private.gtt_bus_addr = reg_addr + KB(512);
intel_private.gtt_phys_addr = reg_addr + KB(512);
break;
}
 
894,10 → 941,13
 
intel_private.refcount++;
 
#if IS_ENABLED(CONFIG_AGP_INTEL)
if (bridge) {
bridge->driver = &intel_fake_agp_driver;
bridge->dev_private_data = &intel_private;
bridge->dev = bridge_pdev;
}
#endif
 
intel_private.bridge_dev = bridge_pdev;
 
/drivers/video/drm/i915/Makefile
9,18 → 9,20
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRM_TOPDIR = $(CURDIR)/..
 
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
-I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/asm \
-I./ -I./render
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/linux \
-I$(DRV_INCLUDES)/linux/asm -I$(DRV_INCLUDES)/linux/uapi -I./
 
CFLAGS= -c -O2 $(INCLUDES) -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf
CFLAGS= -c -O2 $(INCLUDES) $(DEFINES) -march=i686 -msse2 -fomit-frame-pointer -fno-ident -fno-builtin-printf
CFLAGS+= -mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields
 
LIBPATH:= $(DDK_TOPDIR)/
LIBPATH:= $(DDK_TOPDIR)
 
LIBS:= -lddk -lcore -lgcc
 
LDFLAGS = -nostdlib -shared -s -Map i915.map --image-base 0\
PE_FLAGS = --major-os-version 0 --minor-os-version 7 --major-subsystem-version 0 \
--minor-subsystem-version 5 --subsystem native
 
LDFLAGS = -nostdlib -shared -s $(PE_FLAGS) --image-base 0\
--file-alignment 512 --section-alignment 4096
 
 
46,23 → 48,29
dvo_ns2501.c \
dvo_sil164.c \
dvo_tfp410.c \
i915_cmd_parser.c \
i915_dma.c \
i915_drv.c \
i915_gem.c \
i915_gem_context.c \
i915_gem_execbuffer.c \
i915_gem_evict.c \
i915_gem_gtt.c \
i915_gem_render_state.c \
i915_gem_stolen.c \
i915_gem_tiling.c \
i915_gpu_error.c \
i915_irq.c \
i915_params.c \
intel_bios.c \
intel_crt.c \
intel_ddi.c \
intel_display.c \
intel_dp.c \
intel_dp_mst.c \
intel_dsi.c \
intel_dsi_cmd.c \
intel_dsi_panel_vbt.c \
intel_dsi_pll.c \
intel_dvo.c \
intel_fbdev.c \
72,6 → 80,9
intel_modes.c \
intel_panel.c \
intel_pm.c \
intel_renderstate_gen6.c \
intel_renderstate_gen7.c \
intel_renderstate_gen8.c \
intel_ringbuffer.c \
intel_sdvo.c \
intel_sideband.c \
92,6 → 103,7
$(DRM_TOPDIR)/drm_crtc.c \
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_helper.c \
../drm_dp_mst_topology.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_gem.c \
98,7 → 110,10
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_modeset_lock.c \
$(DRM_TOPDIR)/drm_pci.c \
$(DRM_TOPDIR)/drm_plane_helper.c \
$(DRM_TOPDIR)/drm_probe_helper.c \
$(DRM_TOPDIR)/drm_rect.c \
$(DRM_TOPDIR)/drm_stub.c
 
112,12 → 127,12
 
all: $(NAME).dll
 
$(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) $(HFILES) i915.lds Makefile
$(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) i915.lds Makefile
ld -L$(LIBPATH) $(LDFLAGS) -T i915.lds -o $@ $(NAME_OBJS) $(LIBS)
 
 
%.o : %.c $(HFILES) Makefile
$(CC) $(CFLAGS) $(DEFINES) -o $@ $<
$(CC) $(CFLAGS) -o $@ $<
 
%.o : %.S $(HFILES) Makefile
as -o $@ $<
/drivers/video/drm/i915/Makefile.lto
8,11 → 8,10
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRM_TOPDIR = $(CURDIR)/..
 
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
-I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/asm \
-I./ -I./render
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/linux \
-I$(DRV_INCLUDES)/linux/asm -I$(DRV_INCLUDES)/linux/uapi -I./
 
CFLAGS_OPT = -Os -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf -mno-stack-arg-probe
CFLAGS_OPT = -Os -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf -fno-ident -mno-stack-arg-probe
CFLAGS_OPT+= -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields -flto
CFLAGS = -c $(INCLUDES) $(DEFINES) $(CFLAGS_OPT)
 
20,9 → 19,11
 
LIBS:= -lddk -lcore -lgcc
 
LDFLAGS = -e,_drvEntry,-nostdlib,-shared,-s,--image-base,0,--file-alignment,512,--section-alignment,4096
PE_FLAGS = --major-os-version,0,--minor-os-version,7,--major-subsystem-version,0,--minor-subsystem-version,5,--subsystem,native
 
LDFLAGS = -e,_drvEntry,-nostdlib,-shared,-s,$(PE_FLAGS),--image-base,0,--file-alignment,512,--section-alignment,4096
 
 
NAME:= i915
 
HFILES:= $(DRV_INCLUDES)/linux/types.h \
45,23 → 46,29
dvo_ns2501.c \
dvo_sil164.c \
dvo_tfp410.c \
i915_cmd_parser.c \
i915_dma.c \
i915_drv.c \
i915_gem.c \
i915_gem_context.c \
i915_gem_execbuffer.c \
i915_gem_evict.c \
i915_gem_gtt.c \
i915_gem_render_state.c \
i915_gem_stolen.c \
i915_gem_tiling.c \
i915_gpu_error.c \
i915_irq.c \
i915_params.c \
intel_bios.c \
intel_crt.c \
intel_ddi.c \
intel_display.c \
intel_dp.c \
intel_dp_mst.c \
intel_dsi.c \
intel_dsi_cmd.c \
intel_dsi_panel_vbt.c \
intel_dsi_pll.c \
intel_dvo.c \
intel_fbdev.c \
71,6 → 78,9
intel_modes.c \
intel_panel.c \
intel_pm.c \
intel_renderstate_gen6.c \
intel_renderstate_gen7.c \
intel_renderstate_gen8.c \
intel_ringbuffer.c \
intel_sdvo.c \
intel_sideband.c \
91,6 → 101,7
$(DRM_TOPDIR)/drm_crtc.c \
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_helper.c \
../drm_dp_mst_topology.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_gem.c \
97,7 → 108,10
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_modeset_lock.c \
$(DRM_TOPDIR)/drm_pci.c \
$(DRM_TOPDIR)/drm_plane_helper.c \
$(DRM_TOPDIR)/drm_probe_helper.c \
$(DRM_TOPDIR)/drm_rect.c \
$(DRM_TOPDIR)/drm_stub.c
 
111,7 → 125,7
 
all: $(NAME).dll
 
$(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) $(HFILES) i915.lds Makefile.lto
$(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) i915.lds Makefile.lto
$(CC) $(CFLAGS_OPT) -fwhole-program -nostdlib -Wl,-L$(LIBPATH),$(LDFLAGS),-T,i915.lds -o $@ $(NAME_OBJS) $(LIBS)
 
 
/drivers/video/drm/i915/dvo_ch7xxx.c
160,7 → 160,7
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
}
 
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
340,9 → 340,9
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
uint8_t val;
if ((i % 8) == 0)
DRM_LOG_KMS("\n %02X: ", i);
DRM_DEBUG_KMS("\n %02X: ", i);
ch7xxx_readb(dvo, i, &val);
DRM_LOG_KMS("%02X ", val);
DRM_DEBUG_KMS("%02X ", val);
}
}
 
/drivers/video/drm/i915/dvo_ivch.c
195,7 → 195,7
if (i2c_transfer(adapter, msgs, 3) == 3) {
*data = (in_buf[1] << 8) | in_buf[0];
return true;
};
}
 
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from "
377,41 → 377,41
uint16_t val;
 
ivch_read(dvo, VR00, &val);
DRM_LOG_KMS("VR00: 0x%04x\n", val);
DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
ivch_read(dvo, VR01, &val);
DRM_LOG_KMS("VR01: 0x%04x\n", val);
DRM_DEBUG_KMS("VR01: 0x%04x\n", val);
ivch_read(dvo, VR30, &val);
DRM_LOG_KMS("VR30: 0x%04x\n", val);
DRM_DEBUG_KMS("VR30: 0x%04x\n", val);
ivch_read(dvo, VR40, &val);
DRM_LOG_KMS("VR40: 0x%04x\n", val);
DRM_DEBUG_KMS("VR40: 0x%04x\n", val);
 
/* GPIO registers */
ivch_read(dvo, VR80, &val);
DRM_LOG_KMS("VR80: 0x%04x\n", val);
DRM_DEBUG_KMS("VR80: 0x%04x\n", val);
ivch_read(dvo, VR81, &val);
DRM_LOG_KMS("VR81: 0x%04x\n", val);
DRM_DEBUG_KMS("VR81: 0x%04x\n", val);
ivch_read(dvo, VR82, &val);
DRM_LOG_KMS("VR82: 0x%04x\n", val);
DRM_DEBUG_KMS("VR82: 0x%04x\n", val);
ivch_read(dvo, VR83, &val);
DRM_LOG_KMS("VR83: 0x%04x\n", val);
DRM_DEBUG_KMS("VR83: 0x%04x\n", val);
ivch_read(dvo, VR84, &val);
DRM_LOG_KMS("VR84: 0x%04x\n", val);
DRM_DEBUG_KMS("VR84: 0x%04x\n", val);
ivch_read(dvo, VR85, &val);
DRM_LOG_KMS("VR85: 0x%04x\n", val);
DRM_DEBUG_KMS("VR85: 0x%04x\n", val);
ivch_read(dvo, VR86, &val);
DRM_LOG_KMS("VR86: 0x%04x\n", val);
DRM_DEBUG_KMS("VR86: 0x%04x\n", val);
ivch_read(dvo, VR87, &val);
DRM_LOG_KMS("VR87: 0x%04x\n", val);
DRM_DEBUG_KMS("VR87: 0x%04x\n", val);
ivch_read(dvo, VR88, &val);
DRM_LOG_KMS("VR88: 0x%04x\n", val);
DRM_DEBUG_KMS("VR88: 0x%04x\n", val);
 
/* Scratch register 0 - AIM Panel type */
ivch_read(dvo, VR8E, &val);
DRM_LOG_KMS("VR8E: 0x%04x\n", val);
DRM_DEBUG_KMS("VR8E: 0x%04x\n", val);
 
/* Scratch register 1 - Status register */
ivch_read(dvo, VR8F, &val);
DRM_LOG_KMS("VR8F: 0x%04x\n", val);
DRM_DEBUG_KMS("VR8F: 0x%04x\n", val);
}
 
static void ivch_destroy(struct intel_dvo_device *dvo)
/drivers/video/drm/i915/dvo_ns2501.c
121,7 → 121,7
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
}
 
if (!ns->quiet) {
DRM_DEBUG_KMS
233,9 → 233,8
struct drm_display_mode *mode)
{
DRM_DEBUG_KMS
("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
__FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
mode->vtotal);
("is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
 
/*
* Currently, these are all the modes I have data from.
261,9 → 260,8
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
 
DRM_DEBUG_KMS
("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
__FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
mode->vtotal);
("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
 
/*
* Where do I find the native resolution for which scaling is not required???
277,8 → 275,7
if (mode->hdisplay == 800 && mode->vdisplay == 600) {
/* mode 277 */
ns->reg_8_shadow &= ~NS2501_8_BPAS;
DRM_DEBUG_KMS("%s: switching to 800x600\n",
__FUNCTION__);
DRM_DEBUG_KMS("switching to 800x600\n");
 
/*
* No, I do not know where this data comes from.
341,8 → 338,7
 
} else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
/* mode 274 */
DRM_DEBUG_KMS("%s: switching to 640x480\n",
__FUNCTION__);
DRM_DEBUG_KMS("switching to 640x480\n");
/*
* No, I do not know where this data comes from.
* It is just what the video bios left in the DVO, so
406,8 → 402,7
 
} else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
/* mode 280 */
DRM_DEBUG_KMS("%s: switching to 1024x768\n",
__FUNCTION__);
DRM_DEBUG_KMS("switching to 1024x768\n");
/*
* This might or might not work, actually. I'm silently
* assuming here that the native panel resolution is
458,8 → 453,7
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
unsigned char ch;
 
DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n",
__FUNCTION__, enable);
DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable);
 
ch = ns->reg_8_shadow;
 
490,15 → 484,15
uint8_t val;
 
ns2501_readb(dvo, NS2501_FREQ_LO, &val);
DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
DRM_DEBUG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_FREQ_HI, &val);
DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
DRM_DEBUG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_REG8, &val);
DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
DRM_DEBUG_KMS("NS2501_REG8: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_REG9, &val);
DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
DRM_DEBUG_KMS("NS2501_REG9: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_REGC, &val);
DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
DRM_DEBUG_KMS("NS2501_REGC: 0x%02x\n", val);
}
 
static void ns2501_destroy(struct intel_dvo_device *dvo)
/drivers/video/drm/i915/dvo_sil164.c
93,7 → 93,7
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
}
 
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
246,15 → 246,15
uint8_t val;
 
sil164_readb(dvo, SIL164_FREQ_LO, &val);
DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
sil164_readb(dvo, SIL164_FREQ_HI, &val);
DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
DRM_DEBUG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REG8, &val);
DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
DRM_DEBUG_KMS("SIL164_REG8: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REG9, &val);
DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
DRM_DEBUG_KMS("SIL164_REG9: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REGC, &val);
DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
DRM_DEBUG_KMS("SIL164_REGC: 0x%02x\n", val);
}
 
static void sil164_destroy(struct intel_dvo_device *dvo)
/drivers/video/drm/i915/dvo_tfp410.c
118,7 → 118,7
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
}
 
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
267,33 → 267,33
uint8_t val, val2;
 
tfp410_readb(dvo, TFP410_REV, &val);
DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_1, &val);
DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
DRM_DEBUG_KMS("TFP410_CTL1: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_2, &val);
DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
DRM_DEBUG_KMS("TFP410_CTL2: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_3, &val);
DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
DRM_DEBUG_KMS("TFP410_CTL3: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_USERCFG, &val);
DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
DRM_DEBUG_KMS("TFP410_USERCFG: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_DLY, &val);
DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
DRM_DEBUG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_CTL, &val);
DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
DRM_DEBUG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_TOP, &val);
DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
DRM_DEBUG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
DRM_DEBUG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
DRM_DEBUG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_H_RES_LO, &val);
tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
DRM_DEBUG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_V_RES_LO, &val);
tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
DRM_DEBUG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
}
 
static void tfp410_destroy(struct intel_dvo_device *dvo)
/drivers/video/drm/i915/i915_cmd_parser.c
0,0 → 1,1066
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Brad Volkin <bradley.d.volkin@intel.com>
*
*/
 
#include "i915_drv.h"
 
/**
* DOC: batch buffer command parser
*
* Motivation:
* Certain OpenGL features (e.g. transform feedback, performance monitoring)
* require userspace code to submit batches containing commands such as
* MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
* generations of the hardware will noop these commands in "unsecure" batches
* (which includes all userspace batches submitted via i915) even though the
* commands may be safe and represent the intended programming model of the
* device.
*
* The software command parser is similar in operation to the command parsing
* done in hardware for unsecure batches. However, the software parser allows
* some operations that would be noop'd by hardware, if the parser determines
* the operation is safe, and submits the batch as "secure" to prevent hardware
* parsing.
*
* Threats:
* At a high level, the hardware (and software) checks attempt to prevent
* granting userspace undue privileges. There are three categories of privilege.
*
* First, commands which are explicitly defined as privileged or which should
* only be used by the kernel driver. The parser generally rejects such
* commands, though it may allow some from the drm master process.
*
* Second, commands which access registers. To support correct/enhanced
* userspace functionality, particularly certain OpenGL extensions, the parser
* provides a whitelist of registers which userspace may safely access (for both
* normal and drm master processes).
*
* Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
* The parser always rejects such commands.
*
* The majority of the problematic commands fall in the MI_* range, with only a
* few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
*
* Implementation:
* Each ring maintains tables of commands and registers which the parser uses in
* scanning batch buffers submitted to that ring.
*
* Since the set of commands that the parser must check for is significantly
* smaller than the number of commands supported, the parser tables contain only
* those commands required by the parser. This generally works because command
* opcode ranges have standard command length encodings. So for commands that
* the parser does not need to check, it can easily skip them. This is
* implemented via a per-ring length decoding vfunc.
*
* Unfortunately, there are a number of commands that do not follow the standard
* length encoding for their opcode range, primarily amongst the MI_* commands.
* To handle this, the parser provides a way to define explicit "skip" entries
* in the per-ring command tables.
*
* Other command table entries map fairly directly to high level categories
* mentioned above: rejected, master-only, register whitelist. The parser
* implements a number of checks, including the privileged memory checks, via a
* general bitmasking mechanism.
*/
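
/*
 * Illustrative sketch, not part of the parser itself: the basic matching
 * rule the tables below encode. A command dword matches a descriptor when
 * the masked opcode bits equal the descriptor's canonical value.
 */
static inline bool
example_desc_matches(const struct drm_i915_cmd_descriptor *desc,
		     u32 cmd_header)
{
	return (cmd_header & desc->cmd.mask) == desc->cmd.value;
}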
 
#define STD_MI_OPCODE_MASK 0xFF800000
#define STD_3D_OPCODE_MASK 0xFFFF0000
#define STD_2D_OPCODE_MASK 0xFFC00000
#define STD_MFX_OPCODE_MASK 0xFFFF0000
 
#define CMD(op, opm, f, lm, fl, ...) \
{ \
.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
.cmd = { (op), (opm) }, \
.length = { (lm) }, \
__VA_ARGS__ \
}
 
/* Convenience macros to compress the tables */
#define SMI STD_MI_OPCODE_MASK
#define S3D STD_3D_OPCODE_MASK
#define S2D STD_2D_OPCODE_MASK
#define SMFX STD_MFX_OPCODE_MASK
#define F true
#define S CMD_DESC_SKIP
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK
#define M CMD_DESC_MASTER
 
/* Command Mask Fixed Len Action
---------------------------------------------------------- */
static const struct drm_i915_cmd_descriptor common_cmds[] = {
CMD( MI_NOOP, SMI, F, 1, S ),
CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ),
CMD( MI_ARB_CHECK, SMI, F, 1, S ),
CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
CMD( MI_SEMAPHORE_MBOX, SMI, !F, 0xFF, R ),
CMD( MI_STORE_DWORD_INDEX, SMI, !F, 0xFF, R ),
CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
.reg = { .offset = 1, .mask = 0x007FFFFC } ),
CMD( MI_STORE_REGISTER_MEM(1), SMI, !F, 0xFF, W | B,
.reg = { .offset = 1, .mask = 0x007FFFFC },
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_LOAD_REGISTER_MEM, SMI, !F, 0xFF, W | B,
.reg = { .offset = 1, .mask = 0x007FFFFC },
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
};
 
static const struct drm_i915_cmd_descriptor render_cmds[] = {
CMD( MI_FLUSH, SMI, F, 1, S ),
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_PREDICATE, SMI, F, 1, S ),
CMD( MI_TOPOLOGY_FILTER, SMI, F, 1, S ),
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
CMD( MI_SET_CONTEXT, SMI, !F, 0xFF, R ),
CMD( MI_URB_CLEAR, SMI, !F, 0xFF, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3F, B,
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_UPDATE_GTT, SMI, !F, 0xFF, R ),
CMD( MI_CLFLUSH, SMI, !F, 0x3FF, B,
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_REPORT_PERF_COUNT, SMI, !F, 0x3F, B,
.bits = {{
.offset = 1,
.mask = MI_REPORT_PERF_COUNT_GGTT,
.expected = 0,
}}, ),
CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( GFX_OP_3DSTATE_VF_STATISTICS, S3D, F, 1, S ),
CMD( PIPELINE_SELECT, S3D, F, 1, S ),
CMD( MEDIA_VFE_STATE, S3D, !F, 0xFFFF, B,
.bits = {{
.offset = 2,
.mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
.expected = 0,
}}, ),
CMD( GPGPU_OBJECT, S3D, !F, 0xFF, S ),
CMD( GPGPU_WALKER, S3D, !F, 0xFF, S ),
CMD( GFX_OP_3DSTATE_SO_DECL_LIST, S3D, !F, 0x1FF, S ),
CMD( GFX_OP_PIPE_CONTROL(5), S3D, !F, 0xFF, B,
.bits = {{
.offset = 1,
.mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
.expected = 0,
},
{
.offset = 1,
.mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_STORE_DATA_INDEX),
.expected = 0,
.condition_offset = 1,
.condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
}}, ),
};
 
static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( MI_SET_PREDICATE, SMI, F, 1, S ),
CMD( MI_RS_CONTROL, SMI, F, 1, S ),
CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ),
CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ),
CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ),
CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ),
CMD( GFX_OP_3DSTATE_DX9_CONSTANTF_VS, S3D, !F, 0x7FF, S ),
CMD( GFX_OP_3DSTATE_DX9_CONSTANTF_PS, S3D, !F, 0x7FF, S ),
 
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS, S3D, !F, 0x1FF, S ),
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS, S3D, !F, 0x1FF, S ),
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS, S3D, !F, 0x1FF, S ),
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS, S3D, !F, 0x1FF, S ),
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
};
 
static const struct drm_i915_cmd_descriptor video_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
.bits = {{
.offset = 0,
.mask = MI_FLUSH_DW_NOTIFY,
.expected = 0,
},
{
.offset = 1,
.mask = MI_FLUSH_DW_USE_GTT,
.expected = 0,
.condition_offset = 0,
.condition_mask = MI_FLUSH_DW_OP_MASK,
},
{
.offset = 0,
.mask = MI_FLUSH_DW_STORE_INDEX,
.expected = 0,
.condition_offset = 0,
.condition_mask = MI_FLUSH_DW_OP_MASK,
}}, ),
CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
/*
* MFX_WAIT doesn't fit the way we handle length for most commands.
* It has a length field but it uses a non-standard length bias.
* It is always 1 dword though, so just treat it as fixed length.
*/
CMD( MFX_WAIT, SMFX, F, 1, S ),
};
 
static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
.bits = {{
.offset = 0,
.mask = MI_FLUSH_DW_NOTIFY,
.expected = 0,
},
{
.offset = 1,
.mask = MI_FLUSH_DW_USE_GTT,
.expected = 0,
.condition_offset = 0,
.condition_mask = MI_FLUSH_DW_OP_MASK,
},
{
.offset = 0,
.mask = MI_FLUSH_DW_STORE_INDEX,
.expected = 0,
.condition_offset = 0,
.condition_mask = MI_FLUSH_DW_OP_MASK,
}}, ),
CMD( MI_CONDITIONAL_BATCH_BUFFER_END, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
};
 
static const struct drm_i915_cmd_descriptor blt_cmds[] = {
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_UPDATE_GTT, SMI, !F, 0x3F, R ),
CMD( MI_FLUSH_DW, SMI, !F, 0x3F, B,
.bits = {{
.offset = 0,
.mask = MI_FLUSH_DW_NOTIFY,
.expected = 0,
},
{
.offset = 1,
.mask = MI_FLUSH_DW_USE_GTT,
.expected = 0,
.condition_offset = 0,
.condition_mask = MI_FLUSH_DW_OP_MASK,
},
{
.offset = 0,
.mask = MI_FLUSH_DW_STORE_INDEX,
.expected = 0,
.condition_offset = 0,
.condition_mask = MI_FLUSH_DW_OP_MASK,
}}, ),
CMD( COLOR_BLT, S2D, !F, 0x3F, S ),
CMD( SRC_COPY_BLT, S2D, !F, 0x3F, S ),
};
 
static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
};
 
#undef CMD
#undef SMI
#undef S3D
#undef S2D
#undef SMFX
#undef F
#undef S
#undef R
#undef W
#undef B
#undef M
 
static const struct drm_i915_cmd_table gen7_render_cmds[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) },
{ render_cmds, ARRAY_SIZE(render_cmds) },
};
 
static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) },
{ render_cmds, ARRAY_SIZE(render_cmds) },
{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};
 
static const struct drm_i915_cmd_table gen7_video_cmds[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) },
{ video_cmds, ARRAY_SIZE(video_cmds) },
};
 
static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) },
{ vecs_cmds, ARRAY_SIZE(vecs_cmds) },
};
 
static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) },
{ blt_cmds, ARRAY_SIZE(blt_cmds) },
};
 
static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) },
{ blt_cmds, ARRAY_SIZE(blt_cmds) },
{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};
 
/*
* Register whitelists, sorted by increasing register offset.
*
* Some registers that userspace accesses are 64 bits. The register
* access commands only allow 32-bit accesses. Hence, we have to include
* entries for both halves of the 64-bit registers.
*/
 
/* Convenience macro for adding 64-bit registers */
#define REG64(addr) (addr), (addr + sizeof(u32))
 
static const u32 gen7_render_regs[] = {
REG64(HS_INVOCATION_COUNT),
REG64(DS_INVOCATION_COUNT),
REG64(IA_VERTICES_COUNT),
REG64(IA_PRIMITIVES_COUNT),
REG64(VS_INVOCATION_COUNT),
REG64(GS_INVOCATION_COUNT),
REG64(GS_PRIMITIVES_COUNT),
REG64(CL_INVOCATION_COUNT),
REG64(CL_PRIMITIVES_COUNT),
REG64(PS_INVOCATION_COUNT),
REG64(PS_DEPTH_COUNT),
OACONTROL, /* Only allowed for LRI and SRM. See below. */
GEN7_3DPRIM_END_OFFSET,
GEN7_3DPRIM_START_VERTEX,
GEN7_3DPRIM_VERTEX_COUNT,
GEN7_3DPRIM_INSTANCE_COUNT,
GEN7_3DPRIM_START_INSTANCE,
GEN7_3DPRIM_BASE_VERTEX,
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
GEN7_SO_WRITE_OFFSET(0),
GEN7_SO_WRITE_OFFSET(1),
GEN7_SO_WRITE_OFFSET(2),
GEN7_SO_WRITE_OFFSET(3),
GEN7_L3SQCREG1,
GEN7_L3CNTLREG2,
GEN7_L3CNTLREG3,
};
 
static const u32 gen7_blt_regs[] = {
BCS_SWCTRL,
};
 
static const u32 ivb_master_regs[] = {
FORCEWAKE_MT,
DERRMR,
GEN7_PIPE_DE_LOAD_SL(PIPE_A),
GEN7_PIPE_DE_LOAD_SL(PIPE_B),
GEN7_PIPE_DE_LOAD_SL(PIPE_C),
};
 
static const u32 hsw_master_regs[] = {
FORCEWAKE_MT,
DERRMR,
};
 
#undef REG64
 
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
u32 subclient =
(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
 
if (client == INSTR_MI_CLIENT)
return 0x3F;
else if (client == INSTR_RC_CLIENT) {
if (subclient == INSTR_MEDIA_SUBCLIENT)
return 0xFFFF;
else
return 0xFF;
}
 
DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
return 0;
}
 
static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
{
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
u32 subclient =
(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
 
if (client == INSTR_MI_CLIENT)
return 0x3F;
else if (client == INSTR_RC_CLIENT) {
if (subclient == INSTR_MEDIA_SUBCLIENT)
return 0xFFF;
else
return 0xFF;
}
 
DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
return 0;
}
 
static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
{
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
 
if (client == INSTR_MI_CLIENT)
return 0x3F;
else if (client == INSTR_BC_CLIENT)
return 0xFF;
 
DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
return 0;
}
 
static bool validate_cmds_sorted(struct intel_engine_cs *ring,
const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count)
{
int i;
bool ret = true;
 
if (!cmd_tables || cmd_table_count == 0)
return true;
 
for (i = 0; i < cmd_table_count; i++) {
const struct drm_i915_cmd_table *table = &cmd_tables[i];
u32 previous = 0;
int j;
 
for (j = 0; j < table->count; j++) {
const struct drm_i915_cmd_descriptor *desc =
&table->table[j];
u32 curr = desc->cmd.value & desc->cmd.mask;
 
if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
ring->id, i, j, curr, previous);
ret = false;
}
 
previous = curr;
}
}
 
return ret;
}
 
static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count)
{
int i;
u32 previous = 0;
bool ret = true;
 
for (i = 0; i < reg_count; i++) {
u32 curr = reg_table[i];
 
if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
ring_id, i, curr, previous);
ret = false;
}
 
previous = curr;
}
 
return ret;
}
 
static bool validate_regs_sorted(struct intel_engine_cs *ring)
{
return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
check_sorted(ring->id, ring->master_reg_table,
ring->master_reg_count);
}
 
struct cmd_node {
const struct drm_i915_cmd_descriptor *desc;
struct hlist_node node;
};
 
/*
* Different command ranges have different numbers of bits for the opcode. For
* example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
* problem is that, for example, MI commands use bits 22:16 for other fields
* such as GGTT vs PPGTT bits. If we include those bits in the mask then when
* we mask a command from a batch it could hash to the wrong bucket due to
* non-opcode bits being set. But if we don't include those bits, some 3D
* commands may hash to the same bucket due to not including opcode bits that
* make the command unique. For now, we will risk hashing to the same bucket.
*
* If we attempt to generate a perfect hash, we should be able to look at bits
* 31:29 of a command from a batch buffer and use the full mask for that
* client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
*/
#define CMD_HASH_MASK STD_MI_OPCODE_MASK
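/*
 * Sketch of the per-client "perfect hash" the comment above alludes to.
 * The driver does not use this (it always hashes with CMD_HASH_MASK);
 * the wider mask for non-MI clients is an assumption for illustration,
 * based on 3D commands using bits 31:16 for their opcode.
 */
static inline u32 cmd_hash_key_per_client(u32 cmd_header)
{
	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return cmd_header & STD_MI_OPCODE_MASK; /* bits 31:23 */

	/* 3D/media/blitter opcodes occupy more of the header */
	return cmd_header & 0xFFFF0000;
}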
 
static int init_hash_table(struct intel_engine_cs *ring,
const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count)
{
int i, j;
 
hash_init(ring->cmd_hash);
 
for (i = 0; i < cmd_table_count; i++) {
const struct drm_i915_cmd_table *table = &cmd_tables[i];
 
for (j = 0; j < table->count; j++) {
const struct drm_i915_cmd_descriptor *desc =
&table->table[j];
struct cmd_node *desc_node =
kmalloc(sizeof(*desc_node), GFP_KERNEL);
 
if (!desc_node)
return -ENOMEM;
 
desc_node->desc = desc;
hash_add(ring->cmd_hash, &desc_node->node,
desc->cmd.value & CMD_HASH_MASK);
}
}
 
return 0;
}
 
static void fini_hash_table(struct intel_engine_cs *ring)
{
struct hlist_node *tmp;
struct cmd_node *desc_node;
int i;
 
hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
hash_del(&desc_node->node);
kfree(desc_node);
}
}
 
/**
* i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
* @ring: the ringbuffer to initialize
*
* Optionally initializes fields related to batch buffer command parsing in the
* struct intel_engine_cs based on whether the platform requires software
* command parsing.
*
* Return: non-zero if initialization fails
*/
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
{
const struct drm_i915_cmd_table *cmd_tables;
int cmd_table_count;
int ret;
 
if (!IS_GEN7(ring->dev))
return 0;
 
switch (ring->id) {
case RCS:
if (IS_HASWELL(ring->dev)) {
cmd_tables = hsw_render_ring_cmds;
cmd_table_count =
ARRAY_SIZE(hsw_render_ring_cmds);
} else {
cmd_tables = gen7_render_cmds;
cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
}
 
ring->reg_table = gen7_render_regs;
ring->reg_count = ARRAY_SIZE(gen7_render_regs);
 
if (IS_HASWELL(ring->dev)) {
ring->master_reg_table = hsw_master_regs;
ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
} else {
ring->master_reg_table = ivb_master_regs;
ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
}
 
ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
case VCS:
cmd_tables = gen7_video_cmds;
cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case BCS:
if (IS_HASWELL(ring->dev)) {
cmd_tables = hsw_blt_ring_cmds;
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
} else {
cmd_tables = gen7_blt_cmds;
cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
}
 
ring->reg_table = gen7_blt_regs;
ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
 
if (IS_HASWELL(ring->dev)) {
ring->master_reg_table = hsw_master_regs;
ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
} else {
ring->master_reg_table = ivb_master_regs;
ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
}
 
ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
case VECS:
cmd_tables = hsw_vebox_cmds;
cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
/* VECS can use the same length_mask function as VCS */
ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
default:
DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
ring->id);
BUG();
}
 
BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
BUG_ON(!validate_regs_sorted(ring));
 
ret = init_hash_table(ring, cmd_tables, cmd_table_count);
if (ret) {
DRM_ERROR("CMD: cmd_parser_init failed!\n");
fini_hash_table(ring);
return ret;
}
 
ring->needs_cmd_parser = true;
 
return 0;
}
 
/**
* i915_cmd_parser_fini_ring() - clean up cmd parser related fields
* @ring: the ringbuffer to clean up
*
* Releases any resources related to command parsing that may have been
* initialized for the specified ring.
*/
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
{
if (!ring->needs_cmd_parser)
return;
 
fini_hash_table(ring);
}
 
static const struct drm_i915_cmd_descriptor*
find_cmd_in_table(struct intel_engine_cs *ring,
u32 cmd_header)
{
struct cmd_node *desc_node;
 
hash_for_each_possible(ring->cmd_hash, desc_node, node,
cmd_header & CMD_HASH_MASK) {
const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
u32 masked_cmd = desc->cmd.mask & cmd_header;
u32 masked_value = desc->cmd.value & desc->cmd.mask;
 
if (masked_cmd == masked_value)
return desc;
}
 
return NULL;
}
 
/*
* Returns a pointer to a descriptor for the command specified by cmd_header.
*
* The caller must supply space for a default descriptor via the default_desc
* parameter. If no descriptor for the specified command exists in the ring's
* command parser tables, this function fills in default_desc based on the
* ring's default length encoding and returns default_desc.
*/
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_engine_cs *ring,
u32 cmd_header,
struct drm_i915_cmd_descriptor *default_desc)
{
const struct drm_i915_cmd_descriptor *desc;
u32 mask;
 
desc = find_cmd_in_table(ring, cmd_header);
if (desc)
return desc;
 
mask = ring->get_cmd_length_mask(cmd_header);
if (!mask)
return NULL;
 
BUG_ON(!default_desc);
default_desc->flags = CMD_DESC_SKIP;
default_desc->length.mask = mask;
 
return default_desc;
}
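/*
 * Typical caller pattern (see i915_parse_cmds() further below): the
 * default descriptor lives on the caller's stack and is filled in only
 * when a command has no entry in the per-ring tables. A NULL return
 * means the command is unrecognized and the batch should be rejected:
 *
 *	struct drm_i915_cmd_descriptor default_desc = { 0 };
 *	const struct drm_i915_cmd_descriptor *desc;
 *
 *	desc = find_cmd(ring, *cmd, &default_desc);
 *	if (!desc)
 *		return -EINVAL;
 */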
 
static bool valid_reg(const u32 *table, int count, u32 addr)
{
if (table && count != 0) {
int i;
 
for (i = 0; i < count; i++) {
if (table[i] == addr)
return true;
}
}
 
return false;
}
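/*
 * Sketch: since validate_regs_sorted() guarantees the whitelists are
 * sorted by increasing offset, a binary search could replace the linear
 * scan above. Not used by the driver; shown for illustration only.
 */
static inline bool valid_reg_bsearch(const u32 *table, int count, u32 addr)
{
	int lo = 0, hi = count - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (table[mid] == addr)
			return true;
		if (table[mid] < addr)
			lo = mid + 1;
		else
			hi = mid - 1;
	}

	return false;
}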
 
static u32 *vmap_batch(struct drm_i915_gem_object *obj)
{
int i;
void *addr = NULL;
struct sg_page_iter sg_iter;
struct page **pages;
 
pages = kmalloc((obj->base.size >> PAGE_SHIFT) * sizeof(*pages), GFP_KERNEL);
if (pages == NULL) {
DRM_DEBUG_DRIVER("Failed to get space for pages\n");
goto finish;
}
 
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
pages[i] = sg_page_iter_page(&sg_iter);
i++;
}
 
// addr = vmap(pages, i, 0, PAGE_KERNEL);
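/*
 * NOTE: vmap() is stubbed out in this port, so addr stays NULL and the
 * function currently always returns NULL to its caller.
 */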
if (addr == NULL) {
DRM_DEBUG_DRIVER("Failed to vmap pages\n");
goto finish;
}
 
finish:
if (pages)
kfree(pages);
return (u32*)addr;
}
 
/**
* i915_needs_cmd_parser() - should a given ring use software command parsing?
* @ring: the ring in question
*
* Only certain platforms require software batch buffer command parsing, and
* only when enabled via the module parameter.
*
* Return: true if the ring requires software command parsing
*/
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
if (!ring->needs_cmd_parser)
return false;
 
/*
* XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
* disabled. That will cause all of the parser's PPGTT checks to
* fail. For now, disable parsing when PPGTT is off.
*/
if (!dev_priv->mm.aliasing_ppgtt)
return false;
 
return (i915.enable_cmd_parser == 1);
}
 
static bool check_cmd(const struct intel_engine_cs *ring,
const struct drm_i915_cmd_descriptor *desc,
const u32 *cmd,
const bool is_master,
bool *oacontrol_set)
{
if (desc->flags & CMD_DESC_REJECT) {
DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
return false;
}
 
if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
*cmd);
return false;
}
 
if (desc->flags & CMD_DESC_REGISTER) {
u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
 
/*
* OACONTROL requires some special handling for writes. We
* want to make sure that any batch which enables OA also
* disables it before the end of the batch. The goal is to
* prevent one process from snooping on the perf data from
* another process. To do that, we need to check the value
* that will be written to the register. Hence, limit
* OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
*/
if (reg_addr == OACONTROL) {
if (desc->cmd.value == MI_LOAD_REGISTER_MEM)
return false;
 
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
*oacontrol_set = (cmd[2] != 0);
}
 
if (!valid_reg(ring->reg_table,
ring->reg_count, reg_addr)) {
if (!is_master ||
!valid_reg(ring->master_reg_table,
ring->master_reg_count,
reg_addr)) {
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
reg_addr,
*cmd,
ring->id);
return false;
}
}
}
 
if (desc->flags & CMD_DESC_BITMASK) {
int i;
 
for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
u32 dword;
 
if (desc->bits[i].mask == 0)
break;
 
if (desc->bits[i].condition_mask != 0) {
u32 offset =
desc->bits[i].condition_offset;
u32 condition = cmd[offset] &
desc->bits[i].condition_mask;
 
if (condition == 0)
continue;
}
 
dword = cmd[desc->bits[i].offset] &
desc->bits[i].mask;
 
if (dword != desc->bits[i].expected) {
DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
*cmd,
desc->bits[i].mask,
desc->bits[i].expected,
dword, ring->id);
return false;
}
}
}
 
return true;
}
 
#define LENGTH_BIAS 2
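/*
 * Sketch: how the size (in dwords) of a variable-length command is
 * derived from its header, mirroring the default-descriptor path in
 * i915_parse_cmds() below. The helper name is illustrative only.
 */
static inline u32 cmd_default_length(struct intel_engine_cs *ring, u32 header)
{
	u32 mask = ring->get_cmd_length_mask(header);

	if (!mask)
		return 0; /* unknown client; caller must reject the batch */

	return (header & mask) + LENGTH_BIAS;
}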
 
#if 0
/**
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
* @ring: the ring on which the batch is to execute
* @batch_obj: the batch buffer in question
* @batch_start_offset: byte offset in the batch at which execution starts
* @is_master: is the submitting process the drm master?
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
*
* Return: non-zero if the parser finds violations or otherwise fails
*/
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset,
bool is_master)
{
int ret = 0;
u32 *cmd, *batch_base, *batch_end;
struct drm_i915_cmd_descriptor default_desc = { 0 };
int needs_clflush = 0;
bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
 
ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
if (ret) {
DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
return ret;
}
 
batch_base = vmap_batch(batch_obj);
if (!batch_base) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
i915_gem_object_unpin_pages(batch_obj);
return -ENOMEM;
}
 
if (needs_clflush)
drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);
 
cmd = batch_base + (batch_start_offset / sizeof(*cmd));
batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));
 
while (cmd < batch_end) {
const struct drm_i915_cmd_descriptor *desc;
u32 length;
 
if (*cmd == MI_BATCH_BUFFER_END)
break;
 
desc = find_cmd(ring, *cmd, &default_desc);
if (!desc) {
DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
*cmd);
ret = -EINVAL;
break;
}
 
if (desc->flags & CMD_DESC_FIXED)
length = desc->length.fixed;
else
length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
 
if ((batch_end - cmd) < length) {
DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
*cmd,
length,
batch_end - cmd);
ret = -EINVAL;
break;
}
 
if (!check_cmd(ring, desc, cmd, is_master, &oacontrol_set)) {
ret = -EINVAL;
break;
}
 
cmd += length;
}
 
if (oacontrol_set) {
DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
ret = -EINVAL;
}
 
if (cmd >= batch_end) {
DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL;
}
 
// vunmap(batch_base);
 
i915_gem_object_unpin_pages(batch_obj);
 
return ret;
}
#endif
 
/**
* i915_cmd_parser_get_version() - get the cmd parser version number
*
* The cmd parser maintains a simple increasing integer version number suitable
* for passing to userspace clients to determine what operations are permitted.
*
* Return: the current version number of the cmd parser
*/
int i915_cmd_parser_get_version(void)
{
/*
* Command parser version history
*
* 1. Initial version. Checks batches and reports violations, but leaves
* hardware parsing enabled (so does not allow new use cases).
*/
return 1;
}
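/*
 * Userspace side (sketch): querying this version through the GETPARAM
 * ioctl wired up in i915_getparam() (see i915_dma.c below). Include
 * paths assume a libdrm build environment.
 *
 *	#include <sys/ioctl.h>
 *	#include <i915_drm.h>
 *
 *	int cmd_parser_version(int fd)
 *	{
 *		int value = 0;
 *		struct drm_i915_getparam gp = {
 *			.param = I915_PARAM_CMD_PARSER_VERSION,
 *			.value = &value,
 *		};
 *
 *		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
 *			return -1;
 *		return value;
 *	}
 */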
/drivers/video/drm/i915/i915_dma.c
63,7 → 63,7
* has access to the ring.
*/
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
if (LP_RING(dev->dev_private)->obj == NULL) \
if (LP_RING(dev->dev_private)->buffer->obj == NULL) \
LOCK_TEST_WITH_RETURN(dev, file); \
} while (0)
 
82,7 → 82,7
 
void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
 
/*
103,7 → 103,7
 
static void i915_write_hws_pga(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 addr;
 
addr = dev_priv->status_page_dmah->busaddr;
118,8 → 118,8
*/
static void i915_free_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = LP_RING(dev_priv);
 
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
139,9 → 139,10
 
void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
struct intel_engine_cs *ring = LP_RING(dev_priv);
struct intel_ringbuffer *ringbuf = ring->buffer;
 
/*
* We should never lose context on the ring with modesetting
150,23 → 151,23
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
 
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
if (ring->space < 0)
ring->space += ring->size;
ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
if (ringbuf->space < 0)
ringbuf->space += ringbuf->size;
 
if (!dev->primary->master)
return;
 
master_priv = dev->primary->master->driver_priv;
if (ring->head == ring->tail && master_priv->sarea_priv)
if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
 
static int i915_dma_cleanup(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
/* Make sure interrupts are disabled here because the uninstall ioctl
190,7 → 191,7
 
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret;
 
203,7 → 204,7
}
 
if (init->ring_size != 0) {
if (LP_RING(dev_priv)->obj != NULL) {
if (LP_RING(dev_priv)->buffer->obj != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
235,12 → 236,12
 
static int i915_dma_resume(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = LP_RING(dev_priv);
 
DRM_DEBUG_DRIVER("%s\n", __func__);
 
if (ring->virtual_start == NULL) {
if (ring->buffer->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
359,14 → 360,15
 
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int i, ret;
 
if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
return -EINVAL;
 
for (i = 0; i < dwords;) {
int sz = validate_cmd(buffer[i]);
 
if (sz == 0 || i + sz > dwords)
return -EINVAL;
i += sz;
435,7 → 437,7
 
static void i915_emit_breadcrumb(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
dev_priv->dri1.counter++;
551,7 → 553,7
 
static int i915_dispatch_flip(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
dev->primary->master->driver_priv;
int ret;
629,10 → 631,9
static int i915_batchbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
master_priv->sarea_priv;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
drm_i915_sarea_t *sarea_priv;
drm_i915_batchbuffer_t *batch = data;
int ret;
struct drm_clip_rect *cliprects = NULL;
640,6 → 641,9
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
master_priv = dev->primary->master->driver_priv;
sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
 
if (!dev_priv->dri1.allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
return -EINVAL;
685,10 → 689,9
static int i915_cmdbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
master_priv->sarea_priv;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
drm_i915_sarea_t *sarea_priv;
drm_i915_cmdbuffer_t *cmdbuf = data;
struct drm_clip_rect *cliprects = NULL;
void *batch_data;
700,6 → 703,9
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
master_priv = dev->primary->master->driver_priv;
sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
 
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
if (cmdbuf->num_cliprects < 0)
753,7 → 759,7
 
static int i915_emit_irq(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
i915_kernel_lost_context(dev);
779,10 → 785,10
 
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
struct intel_engine_cs *ring = LP_RING(dev_priv);
 
DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
816,7 → 822,7
static int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_irq_emit_t *emit = data;
int result;
 
823,7 → 829,7
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
847,7 → 853,7
static int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_irq_wait_t *irqwait = data;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
864,7 → 870,7
static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_vblank_pipe_t *pipe = data;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
926,7 → 932,7
int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_getparam_t *param = data;
int value;
 
946,7 → 952,7
value = READ_BREADCRUMB(dev_priv);
break;
case I915_PARAM_CHIPSET_ID:
value = dev->pci_device;
value = dev->pdev->device;
break;
case I915_PARAM_HAS_GEM:
value = 1;
995,7 → 1001,7
value = HAS_WT(dev);
break;
case I915_PARAM_HAS_ALIASING_PPGTT:
value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
break;
case I915_PARAM_HAS_WAIT_TIMEOUT:
value = 1;
1018,6 → 1024,9
case I915_PARAM_HAS_EXEC_HANDLE_LUT:
value = 1;
break;
case I915_PARAM_CMD_PARSER_VERSION:
value = i915_cmd_parser_get_version();
break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
1032,7 → 1041,7
static int i915_setparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_setparam_t *param = data;
 
if (!dev_priv) {
1093,11 → 1102,14
static void
intel_setup_mchbar(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
 
if (IS_VALLEYVIEW(dev))
return;
 
dev_priv->mchbar_need_disable = false;
 
if (IS_I915G(dev) || IS_I915GM(dev)) {
1159,8 → 1171,8
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
 
main_fb_obj = kos_gem_fb_object_create(dev,0,16*1024*1024);
 
 
/* Initialise stolen first so that we may reserve preallocated
* objects for the BIOS to KMS transition.
*/
1168,11 → 1180,13
if (ret)
goto cleanup_vga_switcheroo;
 
ret = drm_irq_install(dev);
intel_power_domains_init_hw(dev_priv);
 
ret = drm_irq_install(dev, dev->pdev->irq);
if (ret)
goto cleanup_gem_stolen;
 
intel_power_domains_init_hw(dev);
dev_priv->pm._irqs_disabled = false;
 
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
1180,7 → 1194,7
 
ret = i915_gem_init(dev);
if (ret)
goto cleanup_power;
goto cleanup_irq;
 
 
intel_modeset_gem_init(dev);
1188,10 → 1202,8
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = true;
if (INTEL_INFO(dev)->num_pipes == 0) {
intel_display_power_put(dev, POWER_DOMAIN_VGA);
if (INTEL_INFO(dev)->num_pipes == 0)
return 0;
}
 
ret = intel_fbdev_init(dev);
if (ret)
1212,9 → 1224,6
*/
intel_fbdev_initial_config(dev);
 
/* Only enable hotplug handling once the fbdev is fully set up. */
dev_priv->enable_hotplug_processing = true;
 
drm_kms_helper_poll_init(dev);
 
return 0;
1224,10 → 1233,8
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_power:
intel_display_power_put(dev, POWER_DOMAIN_VGA);
WARN_ON(dev_priv->mm.aliasing_ppgtt);
cleanup_irq:
// drm_irq_uninstall(dev);
cleanup_gem_stolen:
// i915_gem_cleanup_stolen(dev);
1242,15 → 1249,16
 
 
#if IS_ENABLED(CONFIG_FB)
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
struct pci_dev *pdev = dev_priv->dev->pdev;
bool primary;
int ret;
 
ap = alloc_apertures(1);
if (!ap)
return;
return -ENOMEM;
 
ap->ranges[0].base = dev_priv->gtt.mappable_base;
ap->ranges[0].size = dev_priv->gtt.mappable_end;
1258,28 → 1266,32
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 
remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
 
kfree(ap);
 
return ret;
}
#else
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
return 0;
}
#endif
 
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
const struct intel_device_info *info = dev_priv->info;
const struct intel_device_info *info = &dev_priv->info;
 
#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
info->gen,
dev_priv->dev->pdev->device,
dev_priv->dev->pdev->revision,
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
1287,6 → 1299,62
#undef SEP_COMMA
}
 
/*
* Determine various intel_device_info fields at runtime.
*
* Use it when either:
* - it's judged too laborious to fill n static structures with the limit
* when a simple if statement does the job,
* - run-time checks (eg read fuse/strap registers) are needed.
*
* This function needs to be called:
* - after the MMIO has been setup as we are reading registers,
* - after the PCH has been detected,
* - before the first usage of the fields it can tweak.
*/
static void intel_device_info_runtime_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_device_info *info;
enum pipe pipe;
 
info = (struct intel_device_info *)&dev_priv->info;
 
if (IS_VALLEYVIEW(dev))
for_each_pipe(pipe)
info->num_sprites[pipe] = 2;
else
for_each_pipe(pipe)
info->num_sprites[pipe] = 1;
 
if (i915.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
info->num_pipes = 0;
} else if (info->num_pipes > 0 &&
(INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
!IS_VALLEYVIEW(dev)) {
u32 fuse_strap = I915_READ(FUSE_STRAP);
u32 sfuse_strap = I915_READ(SFUSE_STRAP);
 
/*
* SFUSE_STRAP is supposed to have a bit signalling the display
* is fused off. Unfortunately it seems that, at least in
* certain cases, fused off display means that PCH display
* reads don't land anywhere. In that case, we read 0s.
*
* On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
* should be set when taking over after the firmware.
*/
if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
(dev_priv->pch_type == PCH_CPT &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
DRM_INFO("Display fused off, disabling\n");
info->num_pipes = 0;
}
}
}
 
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
1301,7 → 1369,7
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
struct intel_device_info *info;
struct intel_device_info *info, *device_info;
int ret = 0, mmio_bar, mmio_size;
uint32_t aperture_size;
 
1312,15 → 1380,19
if (dev_priv == NULL)
return -ENOMEM;
 
dev->dev_private = (void *)dev_priv;
dev->dev_private = dev_priv;
dev_priv->dev = dev;
dev_priv->info = info;
 
/* copy initial configuration to dev_priv->info */
device_info = (struct intel_device_info *)&dev_priv->info;
*device_info = *info;
 
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->modeset_restore_lock);
 
1363,8 → 1435,6
goto put_bridge;
}
 
intel_uncore_early_sanitize(dev);
 
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev);
 
1444,9 → 1514,7
* stuck interrupts on some machines.
*/
 
dev_priv->num_plane = 1;
if (IS_VALLEYVIEW(dev))
dev_priv->num_plane = 2;
intel_device_info_runtime_init(dev);
 
// if (INTEL_INFO(dev)->num_pipes) {
// ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
1454,7 → 1522,7
// goto out_gem_unload;
// }
 
intel_power_domains_init(dev);
intel_power_domains_init(dev_priv);
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
1471,7 → 1539,6
if (INTEL_INFO(dev)->num_pipes) {
/* Must be done after probing outputs */
intel_opregion_init(dev);
// acpi_video_register();
}
 
if (IS_GEN5(dev))
1515,8 → 1582,8
/* The i915.ko module is still not prepared to be loaded when
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
intel_display_set_init_power(dev, true);
intel_power_domains_remove(dev);
intel_display_set_init_power(dev_priv, true);
intel_power_domains_remove(dev_priv);
 
i915_teardown_sysfs(dev);
 
1552,8 → 1619,6
cancel_work_sync(&dev_priv->gpu_error.work);
i915_destroy_error_state(dev);
 
cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
 
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
 
1564,11 → 1629,10
flush_workqueue(dev_priv->wq);
 
mutex_lock(&dev->struct_mutex);
i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
WARN_ON(dev_priv->mm.aliasing_ppgtt);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
i915_gem_cleanup_stolen(dev);
 
if (!I915_NEED_GFX_HWS(dev))
1575,7 → 1639,6
i915_free_hws(dev);
}
 
list_del(&dev_priv->gtt.base.global_link);
WARN_ON(!list_empty(&dev_priv->vm_list));
 
drm_vblank_cleanup(dev);
1583,6 → 1646,7
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
 
destroy_workqueue(dev_priv->dp_wq);
destroy_workqueue(dev_priv->wq);
pm_qos_remove_request(&dev_priv->pm_qos);
 
1596,7 → 1660,7
kmem_cache_destroy(dev_priv->slab);
 
pci_dev_put(dev_priv->bridge_dev);
kfree(dev->dev_private);
kfree(dev_priv);
 
return 0;
}
1604,20 → 1668,12
 
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
int ret;
 
DRM_DEBUG_DRIVER("\n");
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
return -ENOMEM;
ret = i915_gem_open(dev, file);
if (ret)
return ret;
 
file->driver_priv = file_priv;
 
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
 
idr_init(&file_priv->context_idr);
 
return 0;
}
 
1636,7 → 1692,7
*/
void i915_driver_lastclose(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* On gen6+ we refuse to init without kms enabled, but then the drm core
* goes right around and calls lastclose. Check for this and don't clean
1655,11 → 1711,11
i915_dma_cleanup(dev);
}
 
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
mutex_lock(&dev->struct_mutex);
i915_gem_context_close(dev, file_priv);
i915_gem_release(dev, file_priv);
i915_gem_context_close(dev, file);
i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
}
 
1667,6 → 1723,8
{
struct drm_i915_file_private *file_priv = file->driver_priv;
 
if (file_priv && file_priv->bsd_ring)
file_priv->bsd_ring = NULL;
kfree(file_priv);
}
 
1720,9 → 1778,10
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
 
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
 
/*
* This is really ugly: Because old userspace abused the linux agp interface to
/drivers/video/drm/i915/i915_drv.c
38,6 → 38,7
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <linux/pci.h>
#include <drm/i915_pciids.h>
 
#include <drm/drm_crtc_helper.h>
 
45,125 → 46,32
 
#define __read_mostly
 
int init_display_kms(struct drm_device *dev);
static struct drm_driver driver;
 
static int i915_modeset __read_mostly = 1;
module_param_named(modeset, i915_modeset, int, 0400);
MODULE_PARM_DESC(modeset,
"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
"1=on, -1=force vga console preference [default])");
#define GEN_DEFAULT_PIPEOFFSETS \
.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
 
unsigned int i915_fbpercrtc __always_unused = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
#define GEN_CHV_PIPEOFFSETS \
.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
CHV_PIPE_C_OFFSET }, \
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
CHV_TRANSCODER_C_OFFSET, }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
CHV_PALETTE_C_OFFSET }
 
int i915_panel_ignore_lid __read_mostly = 1;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
#define CURSOR_OFFSETS \
.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
 
unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
#define IVB_CURSOR_OFFSETS \
.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
 
int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
int init_display_kms(struct drm_device *dev);
 
int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
MODULE_PARM_DESC(i915_enable_rc6,
"Enable power-saving render C-state 6. "
"Different stages can be selected via bitmask values "
"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
"default: -1 (use per-chip default)");
 
int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
 
unsigned int i915_lvds_downclock __read_mostly = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");
 
int i915_lvds_channel_mode __read_mostly;
module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
"Specify LVDS channel mode "
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
 
int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: auto from VBT)");
 
int i915_vbt_sdvo_panel_type __read_mostly = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
 
static bool i915_try_reset __read_mostly = true;
module_param_named(reset, i915_try_reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
 
bool i915_enable_hangcheck __read_mostly = false;
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
"Periodically check GPU activity for detecting hangs. "
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
 
int i915_enable_ppgtt __read_mostly = -1;
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");
 
int i915_enable_psr __read_mostly = 0;
module_param_named(enable_psr, i915_enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
 
unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support.");
 
int i915_disable_power_well __read_mostly = 1;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
"Disable the power well when possible (default: true)");
 
int i915_enable_ips __read_mostly = 1;
module_param_named(enable_ips, i915_enable_ips, int, 0600);
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
 
bool i915_fastboot __read_mostly = 0;
module_param_named(fastboot, i915_fastboot, bool, 0600);
MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
"(default: false)");
 
int i915_enable_pc8 __read_mostly = 0;
module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
 
int i915_pc8_timeout __read_mostly = 5000;
module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
 
bool i915_prefault_disable __read_mostly;
module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
 
static struct drm_driver driver;
extern int intel_agp_enabled;
 
#define PCI_VENDOR_ID_INTEL 0x8086
173,6 → 81,8
.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
.gen = 3, .is_mobile = 1, .num_pipes = 2,
181,11 → 91,15
.supports_tv = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
194,6 → 108,8
.supports_tv = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_i965g_info = {
201,6 → 117,8
.has_hotplug = 1,
.has_overlay = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_i965gm_info = {
209,6 → 127,8
.has_overlay = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_g33_info = {
216,6 → 136,8
.need_gfx_hws = 1, .has_hotplug = 1,
.has_overlay = 1,
.ring_mask = RENDER_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_g45_info = {
222,6 → 144,8
.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_gm45_info = {
230,6 → 154,8
.has_pipe_cxsr = 1, .has_hotplug = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_pineview_info = {
236,6 → 162,8
.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_overlay = 1,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_ironlake_d_info = {
242,6 → 170,8
.gen = 5, .num_pipes = 2,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_ironlake_m_info = {
249,6 → 179,8
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_sandybridge_d_info = {
257,6 → 189,8
.has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_sandybridge_m_info = {
265,6 → 199,8
.has_fbc = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.has_llc = 1,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
 
#define GEN7_FEATURES \
277,6 → 213,8
static const struct intel_device_info intel_ivybridge_d_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_ivybridge_m_info = {
283,6 → 221,8
GEN7_FEATURES,
.is_ivybridge = 1,
.is_mobile = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_ivybridge_q_info = {
289,6 → 229,8
GEN7_FEATURES,
.is_ivybridge = 1,
.num_pipes = 0, /* legal, last one wins */
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_valleyview_m_info = {
299,6 → 241,8
.display_mmio_offset = VLV_DISPLAY_BASE,
.has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_valleyview_d_info = {
308,6 → 252,8
.display_mmio_offset = VLV_DISPLAY_BASE,
.has_fbc = 0, /* legal, last one wins */
.has_llc = 0, /* legal, last one wins */
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_haswell_d_info = {
316,6 → 262,8
.has_ddi = 1,
.has_fpga_dbg = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_haswell_m_info = {
325,6 → 273,8
.has_ddi = 1,
.has_fpga_dbg = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_broadwell_d_info = {
333,6 → 283,10
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_broadwell_m_info = {
341,8 → 295,47
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_broadwell_gt3d_info = {
.gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_broadwell_gt3m_info = {
.gen = 8, .is_mobile = 1, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_cherryview_info = {
.is_preliminary = 1,
.gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS,
};
 
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
371,8 → 364,11
INTEL_HSW_M_IDS(&intel_haswell_m_info), \
INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
INTEL_BDW_M_IDS(&intel_broadwell_m_info), \
INTEL_BDW_D_IDS(&intel_broadwell_d_info)
INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
INTEL_CHV_IDS(&intel_cherryview_info)
 
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
388,7 → 384,7
void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pch;
struct pci_dev *pch = NULL;
 
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
* (which really amounts to a PCH but no South Display).
409,12 → 405,9
* all the ISA bridge devices and check for the first match, instead
* of only checking the first one.
*/
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
while (pch) {
struct pci_dev *curr = pch;
while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
unsigned short id;
id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
dev_priv->pch_id = id;
 
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
446,17 → 439,16
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(!IS_ULT(dev));
} else {
goto check_next;
}
} else
continue;
 
break;
}
check_next:
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
// pci_dev_put(curr);
}
if (!pch)
DRM_DEBUG_KMS("No PCH found?\n");
DRM_DEBUG_KMS("No PCH found.\n");
 
// pci_dev_put(pch);
}
 
bool i915_semaphore_is_enabled(struct drm_device *dev)
464,15 → 456,13
if (INTEL_INFO(dev)->gen < 6)
return false;
 
if (i915.semaphores >= 0)
return i915.semaphores;
 
/* Until we get further testing... */
if (IS_GEN8(dev)) {
WARN_ON(!i915_preliminary_hw_support);
if (IS_GEN8(dev))
return false;
}
 
if (i915_semaphores >= 0)
return i915_semaphores;
 
#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
483,13 → 473,27
}
 
#if 0
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct drm_encoder *encoder;
 
drm_modeset_lock_all(dev);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
 
if (intel_encoder->suspend)
intel_encoder->suspend(intel_encoder);
}
drm_modeset_unlock_all(dev);
}
 
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
pci_power_t opregion_target_state;
 
intel_runtime_pm_get(dev_priv);
 
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_SUSPENDED;
497,8 → 501,7
 
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
hsw_disable_package_c8(dev_priv);
intel_display_set_init_power(dev, true);
intel_display_set_init_power(dev_priv, true);
 
drm_kms_helper_poll_disable(dev);
 
515,19 → 518,24
return error;
}
 
cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
 
drm_irq_uninstall(dev);
dev_priv->enable_hotplug_processing = false;
/*
* Disable CRTCs directly since we want to preserve sw state
* for _thaw.
* for _thaw. Also, power gate the CRTC power wells.
*/
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
dev_priv->display.crtc_disable(crtc);
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
for_each_crtc(dev, crtc)
intel_crtc_control(crtc, false);
drm_modeset_unlock_all(dev);
 
intel_dp_mst_suspend(dev);
 
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
intel_runtime_pm_disable_interrupts(dev);
intel_suspend_encoders(dev_priv);
 
intel_suspend_gt_powersave(dev);
 
intel_modeset_suspend_hw(dev);
}
 
535,6 → 543,14
 
i915_save_state(dev);
 
opregion_target_state = PCI_D3cold;
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
if (acpi_target_system_state() < ACPI_STATE_S3)
opregion_target_state = PCI_D1;
#endif
intel_opregion_notify_adapter(dev, opregion_target_state);
 
intel_uncore_forcewake_reset(dev, false);
intel_opregion_fini(dev);
 
console_lock();
541,6 → 557,10
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
console_unlock();
 
dev_priv->suspend_count++;
 
intel_display_set_init_power(dev_priv, false);
 
return 0;
}
 
586,33 → 606,24
console_unlock();
}
 
static void intel_resume_hotplug(struct drm_device *dev)
static int i915_drm_thaw_early(struct drm_device *dev)
{
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
 
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_disable_pc8(dev_priv);
 
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
intel_uncore_early_sanitize(dev, true);
intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv);
 
mutex_unlock(&mode_config->mutex);
 
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
return 0;
}
 
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
 
intel_uncore_early_sanitize(dev);
 
intel_uncore_sanitize(dev);
 
if (drm_core_check_feature(dev, DRIVER_MODESET) &&
restore_gtt_mappings) {
mutex_lock(&dev->struct_mutex);
620,8 → 631,6
mutex_unlock(&dev->struct_mutex);
}
 
intel_power_domains_init_hw(dev);
 
i915_restore_state(dev);
intel_opregion_setup(dev);
 
628,19 → 637,29
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_init_pch_refclk(dev);
drm_mode_config_reset(dev);
 
mutex_lock(&dev->struct_mutex);
 
error = i915_gem_init_hw(dev);
if (i915_gem_init_hw(dev)) {
DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
}
mutex_unlock(&dev->struct_mutex);
 
/* We need working interrupts for modeset enabling ... */
drm_irq_install(dev);
intel_runtime_pm_restore_interrupts(dev);
 
intel_modeset_init_hw(dev);
 
{
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
intel_dp_mst_resume(dev);
drm_modeset_lock_all(dev);
drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
 
651,9 → 670,8
* notifications.
* */
intel_hpd_init(dev);
dev_priv->enable_hotplug_processing = true;
/* Config may have changed between suspend and resume */
intel_resume_hotplug(dev);
drm_helper_hpd_irq_event(dev);
}
 
intel_opregion_init(dev);
670,16 → 688,13
schedule_work(&dev_priv->console_resume_work);
}
 
/* Undo what we did at i915_drm_freeze so the refcount goes back to the
* expected level. */
hsw_enable_package_c8(dev_priv);
 
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
 
intel_runtime_pm_put(dev_priv);
return error;
intel_opregion_notify_adapter(dev, PCI_D0);
 
return 0;
}
 
static int i915_drm_thaw(struct drm_device *dev)
690,19 → 705,33
return __i915_drm_thaw(dev, true);
}
 
int i915_resume(struct drm_device *dev)
static int i915_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
 
/*
* We have a resume ordering issue with the snd-hda driver also
* requiring our device to be powered up. Due to the lack of a
* parent/child relationship we currently solve this with an early
* resume hook.
*
* FIXME: This should be solved with a special hdmi sink device or
* similar so that power domains can be employed.
*/
if (pci_enable_device(dev->pdev))
return -EIO;
 
pci_set_master(dev->pdev);
 
return i915_drm_thaw_early(dev);
}
 
int i915_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
* earlier) need to restore the GTT mappings since the BIOS might clear
716,6 → 745,14
return 0;
}
 
static int i915_resume_legacy(struct drm_device *dev)
{
i915_resume_early(dev);
i915_resume(dev);
 
return 0;
}
 
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
733,11 → 770,11
*/
int i915_reset(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
bool simulated;
int ret;
 
if (!i915_try_reset)
if (!i915.reset)
return 0;
 
mutex_lock(&dev->struct_mutex);
790,8 → 827,21
return ret;
}
 
drm_irq_uninstall(dev);
drm_irq_install(dev);
/*
* FIXME: This races pretty badly against concurrent holders of
* ring interrupts. This is possible since we've started to drop
* dev->struct_mutex in select places when waiting for the gpu.
*/
 
/*
* rps/rc6 re-init is necessary to restore state lost after the
* reset and the re-install of gt irqs. Skip for ironlake per
* previous concerns that it doesn't respond well to some forms
* of re-init after reset.
*/
if (INTEL_INFO(dev)->gen > 5)
intel_reset_gt_powersave(dev);
 
intel_hpd_init(dev);
} else {
mutex_unlock(&dev->struct_mutex);
805,7 → 855,7
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
 
if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
DRM_INFO("This hardware requires preliminary hardware support.\n"
"See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
return -ENODEV;
819,17 → 869,7
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
 
/* We've managed to ship a kms-enabled ddx that shipped with an XvMC
* implementation for gen3 (and only gen3) that used legacy drm maps
* (gasp!) to share buffers between X and the client. Hence we need to
* keep around the fake agp stuff for gen3, even when kms is enabled. */
if (intel_info->gen != 3) {
driver.driver_features &=
~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
} else if (!intel_agp_enabled) {
DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
return -ENODEV;
}
driver.driver_features &= ~(DRIVER_USE_AGP);
 
return drm_get_pci_dev(pdev, ent, &driver);
}
846,7 → 886,6
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int error;
 
if (!drm_dev || !drm_dev->dev_private) {
dev_err(dev, "DRM not initialized, aborting suspend.\n");
856,10 → 895,30
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
 
error = i915_drm_freeze(drm_dev);
if (error)
return error;
return i915_drm_freeze(drm_dev);
}
 
static int i915_pm_suspend_late(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = drm_dev->dev_private;
 
/*
* We have a suspend ordering issue with the snd-hda driver also
* requiring our device to be powered up. Due to the lack of a
* parent/child relationship we currently solve this with a late
* suspend hook.
*
* FIXME: This should be solved with a special hdmi sink device or
* similar so that power domains can be employed.
*/
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
 
if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
hsw_enable_pc8(dev_priv);
 
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
 
866,6 → 925,14
return 0;
}
 
static int i915_pm_resume_early(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
return i915_resume_early(drm_dev);
}
 
static int i915_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
887,6 → 954,14
return i915_drm_freeze(drm_dev);
}
 
static int i915_pm_thaw_early(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
return i915_drm_thaw_early(drm_dev);
}
 
static int i915_pm_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
903,9 → 978,550
return i915_drm_freeze(drm_dev);
}
 
static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
{
hsw_enable_pc8(dev_priv);
 
return 0;
}
 
static int snb_runtime_resume(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
intel_init_pch_refclk(dev);
 
return 0;
}
 
static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
{
hsw_disable_pc8(dev_priv);
 
return 0;
}
 
/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
* defined in the VLV2_S0IXRegs document. This document marks all Gunit
* registers in the following way:
* - Driver: saved/restored by the driver
* - Punit : saved/restored by the Punit firmware
* - No, w/o marking: no need to save/restore, since the register is R/O or
* used internally by the HW in a way that doesn't depend on
* keeping the content across a suspend/resume.
* - Debug : used for debugging
*
* We save/restore all registers marked with 'Driver', with the following
* exceptions:
* - Registers out of use, including also registers marked with 'Debug'.
* These have no effect on the driver's operation, so we don't save/restore
* them to reduce the overhead.
* - Registers that are fully setup by an initialization function called from
* the resume path. For example many clock gating and RPS/RC6 registers.
* - Registers that provide the right functionality with their reset defaults.
*
* TODO: Except for registers that, based on the above 3 criteria, can be safely
* ignored, we save/restore all others, practically treating the HW context as
* a black-box for the driver. Further investigation is needed to reduce the
* saved/restored registers even further, by following the same 3 criteria.
*/
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
int i;
 
/* GAM 0x4000-0x4770 */
s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
s->arb_mode = I915_READ(ARB_MODE);
s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
 
for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
 
s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
 
s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
s->ecochk = I915_READ(GAM_ECOCHK);
s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
 
s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
 
/* MBC 0x9024-0x91D0, 0x8500 */
s->g3dctl = I915_READ(VLV_G3DCTL);
s->gsckgctl = I915_READ(VLV_GSCKGCTL);
s->mbctl = I915_READ(GEN6_MBCTL);
 
/* GCP 0x9400-0x9424, 0x8100-0x810C */
s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
s->rstctl = I915_READ(GEN6_RSTCTL);
s->misccpctl = I915_READ(GEN7_MISCCPCTL);
 
/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
s->gfxpause = I915_READ(GEN6_GFXPAUSE);
s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
s->rpdeuc = I915_READ(GEN6_RPDEUC);
s->ecobus = I915_READ(ECOBUS);
s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
s->rcedata = I915_READ(VLV_RCEDATA);
s->spare2gh = I915_READ(VLV_SPAREG2H);
 
/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
s->gt_imr = I915_READ(GTIMR);
s->gt_ier = I915_READ(GTIER);
s->pm_imr = I915_READ(GEN6_PMIMR);
s->pm_ier = I915_READ(GEN6_PMIER);
 
for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);
 
/* GT SA CZ domain, 0x100000-0x138124 */
s->tilectl = I915_READ(TILECTL);
s->gt_fifoctl = I915_READ(GTFIFOCTL);
s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
s->pmwgicz = I915_READ(VLV_PMWGICZ);
 
/* Gunit-Display CZ domain, 0x182028-0x1821CF */
s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
 
/*
* Not saving any of:
* DFT, 0x9800-0x9EC0
* SARB, 0xB000-0xB1FC
* GAC, 0x5208-0x524C, 0x14000-0x14C000
* PCI CFG
*/
}
 
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
u32 val;
int i;
 
/* GAM 0x4000-0x4770 */
I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
 
for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
 
I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
 
I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
I915_WRITE(GAM_ECOCHK, s->ecochk);
I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
 
I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
 
/* MBC 0x9024-0x91D0, 0x8500 */
I915_WRITE(VLV_G3DCTL, s->g3dctl);
I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
I915_WRITE(GEN6_MBCTL, s->mbctl);
 
/* GCP 0x9400-0x9424, 0x8100-0x810C */
I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
I915_WRITE(GEN6_RSTCTL, s->rstctl);
I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
 
/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
I915_WRITE(ECOBUS, s->ecobus);
I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
I915_WRITE(VLV_RCEDATA, s->rcedata);
I915_WRITE(VLV_SPAREG2H, s->spare2gh);
 
/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
I915_WRITE(GTIMR, s->gt_imr);
I915_WRITE(GTIER, s->gt_ier);
I915_WRITE(GEN6_PMIMR, s->pm_imr);
I915_WRITE(GEN6_PMIER, s->pm_ier);
 
for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);
 
/* GT SA CZ domain, 0x100000-0x138124 */
I915_WRITE(TILECTL, s->tilectl);
I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
/*
* Preserve the GT allow wake and GFX force clock bit; they are not
* restored, as they are used to control the s0ix suspend/resume
* sequence by the caller.
*/
val = I915_READ(VLV_GTLC_WAKE_CTRL);
val &= VLV_GTLC_ALLOWWAKEREQ;
val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
 
val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
val &= VLV_GFX_CLK_FORCE_ON_BIT;
val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
 
I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
 
/* Gunit-Display CZ domain, 0x182028-0x1821CF */
I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
#endif
 
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
u32 val;
int err;
 
val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
 
#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
/* Wait for a previous force-off to settle */
if (force_on) {
err = wait_for(!COND, 20);
if (err) {
DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
I915_READ(VLV_GTLC_SURVIVABILITY_REG));
return err;
}
}
 
val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
if (force_on)
val |= VLV_GFX_CLK_FORCE_ON_BIT;
I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
 
if (!force_on)
return 0;
 
err = wait_for(COND, 20);
if (err)
DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
I915_READ(VLV_GTLC_SURVIVABILITY_REG));
 
return err;
#undef COND
}
#if 0
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
u32 val;
int err = 0;
 
val = I915_READ(VLV_GTLC_WAKE_CTRL);
val &= ~VLV_GTLC_ALLOWWAKEREQ;
if (allow)
val |= VLV_GTLC_ALLOWWAKEREQ;
I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
POSTING_READ(VLV_GTLC_WAKE_CTRL);
 
#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
allow)
err = wait_for(COND, 1);
if (err)
DRM_ERROR("timeout disabling GT waking\n");
return err;
#undef COND
}
 
static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
bool wait_for_on)
{
u32 mask;
u32 val;
int err;
 
mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
if (COND)
return 0;
 
DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
wait_for_on ? "on" : "off",
I915_READ(VLV_GTLC_PW_STATUS));
 
/*
* RC6 transitioning can be delayed up to 2 msec (see
* valleyview_enable_rps), use 3 msec for safety.
*/
err = wait_for(COND, 3);
if (err)
DRM_ERROR("timeout waiting for GT wells to go %s\n",
wait_for_on ? "on" : "off");
 
return err;
#undef COND
}
 
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
return;
 
DRM_ERROR("GT register access while GT waking disabled\n");
I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
 
static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
{
u32 mask;
int err;
 
/*
* Bspec defines the following GT well on flags as debug only, so
* don't treat them as hard failures.
*/
(void)vlv_wait_for_gt_wells(dev_priv, false);
 
mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
 
vlv_check_no_gt_access(dev_priv);
 
err = vlv_force_gfx_clock(dev_priv, true);
if (err)
goto err1;
 
err = vlv_allow_gt_wake(dev_priv, false);
if (err)
goto err2;
vlv_save_gunit_s0ix_state(dev_priv);
 
err = vlv_force_gfx_clock(dev_priv, false);
if (err)
goto err2;
 
return 0;
 
err2:
/* For safety always re-enable waking and disable gfx clock forcing */
vlv_allow_gt_wake(dev_priv, true);
err1:
vlv_force_gfx_clock(dev_priv, false);
 
return err;
}
 
static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
int err;
int ret;
 
/*
* If any of the steps fail just try to continue, that's the best we
* can do at this point. Return the first error code (which will also
* leave RPM permanently disabled).
*/
ret = vlv_force_gfx_clock(dev_priv, true);
 
vlv_restore_gunit_s0ix_state(dev_priv);
 
err = vlv_allow_gt_wake(dev_priv, true);
if (!ret)
ret = err;
 
err = vlv_force_gfx_clock(dev_priv, false);
if (!ret)
ret = err;
 
vlv_check_no_gt_access(dev_priv);
 
intel_init_clock_gating(dev);
i915_gem_restore_fences(dev);
 
return ret;
}
 
static int intel_runtime_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
return -ENODEV;
 
WARN_ON(!HAS_RUNTIME_PM(dev));
assert_force_wake_inactive(dev_priv);
 
DRM_DEBUG_KMS("Suspending device\n");
 
/*
* We could deadlock here in case another thread holding struct_mutex
* calls RPM suspend concurrently, since the RPM suspend will wait
* first for this RPM suspend to finish. In this case the concurrent
* RPM resume will be followed by its RPM suspend counterpart. Still
* for consistency return -EAGAIN, which will reschedule this suspend.
*/
if (!mutex_trylock(&dev->struct_mutex)) {
DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
/*
* Bump the expiration timestamp, otherwise the suspend won't
* be rescheduled.
*/
pm_runtime_mark_last_busy(device);
 
return -EAGAIN;
}
/*
* We are safe here against re-faults, since the fault handler takes
* an RPM reference.
*/
i915_gem_release_all_mmaps(dev_priv);
mutex_unlock(&dev->struct_mutex);
 
/*
* rps.work can't be rearmed here, since we get here only after making
* sure the GPU is idle and the RPS freq is set to the minimum. See
* intel_mark_idle().
*/
cancel_work_sync(&dev_priv->rps.work);
intel_runtime_pm_disable_interrupts(dev);
 
if (IS_GEN6(dev)) {
ret = 0;
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
ret = hsw_runtime_suspend(dev_priv);
} else if (IS_VALLEYVIEW(dev)) {
ret = vlv_runtime_suspend(dev_priv);
} else {
ret = -ENODEV;
WARN_ON(1);
}
 
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
intel_runtime_pm_restore_interrupts(dev);
 
return ret;
}
 
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
dev_priv->pm.suspended = true;
 
/*
* current versions of firmware which depend on this opregion
* notification have repurposed the D1 definition to mean
* "runtime suspended" vs. what you would normally expect (D3)
* to distinguish it from notifications that might be sent
* via the suspend path.
*/
intel_opregion_notify_adapter(dev, PCI_D1);
 
DRM_DEBUG_KMS("Device suspended\n");
return 0;
}
 
static int intel_runtime_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
WARN_ON(!HAS_RUNTIME_PM(dev));
 
DRM_DEBUG_KMS("Resuming device\n");
 
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
 
if (IS_GEN6(dev)) {
ret = snb_runtime_resume(dev_priv);
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
ret = hsw_runtime_resume(dev_priv);
} else if (IS_VALLEYVIEW(dev)) {
ret = vlv_runtime_resume(dev_priv);
} else {
WARN_ON(1);
ret = -ENODEV;
}
 
/*
* No point in rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
*/
i915_gem_init_swizzling(dev);
gen6_update_ring_freq(dev);
 
intel_runtime_pm_restore_interrupts(dev);
intel_reset_gt_powersave(dev);
 
if (ret)
DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
else
DRM_DEBUG_KMS("Device resumed\n");
 
return ret;
}
 
static const struct dev_pm_ops i915_pm_ops = {
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
.resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
.freeze = i915_pm_freeze,
.thaw_early = i915_pm_thaw_early,
.thaw = i915_pm_thaw,
.poweroff = i915_pm_poweroff,
.restore_early = i915_pm_resume_early,
.restore = i915_pm_resume,
.runtime_suspend = intel_runtime_suspend,
.runtime_resume = intel_runtime_resume,
};
 
static const struct vm_operations_struct i915_gem_vm_ops = {
.fault = i915_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
 
static const struct file_operations i915_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = i915_compat_ioctl,
#endif
.llseek = noop_llseek,
};
#endif
 
static struct drm_driver driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
/drivers/video/drm/i915/i915_drv.h
35,12 → 35,13
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/scatterlist.h>
#include "i915_gem_gtt.h"
//#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
//#include <linux/backlight.h>
#include <linux/hashtable.h>
 
#include <linux/spinlock.h>
#include <linux/err.h>
63,7 → 64,7
 
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20080730"
#define DRIVER_DATE "20140725"
 
enum pipe {
INVALID_PIPE = -1,
70,7 → 71,8
PIPE_A = 0,
PIPE_B,
PIPE_C,
I915_MAX_PIPES
_PIPE_EDP,
I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')
 
78,7 → 80,8
TRANSCODER_A = 0,
TRANSCODER_B,
TRANSCODER_C,
TRANSCODER_EDP = 0xF,
TRANSCODER_EDP,
I915_MAX_TRANSCODERS
};
#define transcoder_name(t) ((t) + 'A')
 
89,7 → 92,7
};
#define plane_name(p) ((p) + 'A')
 
#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')
#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
 
enum port {
PORT_A = 0,
101,7 → 104,7
};
#define port_name(p) ((p) + 'A')
 
#define I915_NUM_PHYS_VLV 1
#define I915_NUM_PHYS_VLV 2
 
enum dpio_channel {
DPIO_CH0,
124,15 → 127,25
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP,
POWER_DOMAIN_PORT_DDI_A_2_LANES,
POWER_DOMAIN_PORT_DDI_A_4_LANES,
POWER_DOMAIN_PORT_DDI_B_2_LANES,
POWER_DOMAIN_PORT_DDI_B_4_LANES,
POWER_DOMAIN_PORT_DDI_C_2_LANES,
POWER_DOMAIN_PORT_DDI_C_4_LANES,
POWER_DOMAIN_PORT_DDI_D_2_LANES,
POWER_DOMAIN_PORT_DDI_D_4_LANES,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
POWER_DOMAIN_VGA,
POWER_DOMAIN_AUDIO,
POWER_DOMAIN_PLLS,
POWER_DOMAIN_INIT,
 
POWER_DOMAIN_NUM,
};
 
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
 
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
140,14 → 153,6
((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
(tran) + POWER_DOMAIN_TRANSCODER_A)
 
#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP))
#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
 
enum hpd_pin {
HPD_NONE = 0,
HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
169,18 → 174,36
I915_GEM_DOMAIN_VERTEX)
 
#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
 
#define for_each_crtc(dev, crtc) \
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
 
#define for_each_intel_crtc(dev, intel_crtc) \
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
 
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc))
 
#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
if ((intel_connector)->base.encoder == (__encoder))
 
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))
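The iterator macros above expand to ordinary for-loops and are meant to be used directly in driver code. A minimal sketch of walking a power domain mask with for_each_power_domain follows; it is illustrative only, the function name example_get_power_domains is made up, and intel_display_power_get is assumed to be the existing helper declared elsewhere in the driver.

static void example_get_power_domains(struct drm_i915_private *dev_priv,
				      unsigned long domains)
{
	enum intel_display_power_domain domain;

	/* Take a power reference for every domain whose bit is set. */
	for_each_power_domain(domain, domains)
		intel_display_power_get(dev_priv, domain);
}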
 
struct drm_i915_private;
struct i915_mmu_object;
 
enum intel_dpll_id {
DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
/* real shared dpll ids must be >= 0 */
DPLL_ID_PCH_PLL_A,
DPLL_ID_PCH_PLL_B,
DPLL_ID_PCH_PLL_A = 0,
DPLL_ID_PCH_PLL_B = 1,
DPLL_ID_WRPLL1 = 0,
DPLL_ID_WRPLL2 = 1,
};
#define I915_NUM_PLLS 2
 
189,6 → 212,7
uint32_t dpll_md;
uint32_t fp0;
uint32_t fp1;
uint32_t wrpll;
};
 
struct intel_shared_dpll {
199,6 → 223,8
/* should match the index in the dev_priv->shared_dplls array */
enum intel_dpll_id id;
struct intel_dpll_hw_state hw_state;
/* The mode_set hook is optional and should be used together with the
* intel_prepare_shared_dpll function. */
void (*mode_set)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
void (*enable)(struct drm_i915_private *dev_priv,
223,12 → 249,6
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n);
 
struct intel_ddi_plls {
int spll_refcount;
int wrpll1_refcount;
int wrpll2_refcount;
};
 
/* Interface history:
*
* 1.1: Original.
246,18 → 266,6
#define WATCH_LISTS 0
#define WATCH_GTT 0
 
#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
 
struct drm_i915_gem_phys_object {
int id;
struct page **page_list;
drm_dma_handle_t *handle;
struct drm_i915_gem_object *cur_obj;
};
 
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
307,53 → 315,87
 
struct drm_i915_error_state {
struct kref ref;
struct timeval time;
 
char error_msg[128];
u32 reset_count;
u32 suspend_count;
 
/* Generic register state */
u32 eir;
u32 pgtbl_er;
u32 ier;
u32 gtier[4];
u32 ccid;
u32 derrmr;
u32 forcewake;
bool waiting[I915_NUM_RINGS];
u32 pipestat[I915_MAX_PIPES];
u32 tail[I915_NUM_RINGS];
u32 head[I915_NUM_RINGS];
u32 ctl[I915_NUM_RINGS];
u32 ipeir[I915_NUM_RINGS];
u32 ipehr[I915_NUM_RINGS];
u32 instdone[I915_NUM_RINGS];
u32 acthd[I915_NUM_RINGS];
u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
/* our own tracking of ring head and tail */
u32 cpu_ring_head[I915_NUM_RINGS];
u32 cpu_ring_tail[I915_NUM_RINGS];
u32 error; /* gen6+ */
u32 err_int; /* gen7 */
u32 bbstate[I915_NUM_RINGS];
u32 instpm[I915_NUM_RINGS];
u32 instps[I915_NUM_RINGS];
u32 done_reg;
u32 gac_eco;
u32 gam_ecochk;
u32 gab_ctl;
u32 gfx_mode;
u32 extra_instdone[I915_NUM_INSTDONE_REG];
u32 seqno[I915_NUM_RINGS];
u64 bbaddr[I915_NUM_RINGS];
u32 fault_reg[I915_NUM_RINGS];
u32 done_reg;
u32 faddr[I915_NUM_RINGS];
u64 fence[I915_MAX_NUM_FENCES];
struct timeval time;
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
 
struct drm_i915_error_ring {
bool valid;
/* Software tracked state */
bool waiting;
int hangcheck_score;
enum intel_ring_hangcheck_action hangcheck_action;
int num_requests;
 
/* our own tracking of ring head and tail */
u32 cpu_ring_head;
u32 cpu_ring_tail;
 
u32 semaphore_seqno[I915_NUM_RINGS - 1];
 
/* Register state */
u32 tail;
u32 head;
u32 ctl;
u32 hws;
u32 ipeir;
u32 ipehr;
u32 instdone;
u32 bbstate;
u32 instpm;
u32 instps;
u32 seqno;
u64 bbaddr;
u64 acthd;
u32 fault_reg;
u64 faddr;
u32 rc_psmi; /* sleep state */
u32 semaphore_mboxes[I915_NUM_RINGS - 1];
 
struct drm_i915_error_object {
int page_count;
u32 gtt_offset;
u32 *pages[0];
} *ringbuffer, *batchbuffer, *ctx;
} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
 
struct drm_i915_error_request {
long jiffies;
u32 seqno;
u32 tail;
} *requests;
int num_requests;
 
struct {
u32 gfx_mode;
union {
u64 pdp[4];
u32 pp_dir_base;
};
} vm_info;
 
pid_t pid;
char comm[TASK_COMM_LEN];
} ring[I915_NUM_RINGS];
struct drm_i915_error_buffer {
u32 size;
367,18 → 409,17
u32 tiling:2;
u32 dirty:1;
u32 purgeable:1;
u32 userptr:1;
s32 ring:4;
u32 cache_level:3;
} **active_bo, **pinned_bo;
 
u32 *active_bo_count, *pinned_bo_count;
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
int hangcheck_score[I915_NUM_RINGS];
enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};
 
struct intel_connector;
struct intel_crtc_config;
struct intel_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
410,13 → 451,15
void (*update_wm)(struct drm_crtc *crtc);
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enable, bool scaled);
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enable, bool scaled);
void (*modeset_global_resources)(struct drm_device *dev);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
struct intel_crtc_config *);
void (*get_plane_config)(struct intel_crtc *,
struct intel_plane_config *);
int (*crtc_mode_set)(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *old_fb);
431,8 → 474,10
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
uint32_t flags);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
void (*update_primary_plane)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y);
void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */
481,7 → 526,7
unsigned fw_rendercount;
unsigned fw_mediacount;
 
struct delayed_work force_wake_work;
struct timer_list force_wake_timer;
};
 
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
516,9 → 561,15
struct intel_device_info {
u32 display_mmio_offset;
u8 num_pipes:3;
u8 num_sprites[I915_MAX_PIPES];
u8 gen;
u8 ring_mask; /* Rings supported by the HW */
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
int trans_offsets[I915_MAX_TRANSCODERS];
int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
};
 
#undef DEFINE_FLAG
534,138 → 585,6
I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};
 
typedef uint32_t gen6_gtt_pte_t;
 
struct i915_address_space {
struct drm_mm mm;
struct drm_device *dev;
struct list_head global_link;
unsigned long start; /* Start offset always 0 for dri2 */
size_t total; /* size addr space maps (ex. 2GB for ggtt) */
 
struct {
dma_addr_t addr;
struct page *page;
} scratch;
 
/**
* List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
*/
struct list_head active_list;
 
/**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
* last_rendering_seqno is 0 while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
* freed, and we'll pull it off the list in the free path.
*/
struct list_head inactive_list;
 
/* FIXME: Need a more generic return type */
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
bool valid); /* Create a valid PTE */
void (*clear_range)(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries,
bool use_scratch);
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
unsigned int first_entry,
enum i915_cache_level cache_level);
void (*cleanup)(struct i915_address_space *vm);
};
 
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
* portion of the GTT which can be mapped by the CPU and remain both coherent
* and correct (in cases like swizzling). That region is referred to as GMADR in
* the spec.
*/
struct i915_gtt {
struct i915_address_space base;
size_t stolen_size; /* Total size of stolen memory */
 
unsigned long mappable_end; /* End offset that we can CPU map */
void *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
 
/** "Graphics Stolen Memory" holds the global PTEs */
void __iomem *gsm;
 
bool do_idle_maps;
 
int mtrr;
 
/* global gtt ops */
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
size_t *stolen, phys_addr_t *mappable_base,
unsigned long *mappable_end);
};
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
 
struct i915_hw_ppgtt {
struct i915_address_space base;
unsigned num_pd_entries;
union {
struct page **pt_pages;
struct page *gen8_pt_pages;
};
struct page **pd_pages;
int num_pd_pages;
int num_pt_pages;
union {
uint32_t pd_offset;
dma_addr_t pd_dma_addr[4];
};
union {
dma_addr_t *pt_dma_addr;
dma_addr_t *gen8_pt_dma_addr[4];
};
int (*enable)(struct drm_device *dev);
};
 
/**
* A VMA represents a GEM BO that is bound into an address space. Therefore, a
* VMA's presence cannot be guaranteed before binding, or after unbinding the
* object into/from the address space.
*
* To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
* will always be <= an object's lifetime. So object refcounting should cover us.
*/
struct i915_vma {
struct drm_mm_node node;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
 
/** This object's place on the active/inactive lists */
struct list_head mm_list;
 
struct list_head vma_link; /* Link in the object's VMA list */
 
/** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
 
/**
* Used for performing relocations during execbuffer insertion.
*/
struct hlist_node exec_node;
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
 
};
 
struct i915_ctx_hang_stats {
/* This context had batch pending when hang was declared */
unsigned batch_pending;
681,27 → 600,48
};
 
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
#define DEFAULT_CONTEXT_HANDLE 0
/**
* struct intel_context - as the name implies, represents a context.
* @ref: reference count.
* @user_handle: userspace tracking identity for this context.
* @remap_slice: l3 row remapping information.
* @file_priv: filp associated with this context (NULL for global default
* context).
* @hang_stats: information about the role of this context in possible GPU
* hangs.
* @vm: virtual memory space used by this context.
* @legacy_hw_ctx: render context backing object and whether it is correctly
* initialized (legacy ring submission mechanism only).
* @link: link in the global list of contexts.
*
* Contexts are memory images used by the hardware to store copies of their
* internal state.
*/
struct intel_context {
struct kref ref;
int id;
bool is_initialized;
int user_handle;
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
struct intel_ring_buffer *ring;
struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
struct i915_address_space *vm;
 
struct {
struct drm_i915_gem_object *rcs_state;
bool initialized;
} legacy_hw_ctx;
 
struct list_head link;
};
 
struct i915_fbc {
unsigned long size;
unsigned threshold;
unsigned int fb_id;
enum plane plane;
int y;
 
struct drm_mm_node *compressed_fb;
struct drm_mm_node compressed_fb;
struct drm_mm_node *compressed_llb;
 
struct intel_fbc_work {
725,9 → 665,19
} no_fbc_reason;
};
 
struct i915_drrs {
struct intel_connector *connector;
};
 
struct intel_dp;
struct i915_psr {
struct mutex lock;
bool sink_support;
bool source_ok;
struct intel_dp *enabled;
bool active;
struct delayed_work work;
unsigned busy_frontbuffer_bits;
};
 
enum intel_pch {
746,6 → 696,7
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
 
struct intel_fbdev;
struct intel_fbc_work;
843,11 → 794,7
u32 savePFIT_CONTROL;
u32 save_palette_a[256];
u32 save_palette_b[256];
u32 saveDPFC_CB_BASE;
u32 saveFBC_CFB_BASE;
u32 saveFBC_LL_BASE;
u32 saveFBC_CONTROL;
u32 saveFBC_CONTROL2;
u32 saveIER;
u32 saveIIR;
u32 saveIMR;
912,21 → 859,100
u32 savePCH_PORT_HOTPLUG;
};
 
struct vlv_s0ix_state {
/* GAM */
u32 wr_watermark;
u32 gfx_prio_ctrl;
u32 arb_mode;
u32 gfx_pend_tlb0;
u32 gfx_pend_tlb1;
u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
u32 media_max_req_count;
u32 gfx_max_req_count;
u32 render_hwsp;
u32 ecochk;
u32 bsd_hwsp;
u32 blt_hwsp;
u32 tlb_rd_addr;
 
/* MBC */
u32 g3dctl;
u32 gsckgctl;
u32 mbctl;
 
/* GCP */
u32 ucgctl1;
u32 ucgctl3;
u32 rcgctl1;
u32 rcgctl2;
u32 rstctl;
u32 misccpctl;
 
/* GPM */
u32 gfxpause;
u32 rpdeuhwtc;
u32 rpdeuc;
u32 ecobus;
u32 pwrdwnupctl;
u32 rp_down_timeout;
u32 rp_deucsw;
u32 rcubmabdtmr;
u32 rcedata;
u32 spare2gh;
 
/* Display 1 CZ domain */
u32 gt_imr;
u32 gt_ier;
u32 pm_imr;
u32 pm_ier;
u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
 
/* GT SA CZ domain */
u32 tilectl;
u32 gt_fifoctl;
u32 gtlc_wake_ctrl;
u32 gtlc_survive;
u32 pmwgicz;
 
/* Display 2 CZ domain */
u32 gu_ctl0;
u32 gu_ctl1;
u32 clock_gate_dis2;
};
 
struct intel_rps_ei {
u32 cz_clock;
u32 render_c0;
u32 media_c0;
};
 
struct intel_gen6_power_mgmt {
/* work and pm_iir are protected by dev_priv->irq_lock */
struct work_struct work;
u32 pm_iir;
 
/* The below variables and all the rps hw state are protected by
* dev->struct_mutex. */
u8 cur_delay;
u8 min_delay;
u8 max_delay;
u8 rpe_delay;
u8 rp1_delay;
u8 rp0_delay;
u8 hw_max;
/* Frequencies are stored in potentially platform dependent multiples.
* In other words, *_freq needs to be multiplied by X to be interesting.
* Soft limits are those which are used for the dynamic reclocking done
* by the driver (raise frequencies under heavy loads, and lower for
* lighter loads). Hard limits are those imposed by the hardware.
*
* A distinction is made for overclocking, which is never enabled by
* default, and is considered to be above the hard limit if it's
* possible at all.
*/
u8 cur_freq; /* Current frequency (cached, may not == HW) */
u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
u8 max_freq_softlimit; /* Max frequency permitted by the driver */
u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
u8 min_freq; /* AKA RPn. Minimum frequency */
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/freqency */
u8 rp0_freq; /* Non-overclocked max frequency. */
u32 cz_freq;
 
u32 ei_interrupt_count;
 
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
933,6 → 959,9
bool enabled;
struct delayed_work delayed_resume_work;
 
/* manual wa residency calculations */
struct intel_rps_ei up_ei, down_ei;
 
/*
* Protects RPS/RC6 register access and PCU communication.
* Must be taken after struct_mutex if nested.
954,7 → 983,7
unsigned long last_time1;
unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
u64 last_time2;
unsigned long gfx_power;
u8 corr;
 
965,6 → 994,36
struct drm_i915_gem_object *renderctx;
};
 
struct drm_i915_private;
struct i915_power_well;
 
struct i915_power_well_ops {
/*
* Synchronize the well's hw state to match the current sw state, for
* example enable/disable it based on the current refcount. Called
* during driver init and resume time, possibly after first calling
* the enable/disable handlers.
*/
void (*sync_hw)(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well);
/*
* Enable the well and resources that depend on it (for example
* interrupts located on the well). Called after the 0->1 refcount
* transition.
*/
void (*enable)(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well);
/*
* Disable the well and resources that depend on it. Called after
* the 1->0 refcount transition.
*/
void (*disable)(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well);
/* Returns the hw enabled state. */
bool (*is_enabled)(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well);
};
 
/* Power well structure for haswell */
struct i915_power_well {
const char *name;
971,12 → 1030,11
bool always_on;
/* power well enable/disable usage count */
int count;
/* cached hw enabled state */
bool hw_enabled;
unsigned long domains;
void *data;
void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
bool enable);
bool (*is_enabled)(struct drm_device *dev,
struct i915_power_well *power_well);
unsigned long data;
const struct i915_power_well_ops *ops;
};
 
struct i915_power_domains {
985,6 → 1043,7
* time are on. They are kept on until after the first modeset.
*/
bool init_power_on;
bool initializing;
int power_well_count;
 
struct mutex lock;
1070,14 → 1129,22
*/
bool interruptible;
 
/**
* Is the GPU currently considered idle, or busy executing userspace
* requests? Whilst idle, we attempt to power down the hardware and
* display clocks. In order to reduce the effect on performance, there
* is a slight delay before we do so.
*/
bool busy;
 
/* the indicator for dispatch video commands on two BSD rings */
int bsd_ring_dispatch_index;
 
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
uint32_t bit_6_swizzle_y;
 
/* storage for physical objects */
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
 
/* accounting, useful for userland debugging */
spinlock_t object_stat_lock;
size_t object_memory;
1148,8 → 1215,12
*/
wait_queue_head_t reset_queue;
 
/* For gpu hang simulation. */
unsigned int stop_rings;
/* Userspace knobs for gpu hang simulation;
* combines both a ring mask, and extra flags
*/
u32 stop_rings;
#define I915_STOP_RING_ALLOW_BAN (1 << 31)
#define I915_STOP_RING_ALLOW_WARN (1 << 30)
 
/* For missed irq/seqno simulation. */
unsigned int test_irq_rings;
1169,6 → 1240,12
uint8_t supports_dp:1;
};
 
enum drrs_support_type {
DRRS_NOT_SUPPORTED = 0,
STATIC_DRRS_SUPPORT = 1,
SEAMLESS_DRRS_SUPPORT = 2
};
 
struct intel_vbt_data {
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
1181,9 → 1258,12
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
unsigned int has_mipi:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
 
enum drrs_support_type drrs_type;
 
/* eDP */
int edp_rate;
int edp_lanes;
1196,12 → 1276,21
 
struct {
u16 pwm_freq_hz;
bool present;
bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */
} backlight;
 
/* MIPI DSI */
struct {
u16 port;
u16 panel_id;
struct mipi_config *config;
struct mipi_pps_data *pps;
u8 seq_version;
u32 size;
u8 *data;
u8 *sequence[MIPI_SEQ_MAX];
} dsi;
 
int crt_ddc_pin;
1235,76 → 1324,31
};
 
/*
* This struct tracks the state needed for the Package C8+ feature.
* This struct helps tracking the state needed for runtime PM, which puts the
* device in PCI D3 state. Notice that when this happens, nothing on the
* graphics device works, even register access, so we don't get interrupts nor
* anything else.
*
* Package states C8 and deeper are really deep PC states that can only be
* reached when all the devices on the system allow it, so even if the graphics
* device allows PC8+, it doesn't mean the system will actually get to these
* states.
* Every piece of our code that needs to actually touch the hardware needs to
* either call intel_runtime_pm_get or call intel_display_power_get with the
* appropriate power domain.
*
* Our driver only allows PC8+ when all the outputs are disabled, the power well
* is disabled and the GPU is idle. When these conditions are met, we manually
* do the other conditions: disable the interrupts, clocks and switch LCPLL
* refclk to Fclk.
* Our driver uses the autosuspend delay feature, which means we'll only really
* suspend if we stay with zero refcount for a certain amount of time. The
* default value is currently very conservative (see intel_init_runtime_pm), but
* it can be changed with the standard runtime PM files from sysfs.
*
* When we really reach PC8 or deeper states (not just when we allow it) we lose
* the state of some registers, so when we come back from PC8+ we need to
* restore this state. We don't get into PC8+ if we're not in RC6, so we don't
* need to take care of the registers kept by RC6.
*
* The interrupt disabling is part of the requirements. We can only leave the
* PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
* can lock the machine.
*
* Ideally every piece of our code that needs PC8+ disabled would call
* hsw_disable_package_c8, which would increment disable_count and prevent the
* system from reaching PC8+. But we don't have a symmetric way to do this for
* everything, so we have the requirements_met and gpu_idle variables. When we
* switch requirements_met or gpu_idle to true we decrease disable_count, and
* increase it in the opposite case. The requirements_met variable is true when
* all the CRTCs, encoders and the power well are disabled. The gpu_idle
* variable is true when the GPU is idle.
*
* In addition to everything, we only actually enable PC8+ if disable_count
* stays at zero for at least some seconds. This is implemented with the
* enable_work variable. We do this so we don't enable/disable PC8 dozens of
* consecutive times when all screens are disabled and some background app
* queries the state of our connectors, or we have some application constantly
* waking up to use the GPU. Only after the enable_work function actually
* enables PC8+ the "enable" variable will become true, which means that it can
* be false even if disable_count is 0.
*
* The irqs_disabled variable becomes true exactly after we disable the IRQs and
* goes back to false exactly before we reenable the IRQs. We use this variable
* to check if someone is trying to enable/disable IRQs while they're supposed
* to be disabled. This shouldn't happen and we'll print some error messages in
* case it happens, but if it actually happens we'll also update the variables
* inside struct regsave so when we restore the IRQs they will contain the
* latest expected values.
* case it happens.
*
* For more, read "Display Sequences for Package C8" on our documentation.
* For more, read the Documentation/power/runtime_pm.txt.
*/
struct i915_package_c8 {
bool requirements_met;
bool gpu_idle;
bool irqs_disabled;
/* Only true after the delayed work task actually enables it. */
bool enabled;
int disable_count;
struct mutex lock;
struct delayed_work enable_work;
 
struct {
uint32_t deimr;
uint32_t sdeimr;
uint32_t gtimr;
uint32_t gtier;
uint32_t gen6_pmimr;
} regsave;
};
 
struct i915_runtime_pm {
bool suspended;
bool _irqs_disabled;
};
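As the comment above notes, any path that touches the hardware while the device could be runtime suspended has to hold a runtime PM reference. A minimal sketch of that get/put pattern follows; it is illustrative only, the wrapper name is made up, and intel_runtime_pm_get/intel_runtime_pm_put are assumed to be the driver's existing helpers.

static u32 example_mmio_read_with_rpm(struct drm_i915_private *dev_priv,
				      u32 reg)
{
	u32 val;

	/* Keep the device awake for the duration of the register access. */
	intel_runtime_pm_get(dev_priv);
	val = I915_READ(reg);
	intel_runtime_pm_put(dev_priv);

	return val;
}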
 
enum intel_pipe_crc_source {
1337,10 → 1381,21
wait_queue_head_t wq;
};
 
typedef struct drm_i915_private {
struct i915_frontbuffer_tracking {
struct mutex lock;
 
/*
* Tracking bits for delayed frontbuffer flushing due to gpu activity or
* scheduled flips.
*/
unsigned busy_bits;
unsigned flip_bits;
};
 
struct drm_i915_private {
struct drm_device *dev;
 
const struct intel_device_info *info;
const struct intel_device_info info;
 
int relative_constants_mode;
 
1360,20 → 1415,27
*/
uint32_t gpio_mmio_base;
 
/* MMIO base address for MIPI regs */
uint32_t mipi_mmio_base;
 
wait_queue_head_t gmbus_wait_queue;
 
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
struct intel_engine_cs ring[I915_NUM_RINGS];
struct drm_i915_gem_object *semaphore_obj;
uint32_t last_seqno, next_seqno;
 
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
 
atomic_t irq_received;
 
/* protects the irq masks */
spinlock_t irq_lock;
 
/* protects the mmio flip data */
spinlock_t mmio_flip_lock;
 
bool display_irqs_enabled;
 
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
// struct pm_qos_request pm_qos;
 
1387,9 → 1449,10
};
u32 gt_irq_mask;
u32 pm_irq_mask;
u32 pm_rps_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
 
struct work_struct hotplug_work;
bool enable_hotplug_processing;
struct {
unsigned long hpd_last_jiffies;
int hpd_cnt;
1400,11 → 1463,10
} hpd_mark;
} hpd_stats[HPD_NUM_PINS];
u32 hpd_event_bits;
struct timer_list hotplug_reenable_timer;
struct delayed_work hotplug_reenable_work;
 
int num_plane;
 
struct i915_fbc fbc;
struct i915_drrs drrs;
struct intel_opregion opregion;
struct intel_vbt_data vbt;
 
1422,6 → 1484,7
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int vlv_cdclk_freq;
 
/**
* wq - Driver workqueue for GEM.
1445,16 → 1508,19
struct mutex modeset_restore_lock;
 
struct list_head vm_list; /* Global list of all address spaces */
struct i915_gtt gtt; /* VMA representing the global address space */
struct i915_gtt gtt; /* VM representing the global address space */
 
struct i915_gem_mm mm;
#if defined(CONFIG_MMU_NOTIFIER)
DECLARE_HASHTABLE(mmu_notifiers, 7);
#endif
 
/* Kernel Modesetting */
 
struct sdvo_device_mapping sdvo_mappings[2];
 
struct drm_crtc *plane_to_crtc_mapping[3];
struct drm_crtc *pipe_to_crtc_mapping[3];
struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
wait_queue_head_t pending_flip_queue;
 
#ifdef CONFIG_DEBUG_FS
1463,7 → 1529,6
 
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
struct intel_ddi_plls ddi_plls;
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
/* Reclocking support */
1471,6 → 1536,9
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
 
struct i915_frontbuffer_tracking fb_tracking;
 
u16 orig_clock;
 
bool mchbar_need_disable;
1514,7 → 1582,9
 
u32 fdi_rx_config;
 
u32 suspend_count;
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state vlv_s0ix_state;
 
struct {
/*
1533,17 → 1603,34
struct ilk_wm_values hw;
} wm;
 
struct i915_package_c8 pc8;
 
struct i915_runtime_pm pm;
 
struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
u32 long_hpd_port_mask;
u32 short_hpd_port_mask;
struct work_struct dig_port_work;
 
/*
* If we get an HPD irq from DP and an HPD irq from non-DP, the
* non-DP HPD could block the workqueue on acquiring a mode config
* mutex that userspace may have taken. However, userspace is
* waiting on the DP workqueue to run, which is blocked behind the
* non-DP one.
*/
struct workqueue_struct *dp_wq;
 
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
/* Old ums support infrastructure, same warning applies. */
struct i915_ums_state ums;
} drm_i915_private_t;
 
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
*/
};
 
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
return dev->dev_private;
1579,8 → 1666,32
*/
int (*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *);
int (*dmabuf_export)(struct drm_i915_gem_object *);
void (*release)(struct drm_i915_gem_object *);
};
 
/*
* Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
* considered to be the frontbuffer for the given plane interface-wise. This
* doesn't mean that the hw necessarily already scans it out, but that any
* rendering (by the cpu or gpu) will land in the frontbuffer eventually.
*
* We have one bit per pipe and per scanout plane type.
*/
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
#define INTEL_FRONTBUFFER_BITS \
(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_CURSOR(pipe) \
(1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_SPRITE(pipe) \
(1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
(1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
(0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
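A short worked expansion of these macros may make the layout clearer; it is illustrative only and uses PIPE_B == 1 from the pipe enum earlier in this header.

/*
 * With INTEL_FRONTBUFFER_BITS_PER_PIPE == 4, pipe B occupies bits 4..7:
 *   INTEL_FRONTBUFFER_PRIMARY(PIPE_B)  == 1 << 4   == 0x10
 *   INTEL_FRONTBUFFER_CURSOR(PIPE_B)   == 1 << 5   == 0x20
 *   INTEL_FRONTBUFFER_SPRITE(PIPE_B)   == 1 << 6   == 0x40
 *   INTEL_FRONTBUFFER_OVERLAY(PIPE_B)  == 1 << 7   == 0x80
 *   INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) == 0xf << 4 == 0xf0
 */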
 
struct drm_i915_gem_object {
struct drm_gem_object base;
 
1635,18 → 1746,6
*/
unsigned int fence_dirty:1;
 
/** How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
* (via user_pin_count), execbuffer (objects are not allowed multiple
* times for the same batchbuffer), and the framebuffer code. When
* switching/pageflipping, the framebuffer code has at most two buffers
* pinned per crtc.
*
* In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
* bits with absolutely no headroom. So use 4 bits. */
unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 
/**
* Is the object at the current location in the gtt mappable and
* fenceable? Used to avoid costly recalculations.
1663,6 → 1762,12
unsigned int pin_display:1;
 
/*
* Is the object to be mapped as read-only to the GPU?
* Only honoured if hardware has the relevant pte bit.
*/
unsigned long gt_ro:1;
 
/*
* Is the GPU currently using a fence to access this buffer,
*/
unsigned int pending_fenced_gpu_access:1;
1674,6 → 1779,8
unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;
 
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
 
struct sg_table *pages;
int pages_pin_count;
 
1681,7 → 1788,7
void *dma_buf_vmapping;
int vmapping_count;
 
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
 
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_read_seqno;
1703,12 → 1810,27
struct drm_file *pin_filp;
 
/** for phy allocated objects */
struct drm_i915_gem_phys_object *phys_obj;
drm_dma_handle_t *phys_handle;
 
union {
struct i915_gem_userptr {
uintptr_t ptr;
unsigned read_only :1;
unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15
 
struct mm_struct *mm;
struct i915_mmu_object *mn;
struct work_struct *work;
} userptr;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
 
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits);
 
/**
* Request queue structure.
*
1721,7 → 1843,7
*/
struct drm_i915_gem_request {
/** On Which ring this request was generated */
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
 
/** GEM sequence number associated with this request. */
uint32_t seqno;
1733,7 → 1855,7
u32 tail;
 
/** Context related to this request */
struct i915_hw_context *ctx;
struct intel_context *ctx;
 
/** Batch buffer related to this request if any */
struct drm_i915_gem_object *batch_obj;
1751,6 → 1873,7
 
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
struct drm_file *file;
 
struct {
spinlock_t lock;
1759,12 → 1882,102
} mm;
struct idr context_idr;
 
struct i915_ctx_hang_stats hang_stats;
atomic_t rps_wait_boost;
struct intel_engine_cs *bsd_ring;
};
 
#define INTEL_INFO(dev) (to_i915(dev)->info)
/*
* A command that requires special handling by the command parser.
*/
struct drm_i915_cmd_descriptor {
/*
* Flags describing how the command parser processes the command.
*
* CMD_DESC_FIXED: The command has a fixed length if this is set,
* a length mask if not set
* CMD_DESC_SKIP: The command is allowed but does not follow the
* standard length encoding for the opcode range in
* which it falls
* CMD_DESC_REJECT: The command is never allowed
* CMD_DESC_REGISTER: The command should be checked against the
* register whitelist for the appropriate ring
* CMD_DESC_MASTER: The command is allowed if the submitting process
* is the DRM master
*/
u32 flags;
#define CMD_DESC_FIXED (1<<0)
#define CMD_DESC_SKIP (1<<1)
#define CMD_DESC_REJECT (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK (1<<4)
#define CMD_DESC_MASTER (1<<5)
 
/*
* The command's unique identification bits and the bitmask to get them.
* This isn't strictly the opcode field as defined in the spec and may
* also include type, subtype, and/or subop fields.
*/
struct {
u32 value;
u32 mask;
} cmd;
 
/*
* The command's length. The command is either fixed length (i.e. does
* not include a length field) or has a length field mask. The flag
* CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
* a length mask. All command entries in a command table must include
* length information.
*/
union {
u32 fixed;
u32 mask;
} length;
 
/*
* Describes where to find a register address in the command to check
* against the ring's register whitelist. Only valid if flags has the
* CMD_DESC_REGISTER bit set.
*/
struct {
u32 offset;
u32 mask;
} reg;
 
#define MAX_CMD_DESC_BITMASKS 3
/*
* Describes command checks where a particular dword is masked and
* compared against an expected value. If the command does not match
* the expected value, the parser rejects it. Only valid if flags has
* the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
* are valid.
*
* If the check specifies a non-zero condition_mask then the parser
* only performs the check when the bits specified by condition_mask
* are non-zero.
*/
struct {
u32 offset;
u32 mask;
u32 expected;
u32 condition_offset;
u32 condition_mask;
} bits[MAX_CMD_DESC_BITMASKS];
};
 
/*
* A table of commands requiring special handling by the command parser.
*
* Each ring has an array of tables. Each table consists of an array of command
* descriptors, which must be sorted with command opcodes in ascending order.
*/
struct drm_i915_cmd_table {
const struct drm_i915_cmd_descriptor *table;
int count;
};
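To show how the descriptor fields combine in practice, here is a hypothetical, self-contained table entry; it is not taken from the driver's real command tables, and the opcode value and masks are invented for the example.

/* A fixed-length, one-dword command accepted only from the DRM master. */
static const struct drm_i915_cmd_descriptor example_cmds[] = {
	{
		.flags  = CMD_DESC_FIXED | CMD_DESC_MASTER,
		.cmd    = { .value = 0x12800000, .mask = 0xffff0000 },
		.length = { .fixed = 1 },
	},
};

static const struct drm_i915_cmd_table example_cmd_table = {
	.table = example_cmds,
	.count = ARRAY_SIZE(example_cmds),
};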
 
#define INTEL_INFO(dev) (&to_i915(dev)->info)
 
#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1790,8 → 2003,9
(dev)->pdev->device == 0x0106 || \
(dev)->pdev->device == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (INTEL_INFO(dev)->gen == 8)
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0xFF00) == 0x0C00)
1804,6 → 2018,9
#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \
(dev)->pdev->device == 0x0A1E)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
/*
1824,15 → 2041,21
#define BSD_RING (1<<VCS)
#define BLT_RING (1<<BCS)
#define VEBOX_RING (1<<VECS)
#define BSD2_RING (1<<VCS2)
#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
#define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING)
#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
 
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
1839,6 → 2062,14
 
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
/*
* dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
* even when in MSI mode. This results in spurious interrupt warnings if the
* legacy irq no. is shared with another device. The kernel then disables that
* interrupt source and so prevents the other device from working properly.
*/
#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
 
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
1860,8 → 2091,8
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
#define HAS_RUNTIME_PM(dev) (IS_HASWELL(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
 
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
1877,6 → 2108,8
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
 
#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
 
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
1885,41 → 2118,52
 
#include "i915_trace.h"
 
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern int i915_enable_psr __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;
extern int i915_enable_ips __read_mostly;
extern bool i915_fastboot __read_mostly;
extern int i915_enable_pc8 __read_mostly;
extern int i915_pc8_timeout __read_mostly;
extern bool i915_prefault_disable __read_mostly;
 
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
/* i915_params.c */
struct i915_params {
int modeset;
int panel_ignore_lid;
unsigned int powersave;
int semaphores;
unsigned int lvds_downclock;
int lvds_channel_mode;
int panel_use_ssc;
int vbt_sdvo_panel_type;
int enable_rc6;
int enable_fbc;
int enable_ppgtt;
int enable_psr;
unsigned int preliminary_hw_support;
int disable_power_well;
int enable_ips;
int invert_brightness;
int enable_cmd_parser;
/* leave bools at the end to not create holes */
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
bool reset;
bool disable_display;
bool disable_vtd_wa;
int use_mmio_flip;
bool mmio_debug;
};
extern struct i915_params i915 __read_mostly;
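/*
 * Usage note: call sites read the module parameters through this single
 * "i915" instance, e.g. the pread path later in this diff does
 *
 *	if (likely(!i915.prefault_disable) && !prefaulted)
 *		ret = fault_in_multipages_writeable(user_data, remain);
 */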
 
/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
struct drm_file *file);
extern void i915_driver_postclose(struct drm_device *dev,
struct drm_file *file_priv);
struct drm_file *file);
extern int i915_driver_device_is_agp(struct drm_device * dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
1934,28 → 2178,41
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 
extern void intel_console_resume(struct work_struct *work);
 
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);
__printf(3, 4)
void i915_handle_error(struct drm_device *dev, bool wedged,
const char *fmt, ...);
 
void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
int new_delay);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
 
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
 
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 status_mask);
 
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 status_mask);
 
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
 
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
1999,6 → 2256,9
struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_init_userptr(struct drm_device *dev);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
2010,22 → 2270,29
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_init_vm(struct drm_i915_private *dev_priv,
struct i915_address_space *vm);
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);
 
#define PIN_MAPPABLE 0x1
#define PIN_NONBLOCK 0x2
#define PIN_GLOBAL 0x4
#define PIN_OFFSET_BIAS 0x8
#define PIN_OFFSET_MASK (~4095)
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
uint64_t flags);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
 
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
int *needs_clflush);
 
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
2049,9 → 2316,9
 
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
struct intel_engine_cs *to);
void i915_vma_move_to_active(struct i915_vma *vma,
struct intel_ring_buffer *ring);
struct intel_engine_cs *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
2071,31 → 2338,18
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
dev_priv->fence_regs[obj->fence_reg].pin_count++;
return true;
} else
return false;
}
bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
 
static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
dev_priv->fence_regs[obj->fence_reg].pin_count--;
}
}
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring);
 
bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
 
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
2112,23 → 2366,35
return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
}
 
static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
{
return dev_priv->gpu_error.stop_rings == 0 ||
dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
}
 
static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
{
return dev_priv->gpu_error.stop_rings == 0 ||
dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
}
 
void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_ring_buffer *ring,
int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
struct drm_i915_gem_object *batch_obj,
u32 *seqno);
#define i915_add_request(ring, seqno) \
__i915_add_request(ring, NULL, NULL, seqno)
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
2139,15 → 2405,10
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_ring_buffer *pipelined);
struct intel_engine_cs *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj,
int id,
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
2182,6 → 2443,13
struct i915_address_space *vm);
 
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (vma->pin_count > 0)
return true;
return false;
}
 
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
2213,66 → 2481,54
static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking)
unsigned flags)
{
return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
map_and_fenceable, nonblocking);
return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL);
}
 
static inline int
i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
{
return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
}
 
void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
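/*
 * Usage sketch (example_ggtt_map_for_cpu is a hypothetical helper): the
 * pwrite and fault paths later in this diff pin into the mappable part of
 * the GGTT without blocking on eviction, then drop the pin once the CPU
 * access window is no longer needed.
 */
static inline int example_ggtt_map_for_cpu(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		return ret;

	/* ... access the object through the GTT aperture ... */

	i915_gem_object_ggtt_unpin(obj);
	return 0;
}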
 
/* i915_gem_context.c */
#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
int i915_gem_context_enable(struct drm_i915_private *dev_priv);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
struct drm_file *file, int to_id);
int i915_switch_context(struct intel_engine_cs *ring,
struct intel_context *to);
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
static inline void i915_gem_context_reference(struct intel_context *ctx)
{
kref_get(&ctx->ref);
}
 
static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
static inline void i915_gem_context_unreference(struct intel_context *ctx)
{
kref_put(&ctx->ref, i915_gem_context_free);
}
 
struct i915_ctx_hang_stats * __must_check
i915_gem_context_get_hang_stats(struct drm_device *dev,
struct drm_file *file,
u32 id);
static inline bool i915_gem_context_is_default(const struct intel_context *c)
{
return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}
 
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
 
/* i915_gem_gtt.c */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj);
 
void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
intel_gtt_chipset_flush();
}
 
 
/* i915_gem_render_state.c */
int i915_gem_render_state_init(struct intel_engine_cs *ring);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
struct i915_address_space *vm,
2279,14 → 2535,22
int min_size,
unsigned alignment,
unsigned cache_level,
bool mappable,
bool nonblock);
unsigned long start,
unsigned long end,
unsigned flags);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);
 
/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
intel_gtt_chipset_flush();
}
 
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
2296,12 → 2560,11
u32 stolen_offset,
u32 gtt_offset,
u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
 
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
obj->tiling_mode != I915_TILING_NONE;
2339,7 → 2602,8
{
kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev);
void i915_capture_error_state(struct drm_device *dev, bool wedge,
const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
2348,6 → 2612,16
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);
 
/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset,
bool is_master);
 
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
2421,10 → 2695,12
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern void intel_connector_unregister(struct intel_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
2431,8 → 2707,8
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);
2443,6 → 2719,8
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
 
void intel_notify_mmio_flip(struct intel_engine_cs *ring);
 
/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
2461,6 → 2739,7
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
 
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
2491,20 → 2770,6
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
 
void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
 
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
(((reg) >= 0x2000 && (reg) < 0x4000) ||\
((reg) >= 0x5000 && (reg) < 0x8000) ||\
((reg) >= 0xB000 && (reg) < 0x12000) ||\
((reg) >= 0x2E000 && (reg) < 0x30000))
 
#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
(((reg) >= 0x12000 && (reg) < 0x14000) ||\
((reg) >= 0x22000 && (reg) < 0x24000) ||\
((reg) >= 0x30000 && (reg) < 0x40000))
 
#define FORCEWAKE_RENDER (1 << 0)
#define FORCEWAKE_MEDIA (1 << 1)
#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
2523,9 → 2788,26
#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
 
/* Be very careful with read/write 64-bit values. On 32-bit machines, they
* will be implemented using 2 32-bit writes in an arbitrary order with
* an arbitrary delay between them. This can cause the hardware to
* act upon the intermediate value, possibly leading to corruption and
* machine death. You have been warned.
*/
#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
 
#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
u32 upper = I915_READ(upper_reg); \
u32 lower = I915_READ(lower_reg); \
u32 tmp = I915_READ(upper_reg); \
if (upper != tmp) { \
upper = tmp; \
lower = I915_READ(lower_reg); \
WARN_ON(I915_READ(upper_reg) != upper); \
} \
(u64)upper << 32 | lower; })
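/*
 * Usage sketch (LOWER_REG/UPPER_REG are placeholders, not real register
 * names): re-reading the upper half catches a carry between the two 32-bit
 * reads, so the combined value stays consistent even for a running counter.
 *
 *	u64 count = I915_READ64_2x32(LOWER_REG, UPPER_REG);
 */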
 
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
 
2536,10 → 2818,10
 
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
if (HAS_PCH_SPLIT(dev))
if (IS_VALLEYVIEW(dev))
return VLV_VGACNTRL;
else if (INTEL_INFO(dev)->gen >= 5)
return CPU_VGACNTRL;
else if (IS_VALLEYVIEW(dev))
return VLV_VGACNTRL;
else
return VGACNTRL;
}
2564,12 → 2846,33
return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
 
static inline int mutex_trylock(struct mutex *lock)
/*
* If you need to wait X milliseconds between events A and B, but event B
* doesn't happen exactly after event A, you record the timestamp (jiffies) of
* when event A happened, then just before event B you call this function and
* pass the timestamp as the first argument, and X as the second argument.
*/
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
if (likely(atomic_cmpxchg(&lock->count, 1, 0) == 1))
return 1;
return 0;
unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
 
/*
* Don't re-read the value of "jiffies" every time since it may change
* behind our back and break the math.
*/
tmp_jiffies = jiffies;
target_jiffies = timestamp_jiffies +
msecs_to_jiffies_timeout(to_wait_ms);
 
if (time_after(target_jiffies, tmp_jiffies)) {
remaining_jiffies = target_jiffies - tmp_jiffies;
while ((int)remaining_jiffies > 0) {
delay(remaining_jiffies);
remaining_jiffies = target_jiffies - jiffies;
}
}
}
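/*
 * Usage sketch (hypothetical caller): record jiffies at event A, then call
 * this just before event B so only the not-yet-elapsed part of the delay is
 * actually waited out, e.g. enforcing a minimum panel power-cycle delay:
 *
 *	panel_off_jiffies = jiffies;			 (event A: panel off)
 *	...
 *	wait_remaining_ms_from_jiffies(panel_off_jiffies, min_power_cycle_ms);
 *	...						 (event B: panel back on)
 */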
 
typedef struct
{
/drivers/video/drm/i915/i915_gem.c
44,6 → 44,29
#define MAP_SHARED 0x01 /* Share changes */
 
 
u64 nsecs_to_jiffies64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
/* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
/* overflow after 292 years if HZ = 1024 */
return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
/*
* Generic case - optimized for cases where HZ is a multiple of 3.
* overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
*/
return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}
 
unsigned long nsecs_to_jiffies(u64 n)
{
return (unsigned long)nsecs_to_jiffies64(n);
}
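/*
 * Usage note: __wait_seqno() below turns the ioctl's nanosecond timeout into
 * an absolute jiffies deadline with this helper:
 *
 *	timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
 */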
 
 
struct drm_i915_gem_object *get_fb_obj();
 
unsigned long vm_mmap(struct file *file, unsigned long addr,
60,7 → 83,6
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
 
 
 
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
bool force);
67,16 → 89,8
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly);
static __must_check int
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned alignment,
bool map_and_fenceable,
bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file);
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj);
 
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj);
86,7 → 100,6
 
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
229,7 → 242,7
pinned = 0;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (obj->pin_count)
if (i915_gem_obj_is_pinned(obj))
pinned += i915_gem_obj_ggtt_size(obj);
mutex_unlock(&dev->struct_mutex);
 
457,23 → 470,10
 
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
/* If we're not in the cpu read domain, set ourself into the gtt
* read domain and manually flush cachelines (if required). This
* optimizes for the case when the gpu will dirty the data
* anyway again before the next pread happens. */
needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
ret = i915_gem_object_wait_rendering(obj, true);
ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
if (ret)
return ret;
}
 
ret = i915_gem_object_get_pages(obj);
if (ret)
return ret;
 
i915_gem_object_pin_pages(obj);
 
offset = args->offset;
 
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
504,7 → 504,7
 
mutex_unlock(&dev->struct_mutex);
 
if (likely(!i915_prefault_disable) && !prefaulted) {
if (likely(!i915.prefault_disable) && !prefaulted) {
ret = fault_in_multipages_writeable(user_data, remain);
/* Userspace is tricking us, but we've already clobbered
* its pages with the prefault and promised to write the
520,12 → 520,10
 
mutex_lock(&dev->struct_mutex);
 
next_page:
mark_page_accessed(page);
 
if (ret)
goto out;
 
next_page:
remain -= page_length;
user_data += page_length;
offset += page_length;
629,13 → 627,13
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
int page_offset, page_length, ret;
 
ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
if (ret)
goto out;
 
667,7 → 665,7
 
MapPage(dev_priv->gtt.mappable, dev_priv->gtt.mappable_base+page_base, PG_SW);
 
memcpy(dev_priv->gtt.mappable+page_offset, user_data, page_length);
memcpy((char*)dev_priv->gtt.mappable+page_offset, user_data, page_length);
 
remain -= page_length;
user_data += page_length;
675,7 → 673,7
}
 
out_unpin:
i915_gem_object_unpin(obj);
i915_gem_object_ggtt_unpin(obj);
out:
return ret;
}
779,6 → 777,8
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
 
i915_gem_object_retire(obj);
}
/* Same trick applies to invalidate partially written cachelines read
* before writing. */
973,8 → 973,8
* Compare seqno against outstanding lazy request. Emit a request if they are
* equal.
*/
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
int
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
int ret;
 
993,7 → 993,7
}
 
static bool missed_irq(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
1024,29 → 1024,30
* Returns 0 if the seqno was found within the allotted time. Otherwise returns
* the errno, with the remaining time filled into the timeout argument.
*/
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
unsigned reset_counter,
bool interruptible,
struct timespec *timeout,
s64 *timeout,
struct drm_i915_file_private *file_priv)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const bool irq_test_in_progress =
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
struct timespec before, now;
unsigned long timeout_expire, wait_time;
unsigned long timeout_expire;
s64 before, now;
 
wait_queue_t __wait;
int ret;
 
WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
 
timeout_expire = timeout ? GetTimerTicks() + timespec_to_jiffies_timeout(timeout) : 0;
wait_time = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
 
if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
gen6_rps_boost(dev_priv);
if (file_priv)
mod_delayed_work(dev_priv->wq,
1082,7 → 1083,7
break;
}
 
if (timeout && time_after_eq(GetTimerTicks(), timeout_expire)) {
if (timeout && time_after_eq(jiffies, timeout_expire)) {
ret = -ETIME;
break;
}
1107,6 → 1108,7
if (!irq_test_in_progress)
ring->irq_put(ring);
 
// finish_wait(&ring->irq_queue, &wait);
return ret;
}
 
1115,7 → 1117,7
* request and object lists appropriately for that event.
*/
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1140,9 → 1142,10
 
static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
i915_gem_retire_requests_ring(ring);
if (!obj->active)
return 0;
 
/* Manually manage the write flush as we may have not yet
* retired the buffer.
1152,7 → 1155,6
* we know we have passed the last write.
*/
obj->last_write_seqno = 0;
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
 
return 0;
}
1165,7 → 1167,7
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
struct intel_ring_buffer *ring = obj->ring;
struct intel_engine_cs *ring = obj->ring;
u32 seqno;
int ret;
 
1185,12 → 1187,12
*/
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_file *file,
struct drm_i915_file_private *file_priv,
bool readonly)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = obj->ring;
struct intel_engine_cs *ring = obj->ring;
unsigned reset_counter;
u32 seqno;
int ret;
1212,7 → 1214,7
 
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
mutex_lock(&dev->struct_mutex);
if (ret)
return ret;
1261,7 → 1263,9
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
*/
ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
ret = i915_gem_object_wait_rendering__nonblocking(obj,
file->driver_priv,
!write_domain);
if (ret)
goto unref;
 
1468,12 → 1472,12
}
 
if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to mmap a purgeable buffer\n");
ret = -EINVAL;
DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
ret = -EFAULT;
goto out;
}
/* Now bind it into the GTT if needed */
ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
if (ret)
goto out;
 
1502,7 → 1506,7
MapPage(ptr, pfn, PG_SHARED|PG_UW);
 
unpin:
i915_gem_object_unpin(obj);
i915_gem_object_unpin_pages(obj);
 
 
*offset = mem;
1538,12 → 1542,16
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
 
static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
return obj->madv == I915_MADV_DONTNEED;
}
 
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
// struct inode *inode;
 
// i915_gem_object_free_mmap_offset(obj);
 
if (obj->base.filp == NULL)
1554,18 → 1562,28
* To do this we must instruct the shmfs to drop all of its
* backing pages, *now*.
*/
// inode = obj->base.filp->f_path.dentry->d_inode;
// shmem_truncate_range(inode, 0, (loff_t)-1);
 
// shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
obj->madv = __I915_MADV_PURGED;
}
 
static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
return obj->madv == I915_MADV_DONTNEED;
struct address_space *mapping;
 
switch (obj->madv) {
case I915_MADV_DONTNEED:
i915_gem_object_truncate(obj);
case __I915_MADV_PURGED:
return;
}
 
if (obj->base.filp == NULL)
return;
 
}
 
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
1592,8 → 1610,6
 
page_cache_release(page);
}
//DRM_DEBUG_KMS("%s release %d pages\n", __FUNCTION__, page_count);
 
obj->dirty = 0;
 
sg_free_table(obj->pages);
1621,8 → 1637,7
ops->put_pages(obj);
obj->pages = NULL;
 
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
i915_gem_object_invalidate(obj);
 
return 0;
}
1723,8 → 1738,8
return 0;
 
if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to obtain a purgeable object\n");
return -EINVAL;
DRM_DEBUG("Attempting to obtain a purgeable object\n");
return -EFAULT;
}
 
BUG_ON(obj->pages_pin_count);
1737,9 → 1752,9
return 0;
}
 
void
static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1777,7 → 1792,7
}
 
void i915_vma_move_to_active(struct i915_vma *vma,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
list_move_tail(&vma->mm_list, &vma->vm->active_list);
return i915_gem_object_move_to_active(vma->obj, ring);
1787,13 → 1802,17
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
struct i915_address_space *vm;
struct i915_vma *vma;
 
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
 
list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
vma = i915_gem_obj_to_vma(obj, vm);
if (vma && !list_empty(&vma->mm_list))
list_move_tail(&vma->mm_list, &vm->inactive_list);
}
 
list_del_init(&obj->ring_list);
obj->ring = NULL;
1811,11 → 1830,24
WARN_ON(i915_verify_lists(dev));
}
 
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj)
{
struct intel_engine_cs *ring = obj->ring;
 
if (ring == NULL)
return;
 
if (i915_seqno_passed(ring->get_seqno(ring, true),
obj->last_read_seqno))
i915_gem_object_move_to_inactive(obj);
}
 
static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
int ret, i, j;
 
/* Carefully retire all requests without writing to the rings */
1830,8 → 1862,8
for_each_ring(ring, dev_priv, i) {
intel_ring_init_seqno(ring, seqno);
 
for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
ring->sync_seqno[j] = 0;
for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
ring->semaphore.sync_seqno[j] = 0;
}
 
return 0;
1881,18 → 1913,17
return 0;
}
 
int __i915_add_request(struct intel_ring_buffer *ring,
int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
struct drm_i915_gem_object *obj,
u32 *out_seqno)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
u32 request_ring_position, request_start;
int was_empty;
int ret;
 
request_start = intel_ring_get_tail(ring);
request_start = intel_ring_get_tail(ring->buffer);
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
1913,7 → 1944,7
* GPU processing the request, we never over-estimate the
* position of the head.
*/
request_ring_position = intel_ring_get_tail(ring);
request_ring_position = intel_ring_get_tail(ring->buffer);
 
ret = ring->add_request(ring);
if (ret)
1939,8 → 1970,7
if (request->ctx)
i915_gem_context_reference(request->ctx);
 
request->emitted_jiffies = GetTimerTicks();
was_empty = list_empty(&ring->request_list);
request->emitted_jiffies = jiffies;
list_add_tail(&request->list, &ring->request_list);
request->file_priv = NULL;
 
1961,13 → 1991,11
if (!dev_priv->ums.mm_suspended) {
// i915_queue_hangcheck(ring->dev);
 
if (was_empty) {
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
}
}
 
if (out_seqno)
*out_seqno = request->seqno;
1988,120 → 2016,43
spin_unlock(&file_priv->mm.lock);
}
 
static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
const struct intel_context *ctx)
{
if (acthd >= i915_gem_obj_offset(obj, vm) &&
acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
return true;
unsigned long elapsed;
 
return false;
}
elapsed = GetTimerTicks()/100 - ctx->hang_stats.guilty_ts;
 
static bool i915_head_inside_request(const u32 acthd_unmasked,
const u32 request_start,
const u32 request_end)
{
const u32 acthd = acthd_unmasked & HEAD_ADDR;
 
if (request_start < request_end) {
if (acthd >= request_start && acthd < request_end)
if (ctx->hang_stats.banned)
return true;
} else if (request_start > request_end) {
if (acthd >= request_start || acthd < request_end)
return true;
}
 
return false;
}
 
static struct i915_address_space *
request_to_vm(struct drm_i915_gem_request *request)
{
struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
struct i915_address_space *vm;
 
vm = &dev_priv->gtt.base;
 
return vm;
}
 
static bool i915_request_guilty(struct drm_i915_gem_request *request,
const u32 acthd, bool *inside)
{
/* There is a possibility that unmasked head address
* pointing inside the ring, matches the batch_obj address range.
* However this is extremely unlikely.
*/
if (request->batch_obj) {
if (i915_head_inside_object(acthd, request->batch_obj,
request_to_vm(request))) {
*inside = true;
if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
if (!i915_gem_context_is_default(ctx)) {
DRM_DEBUG("context hanging too fast, banning!\n");
return true;
}
}
 
if (i915_head_inside_request(acthd, request->head, request->tail)) {
*inside = false;
} else if (i915_stop_ring_allow_ban(dev_priv)) {
if (i915_stop_ring_allow_warn(dev_priv))
DRM_ERROR("gpu hanging too fast, banning!\n");
return true;
}
 
return false;
}
 
static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
{
const unsigned long elapsed = GetTimerTicks()/100 - hs->guilty_ts;
 
if (hs->banned)
return true;
 
if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
DRM_ERROR("context hanging too fast, declaring banned!\n");
return true;
}
 
return false;
}
 
static void i915_set_reset_status(struct intel_ring_buffer *ring,
struct drm_i915_gem_request *request,
u32 acthd)
static void i915_set_reset_status(struct drm_i915_private *dev_priv,
struct intel_context *ctx,
const bool guilty)
{
struct i915_ctx_hang_stats *hs = NULL;
bool inside, guilty;
unsigned long offset = 0;
struct i915_ctx_hang_stats *hs;
 
/* Innocent until proven guilty */
guilty = false;
if (WARN_ON(!ctx))
return;
 
if (request->batch_obj)
offset = i915_gem_obj_offset(request->batch_obj,
request_to_vm(request));
hs = &ctx->hang_stats;
 
if (ring->hangcheck.action != HANGCHECK_WAIT &&
i915_request_guilty(request, acthd, &inside)) {
DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
ring->name,
inside ? "inside" : "flushing",
offset,
request->ctx ? request->ctx->id : 0,
acthd);
 
guilty = true;
}
 
/* If contexts are disabled or this is the default context, use
* file_priv->reset_state
*/
if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
hs = &request->ctx->hang_stats;
else if (request->file_priv)
hs = &request->file_priv->hang_stats;
 
if (hs) {
if (guilty) {
hs->banned = i915_context_is_banned(hs);
hs->banned = i915_context_is_banned(dev_priv, ctx);
hs->batch_active++;
hs->guilty_ts = GetTimerTicks()/100;
} else {
2108,7 → 2059,6
hs->batch_pending++;
}
}
}
 
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
2121,23 → 2071,45
kfree(request);
}
 
static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring)
{
u32 completed_seqno = ring->get_seqno(ring, false);
u32 acthd = intel_ring_get_active_head(ring);
struct drm_i915_gem_request *request;
u32 completed_seqno;
 
completed_seqno = ring->get_seqno(ring, false);
 
list_for_each_entry(request, &ring->request_list, list) {
if (i915_seqno_passed(completed_seqno, request->seqno))
continue;
 
i915_set_reset_status(ring, request, acthd);
return request;
}
 
return NULL;
}
 
static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *request;
bool ring_hung;
 
request = i915_gem_find_active_request(ring);
 
if (request == NULL)
return;
 
ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
 
i915_set_reset_status(dev_priv, request->ctx, ring_hung);
 
list_for_each_entry_continue(request, &ring->request_list, list)
i915_set_reset_status(dev_priv, request->ctx, false);
}
 
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
2165,6 → 2137,11
 
i915_gem_free_request(request);
}
 
/* These may not have been flushed before the reset, so do so now */
kfree(ring->preallocated_lazy_request);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
}
 
void i915_gem_restore_fences(struct drm_device *dev)
2191,7 → 2168,7
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
int i;
 
/*
2205,7 → 2182,7
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_cleanup(dev_priv, ring);
 
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_reset(dev);
 
i915_gem_restore_fences(dev);
}
2214,7 → 2191,7
* This function clears the request list as sequence numbers are passed.
*/
void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
uint32_t seqno;
 
2225,6 → 2202,24
 
seqno = ring->get_seqno(ring, true);
 
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate,
* before we free the context associated with the requests.
*/
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
 
obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
ring_list);
 
if (!i915_seqno_passed(seqno, obj->last_read_seqno))
break;
 
i915_gem_object_move_to_inactive(obj);
}
 
 
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
 
2241,27 → 2236,11
* of tail of the request to update the last known position
* of the GPU head.
*/
ring->last_retired_head = request->tail;
ring->buffer->last_retired_head = request->tail;
 
i915_gem_free_request(request);
}
 
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
 
obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
ring_list);
 
if (!i915_seqno_passed(seqno, obj->last_read_seqno))
break;
 
i915_gem_object_move_to_inactive(obj);
}
 
if (unlikely(ring->trace_irq_seqno &&
i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
ring->irq_put(ring);
2274,8 → 2253,8
bool
i915_gem_retire_requests(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
bool idle = true;
int i;
 
2366,20 → 2345,14
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring = NULL;
struct timespec timeout_stack, *timeout = NULL;
struct intel_engine_cs *ring = NULL;
unsigned reset_counter;
u32 seqno = 0;
int ret = 0;
 
if (args->timeout_ns >= 0) {
timeout_stack = ns_to_timespec(args->timeout_ns);
timeout = &timeout_stack;
}
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
2404,9 → 2377,9
goto out;
 
/* Do this after OLR check to make sure we make forward progress polling
* on this IOCTL with a 0 timeout (like busy ioctl)
* on this IOCTL with a timeout <=0 (like busy ioctl)
*/
if (!args->timeout_ns) {
if (args->timeout_ns <= 0) {
ret = -ETIME;
goto out;
}
2415,10 → 2388,8
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
 
ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
if (timeout)
args->timeout_ns = timespec_to_ns(timeout);
return ret;
return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
file->driver_priv);
 
out:
drm_gem_object_unreference(&obj->base);
2440,9 → 2411,9
*/
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
struct intel_engine_cs *to)
{
struct intel_ring_buffer *from = obj->ring;
struct intel_engine_cs *from = obj->ring;
u32 seqno;
int ret, idx;
 
2455,7 → 2426,9
idx = intel_ring_sync_index(from, to);
 
seqno = obj->last_read_seqno;
if (seqno <= from->sync_seqno[idx])
/* Optimization: Avoid semaphore sync when we are sure we already
* waited for an object with a higher seqno. */
if (seqno <= from->semaphore.sync_seqno[idx])
return 0;
 
ret = i915_gem_check_olr(obj->ring, seqno);
2463,13 → 2436,13
return ret;
 
trace_i915_gem_ring_sync_to(from, to, seqno);
ret = to->sync_to(to, from, seqno);
ret = to->semaphore.sync_to(to, from, seqno);
if (!ret)
/* We use last_read_seqno because sync_to()
* might have just caused seqno wrap under
* the radar.
*/
from->sync_seqno[idx] = obj->last_read_seqno;
from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
 
return ret;
}
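/*
 * Worked example of the semaphore short-circuit above (values made up): if a
 * previous sync already bumped from->semaphore.sync_seqno[idx] to 10, then
 * syncing to an object whose last_read_seqno is 7 can return early, because
 * the earlier semaphore wait already orders everything up to seqno 10.
 */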
2501,7 → 2474,7
int i915_vma_unbind(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
 
if(obj == get_fb_obj())
2512,11 → 2485,10
 
if (!drm_mm_node_allocated(&vma->node)) {
i915_gem_vma_destroy(vma);
 
return 0;
}
 
if (obj->pin_count)
if (vma->pin_count)
return -EBUSY;
 
BUG_ON(obj->pages == NULL);
2529,6 → 2501,7
* cause memory corruption through use-after-free.
*/
 
if (i915_is_ggtt(vma->vm)) {
i915_gem_object_finish_gtt(obj);
 
/* release the fence reg _after_ flushing */
2535,18 → 2508,13
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
}
 
trace_i915_vma_unbind(vma);
 
if (obj->has_global_gtt_mapping)
i915_gem_gtt_unbind_object(obj);
if (obj->has_aliasing_ppgtt_mapping) {
i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
obj->has_aliasing_ppgtt_mapping = 0;
}
i915_gem_gtt_finish_object(obj);
vma->unbind_vma(vma);
 
list_del(&vma->mm_list);
list_del_init(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
if (i915_is_ggtt(vma->vm))
obj->map_and_fenceable = true;
2556,8 → 2524,10
 
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
if (list_empty(&obj->vma_list))
if (list_empty(&obj->vma_list)) {
i915_gem_gtt_finish_object(obj);
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
}
 
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
2568,35 → 2538,15
return 0;
}
 
/**
* Unbinds an object from the global GTT aperture.
*/
int
i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
 
if (!i915_gem_obj_ggtt_bound(obj))
return 0;
 
if (obj->pin_count)
return -EBUSY;
 
BUG_ON(obj->pages == NULL);
 
return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
}
 
int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
int ret, i;
 
/* Flush everything onto the inactive list. */
for_each_ring(ring, dev_priv, i) {
ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
ret = i915_switch_context(ring, ring->default_context);
if (ret)
return ret;
 
2611,7 → 2561,7
static void i965_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int fence_reg;
int fence_pitch_shift;
 
2663,7 → 2613,7
static void i915_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
 
if (obj) {
2707,7 → 2657,7
static void i830_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val;
 
if (obj) {
2832,6 → 2782,9
 
fence = &dev_priv->fence_regs[obj->fence_reg];
 
if (WARN_ON(fence->pin_count))
return -EBUSY;
 
i915_gem_object_fence_lost(obj);
i915_gem_object_update_fence(obj, fence, false);
 
3010,18 → 2963,19
/**
* Finds free space in the GTT aperture and binds the object there.
*/
static int
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned alignment,
bool map_and_fenceable,
bool nonblocking)
uint64_t flags)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 size, fence_size, fence_alignment, unfenced_alignment;
size_t gtt_max =
map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
unsigned long start =
flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
unsigned long end =
flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
struct i915_vma *vma;
int ret;
 
3037,48 → 2991,43
obj->tiling_mode, false);
 
if (alignment == 0)
alignment = map_and_fenceable ? fence_alignment :
alignment = flags & PIN_MAPPABLE ? fence_alignment :
unfenced_alignment;
if (map_and_fenceable && alignment & (fence_alignment - 1)) {
DRM_ERROR("Invalid object alignment requested %u\n", alignment);
return -EINVAL;
if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
return ERR_PTR(-EINVAL);
}
 
size = map_and_fenceable ? fence_size : obj->base.size;
size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
 
/* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space.
*/
if (obj->base.size > gtt_max) {
DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
if (obj->base.size > end) {
DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
obj->base.size,
map_and_fenceable ? "mappable" : "total",
gtt_max);
return -E2BIG;
flags & PIN_MAPPABLE ? "mappable" : "total",
end);
return ERR_PTR(-E2BIG);
}
 
ret = i915_gem_object_get_pages(obj);
if (ret)
return ret;
return ERR_PTR(ret);
 
i915_gem_object_pin_pages(obj);
 
BUG_ON(!i915_is_ggtt(vm));
 
vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
if (IS_ERR(vma))
goto err_unpin;
}
 
/* For now we only ever use 1 vma per object */
WARN_ON(!list_is_singular(&obj->vma_list));
 
search_free:
ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
obj->cache_level, 0, gtt_max,
DRM_MM_SEARCH_DEFAULT);
obj->cache_level,
start, end,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
if (ret) {
 
goto err_free_vma;
3108,19 → 3057,23
obj->map_and_fenceable = mappable && fenceable;
}
 
WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
 
trace_i915_vma_bind(vma, map_and_fenceable);
trace_i915_vma_bind(vma, flags);
vma->bind_vma(vma, obj->cache_level,
flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
 
i915_gem_verify_gtt(dev);
return 0;
return vma;
 
err_remove_node:
drm_mm_remove_node(&vma->node);
err_free_vma:
i915_gem_vma_destroy(vma);
vma = ERR_PTR(ret);
err_unpin:
i915_gem_object_unpin_pages(obj);
return ret;
return vma;
}
 
bool
3215,7 → 3168,7
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
uint32_t old_write_domain, old_read_domains;
int ret;
 
3230,6 → 3183,7
if (ret)
return ret;
 
i915_gem_object_retire(obj);
i915_gem_object_flush_cpu_write_domain(obj, false);
 
/* Serialise direct access to this object with the barriers for
3273,25 → 3227,22
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_vma *vma;
struct i915_vma *vma, *next;
int ret;
 
if (obj->cache_level == cache_level)
return 0;
 
if (obj->pin_count) {
if (i915_gem_obj_is_pinned(obj)) {
DRM_DEBUG("can not change the cache level of pinned objects\n");
return -EBUSY;
}
 
list_for_each_entry(vma, &obj->vma_list, vma_link) {
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
ret = i915_vma_unbind(vma);
if (ret)
return ret;
 
break;
}
}
 
3312,11 → 3263,10
return ret;
}
 
if (obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(obj, cache_level);
if (obj->has_aliasing_ppgtt_mapping)
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (drm_mm_node_allocated(&vma->node))
vma->bind_vma(vma, cache_level,
obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
}
 
list_for_each_entry(vma, &obj->vma_list, vma_link)
3332,6 → 3282,7
* in obj->write_domain and have been skipping the clflushes.
* Just set it to the CPU cache for now.
*/
i915_gem_object_retire(obj);
WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
 
old_read_domains = obj->base.read_domains;
3429,6 → 3380,15
 
static bool is_pin_display(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
 
if (list_empty(&obj->vma_list))
return false;
 
vma = i915_gem_obj_to_ggtt(obj);
if (!vma)
return false;
 
/* There are 3 sources that pin objects:
* 1. The display engine (scanouts, sprites, cursors);
* 2. Reservations for execbuffer;
3440,7 → 3400,7
* subtracting the potential reference by the user, any pin_count
* remains, it must be due to another use by the display engine.
*/
return obj->pin_count - !!obj->user_pin_count;
return vma->pin_count - !!obj->user_pin_count;
}
 
/*
3451,9 → 3411,10
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_ring_buffer *pipelined)
struct intel_engine_cs *pipelined)
{
u32 old_read_domains, old_write_domain;
bool was_pin_display;
int ret;
 
if (pipelined != obj->ring) {
3465,6 → 3426,7
/* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains.
*/
was_pin_display = obj->pin_display;
obj->pin_display = true;
 
/* The display engine is not coherent with the LLC cache on gen6. As
3485,7 → 3447,7
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
if (ret)
goto err_unpin_display;
 
3507,7 → 3469,8
return 0;
 
err_unpin_display:
obj->pin_display = is_pin_display(obj);
WARN_ON(was_pin_display != is_pin_display(obj));
obj->pin_display = was_pin_display;
return ret;
}
 
3514,7 → 3477,7
void
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
{
i915_gem_object_unpin(obj);
i915_gem_object_ggtt_unpin(obj);
obj->pin_display = is_pin_display(obj);
}
 
3554,6 → 3517,7
if (ret)
return ret;
 
i915_gem_object_retire(obj);
i915_gem_object_flush_gtt_write_domain(obj);
 
old_write_domain = obj->base.write_domain;
3601,9 → 3565,9
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = GetTimerTicks() - msecs_to_jiffies(20);
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
struct drm_i915_gem_request *request;
struct intel_ring_buffer *ring = NULL;
struct intel_engine_cs *ring = NULL;
unsigned reset_counter;
u32 seqno = 0;
int ret;
3637,72 → 3601,117
return ret;
}
 
static bool
i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
{
struct drm_i915_gem_object *obj = vma->obj;
 
if (alignment &&
vma->node.start & (alignment - 1))
return true;
 
if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
return true;
 
if (flags & PIN_OFFSET_BIAS &&
vma->node.start < (flags & PIN_OFFSET_MASK))
return true;
 
return false;
}
 
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking)
uint64_t flags)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
int ret;
 
if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
return -ENODEV;
 
WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
return -EINVAL;
 
vma = i915_gem_obj_to_vma(obj, vm);
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
 
if (vma) {
if ((alignment &&
vma->node.start & (alignment - 1)) ||
(map_and_fenceable && !obj->map_and_fenceable)) {
WARN(obj->pin_count,
if (i915_vma_misplaced(vma, alignment, flags)) {
WARN(vma->pin_count,
"bo is already pinned with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
i915_gem_obj_offset(obj, vm), alignment,
map_and_fenceable,
!!(flags & PIN_MAPPABLE),
obj->map_and_fenceable);
ret = i915_vma_unbind(vma);
if (ret)
return ret;
 
vma = NULL;
}
}
 
if (!i915_gem_obj_bound(obj, vm)) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 
ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
map_and_fenceable,
nonblocking);
if (ret)
return ret;
 
if (!dev_priv->mm.aliasing_ppgtt)
i915_gem_gtt_bind_object(obj, obj->cache_level);
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
}
 
if (!obj->has_global_gtt_mapping && map_and_fenceable)
i915_gem_gtt_bind_object(obj, obj->cache_level);
if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
 
obj->pin_count++;
obj->pin_mappable |= map_and_fenceable;
vma->pin_count++;
if (flags & PIN_MAPPABLE)
obj->pin_mappable |= true;
 
return 0;
}
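The reworked pin path above folds what used to be separate map_and_fenceable/nonblocking booleans into a single 64-bit flags word, which also carries a page-aligned minimum-offset bias that i915_vma_misplaced() recovers via an offset mask. A standalone sketch of that packing follows, using assumed constant values rather than the driver's headers, and modelling only the alignment and offset-bias checks:

/*
 * Standalone sketch of the pin-flags packing: request bits live in the low
 * bits of a u64 and the page-aligned offset bias is OR'd into the same word,
 * to be recovered with an offset mask. The constant values are illustrative
 * assumptions, not taken from the driver's headers.
 */
#include <stdint.h>
#include <stdio.h>

#define PIN_GLOBAL        (1ull << 2)
#define PIN_OFFSET_BIAS   (1ull << 3)
#define PIN_OFFSET_MASK   (~4095ull)        /* assumed page mask */
#define BATCH_OFFSET_BIAS (256 * 1024)      /* matches the execbuffer bias below */

static int vma_misplaced(uint64_t node_start, uint32_t alignment, uint64_t flags)
{
	if (alignment && (node_start & (alignment - 1)))
		return 1;
	if ((flags & PIN_OFFSET_BIAS) && node_start < (flags & PIN_OFFSET_MASK))
		return 1;
	return 0;
}

int main(void)
{
	/* a batch buffer asks for a GGTT binding above the bias */
	uint64_t flags = PIN_GLOBAL | PIN_OFFSET_BIAS | BATCH_OFFSET_BIAS;

	printf("bias = %llu\n", (unsigned long long)(flags & PIN_OFFSET_MASK));
	printf("vma at 0x10000 misplaced: %d\n", vma_misplaced(0x10000, 4096, flags));
	printf("vma at 0x50000 misplaced: %d\n", vma_misplaced(0x50000, 4096, flags));
	return 0;
}

Because the bias is page aligned, OR'ing it into the flags word never disturbs the low request bits, and masking the word recovers the bias unchanged.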
 
void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pin_count == 0);
BUG_ON(!i915_gem_obj_bound_any(obj));
struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
 
if (--obj->pin_count == 0)
BUG_ON(!vma);
BUG_ON(vma->pin_count == 0);
BUG_ON(!i915_gem_obj_ggtt_bound(obj));
 
if (--vma->pin_count == 0)
obj->pin_mappable = false;
}
 
bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
 
WARN_ON(!ggtt_vma ||
dev_priv->fence_regs[obj->fence_reg].pin_count >
ggtt_vma->pin_count);
dev_priv->fence_regs[obj->fence_reg].pin_count++;
return true;
} else
return false;
}
 
void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
dev_priv->fence_regs[obj->fence_reg].pin_count--;
}
}
 
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
3711,6 → 3720,9
struct drm_i915_gem_object *obj;
int ret;
 
if (INTEL_INFO(dev)->gen >= 6)
return -ENODEV;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
3722,13 → 3734,13
}
 
if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n");
ret = -EINVAL;
DRM_DEBUG("Attempting to pin a purgeable buffer\n");
ret = -EFAULT;
goto out;
}
 
if (obj->pin_filp != NULL && obj->pin_filp != file) {
DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
args->handle);
ret = -EINVAL;
goto out;
3740,7 → 3752,7
}
 
if (obj->user_pin_count == 0) {
ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
if (ret)
goto out;
}
3775,7 → 3787,7
}
 
if (obj->pin_filp != file) {
DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
ret = -EINVAL;
goto out;
3783,7 → 3795,7
obj->user_pin_count--;
if (obj->user_pin_count == 0) {
obj->pin_filp = NULL;
i915_gem_object_unpin(obj);
i915_gem_object_ggtt_unpin(obj);
}
 
out:
3865,7 → 3877,7
goto unlock;
}
 
if (obj->pin_count) {
if (i915_gem_obj_is_pinned(obj)) {
ret = -EINVAL;
goto out;
}
3958,7 → 3970,7
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_vma *vma, *next;
 
intel_runtime_pm_get(dev_priv);
3965,13 → 3977,11
 
trace_i915_gem_object_destroy(obj);
 
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
int ret;
 
obj->pin_count = 0;
/* NB: 0 or 1 elements */
WARN_ON(!list_empty(&obj->vma_list) &&
!list_is_singular(&obj->vma_list));
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
int ret = i915_vma_unbind(vma);
vma->pin_count = 0;
ret = i915_vma_unbind(vma);
if (WARN_ON(ret == -ERESTARTSYS)) {
bool was_interruptible;
 
3989,11 → 3999,12
if (obj->stolen)
i915_gem_object_unpin_pages(obj);
 
WARN_ON(obj->frontbuffer_bits);
 
if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
i915_gem_object_put_pages(obj);
// i915_gem_object_free_mmap_offset(obj);
i915_gem_object_release_stolen(obj);
 
BUG_ON(obj->pages);
 
4024,41 → 4035,6
return NULL;
}
 
static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
 
INIT_LIST_HEAD(&vma->vma_link);
INIT_LIST_HEAD(&vma->mm_list);
INIT_LIST_HEAD(&vma->exec_list);
vma->vm = vm;
vma->obj = obj;
 
/* Keep GGTT vmas first to make debug easier */
if (i915_is_ggtt(vm))
list_add(&vma->vma_link, &obj->vma_list);
else
list_add_tail(&vma->vma_link, &obj->vma_list);
 
return vma;
}
 
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
struct i915_vma *vma;
 
vma = i915_gem_obj_to_vma(obj, vm);
if (!vma)
vma = __i915_gem_vma_create(obj, vm);
 
return vma;
}
 
void i915_gem_vma_destroy(struct i915_vma *vma)
{
WARN_ON(vma->node.allocated);
4076,7 → 4052,7
int
i915_gem_suspend(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
 
mutex_lock(&dev->struct_mutex);
4094,7 → 4070,7
i915_gem_evict_everything(dev);
 
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_stop_ringbuffers(dev);
 
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
4106,7 → 4082,7
 
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
cancel_delayed_work_sync(&dev_priv->mm.idle_work);
flush_delayed_work(&dev_priv->mm.idle_work);
 
return 0;
 
4116,10 → 4092,10
}
#endif
 
int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
int i, ret;
4149,7 → 4125,7
 
void i915_gem_init_swizzling(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (INTEL_INFO(dev)->gen < 5 ||
dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4215,13 → 4191,20
goto cleanup_blt_ring;
}
 
if (HAS_BSD2(dev)) {
ret = intel_init_bsd2_ring_buffer(dev);
if (ret)
goto cleanup_vebox_ring;
}
 
ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
if (ret)
goto cleanup_vebox_ring;
goto cleanup_bsd2_ring;
 
return 0;
 
cleanup_bsd2_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
4237,7 → 4220,7
int
i915_gem_init_hw(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
 
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4251,10 → 4234,16
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
if (HAS_PCH_NOP(dev)) {
if (IS_IVYBRIDGE(dev)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
I915_WRITE(GEN7_MSG_CTL, temp);
} else if (INTEL_INFO(dev)->gen >= 7) {
u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
}
}
 
i915_gem_init_swizzling(dev);
 
4266,27 → 4255,21
i915_gem_l3_remap(&dev_priv->ring[RCS], i);
 
/*
* XXX: There was some w/a described somewhere suggesting loading
* contexts before PPGTT.
* XXX: Contexts should only be initialized once. Doing a switch to the
* default context however is something we'd like to do after
* reset or thaw (the latter may not actually be necessary for HW, but
* goes with our code better). Context switching requires rings (for
* the do_switch), but before enabling PPGTT. So don't move this.
*/
ret = i915_gem_context_init(dev);
if (ret) {
ret = i915_gem_context_enable(dev_priv);
if (ret && ret != -EIO) {
DRM_ERROR("Context enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
DRM_ERROR("Context initialization failed %d\n", ret);
return ret;
}
 
if (dev_priv->mm.aliasing_ppgtt) {
ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
if (ret) {
i915_gem_cleanup_aliasing_ppgtt(dev);
DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
return ret;
}
}
 
return 0;
}
 
int i915_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
4296,29 → 4279,40
 
if (IS_VALLEYVIEW(dev)) {
/* VLVA0 (potential hack), BIOS isn't actually waking us */
I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
VLV_GTLC_ALLOWWAKEACK), 10))
DRM_DEBUG_DRIVER("allow wake ack timed out\n");
}
 
i915_gem_init_global_gtt(dev);
 
ret = i915_gem_init_hw(dev);
ret = i915_gem_context_init(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
if (ret) {
i915_gem_cleanup_aliasing_ppgtt(dev);
return ret;
}
 
ret = i915_gem_init_hw(dev);
if (ret == -EIO) {
/* Allow ring initialisation to fail by marking the GPU as
* wedged. But we only want to do this where the GPU is angry,
* for all other failure, such as an allocation failure, bail.
*/
DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
ret = 0;
}
mutex_unlock(&dev->struct_mutex);
 
return 0;
return ret;
}
 
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
int i;
 
for_each_ring(ring, dev_priv, i)
4352,16 → 4346,15
}
 
BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
mutex_unlock(&dev->struct_mutex);
 
ret = drm_irq_install(dev);
ret = drm_irq_install(dev, dev->pdev->irq);
if (ret)
goto cleanup_ringbuffer;
mutex_unlock(&dev->struct_mutex);
 
return 0;
 
cleanup_ringbuffer:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
dev_priv->ums.mm_suspended = 1;
mutex_unlock(&dev->struct_mutex);
4376,7 → 4369,9
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
 
mutex_lock(&dev->struct_mutex);
drm_irq_uninstall(dev);
mutex_unlock(&dev->struct_mutex);
 
return i915_gem_suspend(dev);
}
4396,26 → 4391,28
#endif
 
static void
init_ring_lists(struct intel_ring_buffer *ring)
init_ring_lists(struct intel_engine_cs *ring)
{
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
}
 
static void i915_init_vm(struct drm_i915_private *dev_priv,
void i915_init_vm(struct drm_i915_private *dev_priv,
struct i915_address_space *vm)
{
if (!i915_is_ggtt(vm))
drm_mm_init(&vm->mm, vm->start, vm->total);
vm->dev = dev_priv->dev;
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
INIT_LIST_HEAD(&vm->global_link);
list_add(&vm->global_link, &dev_priv->vm_list);
list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
 
void
i915_gem_load(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
INIT_LIST_HEAD(&dev_priv->vm_list);
4458,215 → 4455,53
 
dev_priv->mm.interruptible = true;
 
mutex_init(&dev_priv->fb_tracking.lock);
}
 
#if 0
/*
* Create a physically contiguous memory object for this object
* e.g. for cursor + overlay regs
*/
static int i915_gem_init_phys_object(struct drm_device *dev,
int id, int size, int align)
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
struct drm_i915_file_private *file_priv;
int ret;
 
if (dev_priv->mm.phys_objs[id - 1] || !size)
return 0;
DRM_DEBUG_DRIVER("\n");
 
phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
if (!phys_obj)
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
return -ENOMEM;
 
phys_obj->id = id;
file->driver_priv = file_priv;
file_priv->dev_priv = dev->dev_private;
file_priv->file = file;
 
phys_obj->handle = drm_pci_alloc(dev, size, align);
if (!phys_obj->handle) {
ret = -ENOMEM;
goto kfree_obj;
}
#ifdef CONFIG_X86
set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
// INIT_DELAYED_WORK(&file_priv->mm.idle_work,
// i915_gem_file_idle_work_handler);
 
dev_priv->mm.phys_objs[id - 1] = phys_obj;
ret = i915_gem_context_open(dev, file);
if (ret)
kfree(file_priv);
 
return 0;
kfree_obj:
kfree(phys_obj);
return ret;
}
 
static void i915_gem_free_phys_object(struct drm_device *dev, int id)
void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
 
if (!dev_priv->mm.phys_objs[id - 1])
return;
 
phys_obj = dev_priv->mm.phys_objs[id - 1];
if (phys_obj->cur_obj) {
i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
if (old) {
WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
old->frontbuffer_bits &= ~frontbuffer_bits;
}
 
#ifdef CONFIG_X86
set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
drm_pci_free(dev, phys_obj->handle);
kfree(phys_obj);
dev_priv->mm.phys_objs[id - 1] = NULL;
if (new) {
WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
new->frontbuffer_bits |= frontbuffer_bits;
}
 
void i915_gem_free_all_phys_object(struct drm_device *dev)
{
int i;
 
for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
i915_gem_free_phys_object(dev, i);
}
 
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj)
{
struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
char *vaddr;
int i;
int page_count;
 
if (!obj->phys_obj)
return;
vaddr = obj->phys_obj->handle->vaddr;
 
page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
struct page *page = shmem_read_mapping_page(mapping, i);
if (!IS_ERR(page)) {
char *dst = kmap_atomic(page);
memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
kunmap_atomic(dst);
 
drm_clflush_pages(&page, 1);
 
set_page_dirty(page);
mark_page_accessed(page);
page_cache_release(page);
}
}
i915_gem_chipset_flush(dev);
 
obj->phys_obj->cur_obj = NULL;
obj->phys_obj = NULL;
}
 
int
i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj,
int id,
int align)
{
struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret = 0;
int page_count;
int i;
 
if (id > I915_MAX_PHYS_OBJECT)
return -EINVAL;
 
if (obj->phys_obj) {
if (obj->phys_obj->id == id)
return 0;
i915_gem_detach_phys_object(dev, obj);
}
 
/* create a new object */
if (!dev_priv->mm.phys_objs[id - 1]) {
ret = i915_gem_init_phys_object(dev, id,
obj->base.size, align);
if (ret) {
DRM_ERROR("failed to init phys object %d size: %zu\n",
id, obj->base.size);
return ret;
}
}
 
/* bind to the object */
obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
obj->phys_obj->cur_obj = obj;
 
page_count = obj->base.size / PAGE_SIZE;
 
for (i = 0; i < page_count; i++) {
struct page *page;
char *dst, *src;
 
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
return PTR_ERR(page);
 
src = kmap_atomic(page);
dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(src);
 
mark_page_accessed(page);
page_cache_release(page);
}
 
return 0;
}
 
static int
i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
char __user *user_data = to_user_ptr(args->data_ptr);
 
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
 
/* The physical object once assigned is fixed for the lifetime
* of the obj, so we can safely drop the lock and continue
* to access vaddr.
*/
mutex_unlock(&dev->struct_mutex);
unwritten = copy_from_user(vaddr, user_data, args->size);
mutex_lock(&dev->struct_mutex);
if (unwritten)
return -EFAULT;
}
 
i915_gem_chipset_flush(dev);
return 0;
}
 
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
 
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
* file_priv.
*/
spin_lock(&file_priv->mm.lock);
while (!list_empty(&file_priv->mm.request_list)) {
struct drm_i915_gem_request *request;
 
request = list_first_entry(&file_priv->mm.request_list,
struct drm_i915_gem_request,
client_list);
list_del(&request->client_list);
request->file_priv = NULL;
}
spin_unlock(&file_priv->mm.lock);
}
#endif
 
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
if (!mutex_is_locked(mutex))
4687,16 → 4522,18
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
 
if (vm == &dev_priv->mm.aliasing_ppgtt->base)
if (!dev_priv->mm.aliasing_ppgtt ||
vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
 
BUG_ON(list_empty(&o->vma_list));
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (vma->vm == vm)
return vma->node.start;
 
}
return 0; //-1;
WARN(1, "%s vma for this object not found.\n",
i915_is_ggtt(vm) ? "global" : "ppgtt");
return -1;
}
 
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4728,7 → 4565,8
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
 
if (vm == &dev_priv->mm.aliasing_ppgtt->base)
if (!dev_priv->mm.aliasing_ppgtt ||
vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
 
BUG_ON(list_empty(&o->vma_list));
4741,15 → 4579,19
}
 
 
 
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
 
/* This WARN has probably outlived its usefulness (callers already
* WARN if they don't find the GGTT vma they expect). When removing,
* remember to remove the pre-check in is_pin_display() as well */
if (WARN_ON(list_empty(&obj->vma_list)))
return NULL;
 
vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
if (vma->vm != obj_to_ggtt(obj))
return NULL;
 
return vma;
/drivers/video/drm/i915/i915_gem_context.c
93,12 → 93,61
* I've seen in a spec to date, and that was a workaround for a non-shipping
* part. It should be safe to decrease this, but it's more future proof as is.
*/
#define CONTEXT_ALIGN (64<<10)
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096
 
static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
static int do_switch(struct i915_hw_context *to);
static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &ppgtt->base;
 
if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
(list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
ppgtt->base.cleanup(&ppgtt->base);
return;
}
 
/*
* Make sure vmas are unbound before we take down the drm_mm
*
* FIXME: Proper refcounting should take care of this, this shouldn't be
* needed at all.
*/
if (!list_empty(&vm->active_list)) {
struct i915_vma *vma;
 
list_for_each_entry(vma, &vm->active_list, mm_list)
if (WARN_ON(list_empty(&vma->vma_link) ||
list_is_singular(&vma->vma_link)))
break;
 
i915_gem_evict_vm(&ppgtt->base, true);
} else {
i915_gem_retire_requests(dev);
i915_gem_evict_vm(&ppgtt->base, false);
}
 
ppgtt->base.cleanup(&ppgtt->base);
}
 
static void ppgtt_release(struct kref *kref)
{
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
 
do_ppgtt_cleanup(ppgtt);
kfree(ppgtt);
}
 
static size_t get_context_alignment(struct drm_device *dev)
{
if (IS_GEN6(dev))
return GEN6_CONTEXT_ALIGN;
 
return GEN7_CONTEXT_ALIGN;
}
 
static int get_context_size(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
129,20 → 178,80
 
void i915_gem_context_free(struct kref *ctx_ref)
{
struct i915_hw_context *ctx = container_of(ctx_ref,
struct intel_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
struct i915_hw_ppgtt *ppgtt = NULL;
 
if (ctx->legacy_hw_ctx.rcs_state) {
/* We refcount even the aliasing PPGTT to keep the code symmetric */
if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
ppgtt = ctx_to_ppgtt(ctx);
}
 
if (ppgtt)
kref_put(&ppgtt->ref, ppgtt_release);
if (ctx->legacy_hw_ctx.rcs_state)
drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
list_del(&ctx->link);
drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
}
 
static struct i915_hw_context *
create_hw_context(struct drm_device *dev,
static struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
struct drm_i915_gem_object *obj;
int ret;
 
obj = i915_gem_alloc_object(dev, size);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
 
/*
* Try to make the context utilize L3 as well as LLC.
*
* On VLV we don't have L3 controls in the PTEs so we
* shouldn't touch the cache level, especially as that
* would make the object snooped which might have a
* negative performance impact.
*/
if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret)) {
drm_gem_object_unreference(&obj->base);
return ERR_PTR(ret);
}
}
 
return obj;
}
 
static struct i915_hw_ppgtt *
create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
{
struct i915_hw_ppgtt *ppgtt;
int ret;
 
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return ERR_PTR(-ENOMEM);
 
ret = i915_gem_init_ppgtt(dev, ppgtt);
if (ret) {
kfree(ppgtt);
return ERR_PTR(ret);
}
 
ppgtt->ctx = ctx;
return ppgtt;
}
 
static struct intel_context *
__create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *ctx;
struct intel_context *ctx;
int ret;
 
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
150,40 → 259,29
return ERR_PTR(-ENOMEM);
 
kref_init(&ctx->ref);
ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
INIT_LIST_HEAD(&ctx->link);
if (ctx->obj == NULL) {
kfree(ctx);
DRM_DEBUG_DRIVER("Context object allocated failed\n");
return ERR_PTR(-ENOMEM);
}
list_add_tail(&ctx->link, &dev_priv->context_list);
 
if (INTEL_INFO(dev)->gen >= 7) {
ret = i915_gem_object_set_cache_level(ctx->obj,
I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret))
if (dev_priv->hw_context_size) {
struct drm_i915_gem_object *obj =
i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err_out;
}
ctx->legacy_hw_ctx.rcs_state = obj;
}
 
/* The ring associated with the context object is handled by the normal
* object tracking code. We give an initial ring value simple to pass an
* assertion in the context switch code.
*/
ctx->ring = &dev_priv->ring[RCS];
list_add_tail(&ctx->link, &dev_priv->context_list);
 
/* Default context will never have a file_priv */
if (file_priv == NULL)
return ctx;
 
ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
GFP_KERNEL);
if (file_priv != NULL) {
ret = idr_alloc(&file_priv->context_idr, ctx,
DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
if (ret < 0)
goto err_out;
} else
ret = DEFAULT_CONTEXT_HANDLE;
 
ctx->file_priv = file_priv;
ctx->id = ret;
ctx->user_handle = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
196,84 → 294,150
return ERR_PTR(ret);
}
 
static inline bool is_default_context(struct i915_hw_context *ctx)
{
return (ctx == ctx->ring->default_context);
}
 
/**
* The default context needs to exist per ring that uses contexts. It stores the
* context state of the GPU for applications that don't utilize HW contexts, as
* well as an idle case.
*/
static int create_default_context(struct drm_i915_private *dev_priv)
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv,
bool create_vm)
{
struct i915_hw_context *ctx;
int ret;
const bool is_global_default_ctx = file_priv == NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *ctx;
int ret = 0;
 
BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 
ctx = create_hw_context(dev_priv->dev, NULL);
ctx = __create_hw_context(dev, file_priv);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
return ctx;
 
/* We may need to do things with the shrinker which require us to
* immediately switch back to the default context. This can cause a
* problem as pinning the default context also requires GTT space which
* may not be available. To avoid this we always pin the
* default context.
if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
/* We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
* default context also requires GTT space which may not
* be available. To avoid this we always pin the default
* context.
*/
ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
get_context_alignment(dev), 0);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
goto err_destroy;
}
}
 
ret = do_switch(ctx);
if (ret) {
DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
if (create_vm) {
struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
 
if (IS_ERR_OR_NULL(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
ret = PTR_ERR(ppgtt);
goto err_unpin;
} else
ctx->vm = &ppgtt->base;
 
/* This case is reserved for the global default context and
* should only happen once. */
if (is_global_default_ctx) {
if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
ret = -EEXIST;
goto err_unpin;
}
 
dev_priv->ring[RCS].default_context = ctx;
dev_priv->mm.aliasing_ppgtt = ppgtt;
}
} else if (USES_PPGTT(dev)) {
/* For platforms which only have aliasing PPGTT, we fake the
* address space and refcounting. */
ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
} else
ctx->vm = &dev_priv->gtt.base;
 
DRM_DEBUG_DRIVER("Default HW context loaded\n");
return 0;
return ctx;
 
err_unpin:
i915_gem_object_unpin(ctx->obj);
if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
i915_gem_context_unreference(ctx);
return ret;
return ERR_PTR(ret);
}
 
void i915_gem_context_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
/* Prevent the hardware from restoring the last context (which hung) on
* the next switch */
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_context *dctx = ring->default_context;
struct intel_context *lctx = ring->last_context;
 
/* Do a fake switch to the default context */
if (lctx == dctx)
continue;
 
if (!lctx)
continue;
 
if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
get_context_alignment(dev), 0));
/* Fake a finish/inactive */
dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
dctx->legacy_hw_ctx.rcs_state->active = 0;
}
 
if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
 
i915_gem_context_unreference(lctx);
i915_gem_context_reference(dctx);
ring->last_context = dctx;
}
}
 
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
struct intel_context *ctx;
int i;
 
if (!HAS_HW_CONTEXTS(dev))
/* Init should only be called once per module load. Eventually the
* restriction on the context_disabled check can be loosened. */
if (WARN_ON(dev_priv->ring[RCS].default_context))
return 0;
 
/* If called from reset, or thaw... we've been here already */
if (dev_priv->ring[RCS].default_context)
return 0;
 
if (HAS_HW_CONTEXTS(dev)) {
dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
 
if (dev_priv->hw_context_size > (1<<20)) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
return -E2BIG;
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
dev_priv->hw_context_size);
dev_priv->hw_context_size = 0;
}
}
 
ret = create_default_context(dev_priv);
if (ret) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n",
ret);
return ret;
ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context (error %ld)\n",
PTR_ERR(ctx));
return PTR_ERR(ctx);
}
 
DRM_DEBUG_DRIVER("HW context support initialized\n");
/* NB: RCS will hold a ref for all rings */
for (i = 0; i < I915_NUM_RINGS; i++)
dev_priv->ring[i].default_context = ctx;
 
DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake");
return 0;
}
 
280,11 → 444,10
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
struct intel_context *dctx = dev_priv->ring[RCS].default_context;
int i;
 
if (!HAS_HW_CONTEXTS(dev))
return;
 
if (dctx->legacy_hw_ctx.rcs_state) {
/* The only known way to stop the gpu from accessing the hw context is
* to reset it. Do this as the very last operation to avoid confusing
* other code, leading to spurious errors. */
299,46 → 462,80
WARN_ON(!dev_priv->ring[RCS].last_context);
if (dev_priv->ring[RCS].last_context == dctx) {
/* Fake switch to NULL context */
WARN_ON(dctx->obj->active);
i915_gem_object_unpin(dctx->obj);
WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(dctx);
dev_priv->ring[RCS].last_context = NULL;
}
 
i915_gem_object_unpin(dctx->obj);
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}
 
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
 
if (ring->last_context)
i915_gem_context_unreference(ring->last_context);
 
ring->default_context = NULL;
ring->last_context = NULL;
}
 
i915_gem_context_unreference(dctx);
dev_priv->ring[RCS].default_context = NULL;
dev_priv->ring[RCS].last_context = NULL;
}
 
int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
int ret, i;
 
/* This is the only place the aliasing PPGTT gets enabled, which means
* it has to happen before we bail on reset */
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
ppgtt->enable(ppgtt);
}
 
/* FIXME: We should make this work, even in reset */
if (i915_reset_in_progress(&dev_priv->gpu_error))
return 0;
 
BUG_ON(!dev_priv->ring[RCS].default_context);
 
for_each_ring(ring, dev_priv, i) {
ret = i915_switch_context(ring, ring->default_context);
if (ret)
return ret;
}
 
return 0;
}
 
static int context_idr_cleanup(int id, void *p, void *data)
{
struct i915_hw_context *ctx = p;
struct intel_context *ctx = p;
 
BUG_ON(id == DEFAULT_CONTEXT_ID);
 
i915_gem_context_unreference(ctx);
return 0;
}
 
struct i915_ctx_hang_stats *
i915_gem_context_get_hang_stats(struct drm_device *dev,
struct drm_file *file,
u32 id)
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_hw_context *ctx;
struct intel_context *ctx;
 
if (id == DEFAULT_CONTEXT_ID)
return &file_priv->hang_stats;
idr_init(&file_priv->context_idr);
 
if (!HAS_HW_CONTEXTS(dev))
return ERR_PTR(-ENOENT);
mutex_lock(&dev->struct_mutex);
ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
mutex_unlock(&dev->struct_mutex);
 
ctx = i915_gem_context_get(file->driver_priv, id);
if (ctx == NULL)
return ERR_PTR(-ENOENT);
if (IS_ERR(ctx)) {
idr_destroy(&file_priv->context_idr);
return PTR_ERR(ctx);
}
 
return &ctx->hang_stats;
return 0;
}
 
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
349,15 → 546,21
idr_destroy(&file_priv->context_idr);
}
 
static struct i915_hw_context *
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
struct intel_context *ctx;
 
ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
if (!ctx)
return ERR_PTR(-ENOENT);
 
return ctx;
}
 
static inline int
mi_set_context(struct intel_ring_buffer *ring,
struct i915_hw_context *new_context,
mi_set_context(struct intel_engine_cs *ring,
struct intel_context *new_context,
u32 hw_flags)
{
int ret;
367,7 → 570,7
* explicitly, so we rely on the value at ring init, stored in
* itlb_before_ctx_switch.
*/
if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
if (IS_GEN6(ring->dev)) {
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
377,8 → 580,8
if (ret)
return ret;
 
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
if (IS_GEN7(ring->dev))
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (INTEL_INFO(ring->dev)->gen >= 7)
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
else
intel_ring_emit(ring, MI_NOOP);
385,15 → 588,18
 
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
hw_flags);
/* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
*/
intel_ring_emit(ring, MI_NOOP);
 
if (IS_GEN7(ring->dev))
if (INTEL_INFO(ring->dev)->gen >= 7)
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
else
intel_ring_emit(ring, MI_NOOP);
403,21 → 609,31
return ret;
}
 
static int do_switch(struct i915_hw_context *to)
static int do_switch(struct intel_engine_cs *ring,
struct intel_context *to)
{
struct intel_ring_buffer *ring = to->ring;
struct i915_hw_context *from = ring->last_context;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_context *from = ring->last_context;
struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
u32 hw_flags = 0;
bool uninitialized = false;
int ret, i;
 
BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
if (from != NULL && ring == &dev_priv->ring[RCS]) {
BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
}
 
if (from == to && !to->remap_slice)
return 0;
 
ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
/* Trying to pin first makes error handling easier. */
if (ring == &dev_priv->ring[RCS]) {
ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
get_context_alignment(ring->dev), 0);
if (ret)
return ret;
}
 
/*
* Pin can switch back to the default context if we end up calling into
426,6 → 642,18
*/
from = ring->last_context;
 
if (USES_FULL_PPGTT(ring->dev)) {
ret = ppgtt->switch_mm(ppgtt, ring, false);
if (ret)
goto unpin_out;
}
 
if (ring != &dev_priv->ring[RCS]) {
if (from)
i915_gem_context_unreference(from);
goto done;
}
 
/*
* Clear this page out of any CPU caches for coherent swap-in/out. Note
* that thanks to write = false in this call and us not setting any gpu
434,23 → 662,22
*
* XXX: We need a real interface to do this instead of trickery.
*/
ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
if (ret) {
i915_gem_object_unpin(to->obj);
return ret;
ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
if (ret)
goto unpin_out;
 
if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
&dev_priv->gtt.base);
vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
}
 
if (!to->obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
 
if (!to->is_initialized || is_default_context(to))
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
 
ret = mi_set_context(ring, to, hw_flags);
if (ret) {
i915_gem_object_unpin(to->obj);
return ret;
}
if (ret)
goto unpin_out;
 
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
471,8 → 698,8
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
480,26 → 707,39
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
from->obj->dirty = 1;
BUG_ON(from->obj->ring != ring);
from->legacy_hw_ctx.rcs_state->dirty = 1;
BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
 
/* obj is kept alive until the next request by its active ref */
i915_gem_object_unpin(from->obj);
i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(from);
}
 
uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
to->legacy_hw_ctx.initialized = true;
 
done:
i915_gem_context_reference(to);
ring->last_context = to;
to->is_initialized = true;
 
if (uninitialized) {
ret = i915_gem_render_state_init(ring);
if (ret)
DRM_ERROR("init render state: %d\n", ret);
}
 
return 0;
 
unpin_out:
if (ring->id == RCS)
i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
return ret;
}
 
/**
* i915_switch_context() - perform a GPU context switch.
* @ring: ring for which we'll execute the context switch
* @file_priv: file_priv associated with the context, may be NULL
* @id: context id number
* @to: the context to switch to
*
* The context life cycle is simple. The context refcount is incremented and
* decremented by 1 at create and destroy. If the context is in use by the GPU,
506,33 → 746,29
* it will have a refcount > 1. This allows us to destroy the context abstract
* object while letting the normal object tracking destroy the backing BO.
*/
int i915_switch_context(struct intel_ring_buffer *ring,
struct drm_file *file,
int to_id)
int i915_switch_context(struct intel_engine_cs *ring,
struct intel_context *to)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct i915_hw_context *to;
 
if (!HAS_HW_CONTEXTS(ring->dev))
return 0;
 
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
if (ring != &dev_priv->ring[RCS])
if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
if (to != ring->last_context) {
i915_gem_context_reference(to);
if (ring->last_context)
i915_gem_context_unreference(ring->last_context);
ring->last_context = to;
}
return 0;
}
 
if (to_id == DEFAULT_CONTEXT_ID) {
to = ring->default_context;
} else {
if (file == NULL)
return -EINVAL;
 
to = i915_gem_context_get(file->driver_priv, to_id);
if (to == NULL)
return -ENOENT;
return do_switch(ring, to);
}
 
return do_switch(to);
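The lifecycle comment above boils down to the reference handoff visible in the fake-context branch: the ring keeps exactly one reference on whatever it last switched to, so a context torn down by userspace is not freed until the next switch drops that reference. A toy, standalone model of that handoff, using plain counters rather than the driver's kref/context API:

/*
 * Toy model of the last_context reference handoff (not the driver API):
 * the ring holds one reference on the context it last switched to, so a
 * context released by its creator survives until the next switch.
 */
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	int refcount;
	const char *name;
};

static struct ctx *ctx_create(const char *name)
{
	struct ctx *c = malloc(sizeof(*c));
	c->refcount = 1;                  /* reference held by the creator */
	c->name = name;
	return c;
}

static void ctx_get(struct ctx *c) { c->refcount++; }

static void ctx_put(struct ctx *c)
{
	if (--c->refcount == 0) {
		printf("freeing %s\n", c->name);
		free(c);
	}
}

int main(void)
{
	struct ctx *last = NULL;          /* plays the role of ring->last_context */
	struct ctx *a = ctx_create("ctx A");

	/* switch to A: take the ring's reference, drop the old one */
	ctx_get(a);
	if (last)
		ctx_put(last);
	last = a;

	ctx_put(a);                       /* creator drops A: ring still holds it */

	struct ctx *b = ctx_create("ctx B");
	ctx_get(b);
	ctx_put(last);                    /* switching away finally frees A */
	last = b;

	ctx_put(b);
	ctx_put(last);
	return 0;
}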
static bool hw_context_enabled(struct drm_device *dev)
{
return to_i915(dev)->hw_context_size;
}
 
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
540,25 → 776,22
{
struct drm_i915_gem_context_create *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_hw_context *ctx;
struct intel_context *ctx;
int ret;
 
if (!(dev->driver->driver_features & DRIVER_GEM))
if (!hw_context_enabled(dev))
return -ENODEV;
 
if (!HAS_HW_CONTEXTS(dev))
return -ENODEV;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
 
ctx = create_hw_context(dev, file_priv);
ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
 
args->ctx_id = ctx->id;
args->ctx_id = ctx->user_handle;
DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
 
return 0;
569,11 → 802,11
{
struct drm_i915_gem_context_destroy *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_hw_context *ctx;
struct intel_context *ctx;
int ret;
 
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
return -ENOENT;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
580,12 → 813,12
return ret;
 
ctx = i915_gem_context_get(file_priv, args->ctx_id);
if (!ctx) {
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return -ENOENT;
return PTR_ERR(ctx);
}
 
idr_remove(&ctx->file_priv->context_idr, ctx->id);
idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
i915_gem_context_unreference(ctx);
mutex_unlock(&dev->struct_mutex);
 
/drivers/video/drm/i915/i915_gem_evict.c
0,0 → 1,277
/*
* Copyright © 2008-2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uuk>
*
*/
 
#include <drm/drmP.h>
#include <drm/i915_drm.h>
 
#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"
 
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
if (vma->pin_count)
return false;
 
if (WARN_ON(!list_empty(&vma->exec_list)))
return false;
 
list_add(&vma->exec_list, unwind);
return drm_mm_scan_add_block(&vma->node);
}
 
/**
* i915_gem_evict_something - Evict vmas to make room for binding a new one
* @dev: drm_device
* @vm: address space to evict from
* @min_size: size of the desired free space
* @alignment: alignment constraint of the desired free space
* @cache_level: cache_level for the desired space
* @start: start of the range in the vm from which to evict
* @end: end of the range in the vm from which to evict
* @flags: eviction behaviour flags (e.g. PIN_NONBLOCK to avoid waiting on active vmas)
*
* This function will try to evict vmas until a free space satisfying the
* requirements is found. Callers must check first whether any such hole exists
* already before calling this function.
*
* This function is used by the object/vma binding code.
*
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
int min_size, unsigned alignment, unsigned cache_level,
unsigned long start, unsigned long end,
unsigned flags)
{
struct list_head eviction_list, unwind_list;
struct i915_vma *vma;
int ret = 0;
int pass = 0;
 
trace_i915_gem_evict(dev, min_size, alignment, flags);
 
/*
* The goal is to evict objects and amalgamate space in LRU order.
* The oldest idle objects reside on the inactive list, which is in
* retirement order. The next objects to retire are those on the (per
* ring) active list that do not have an outstanding flush. Once the
* hardware reports completion (the seqno is updated after the
* batchbuffer has been finished) the clean buffer objects would
* be retired to the inactive list. Any dirty objects would be added
* to the tail of the flushing list. So after processing the clean
* active objects we need to emit a MI_FLUSH to retire the flushing
* list, hence the retirement order of the flushing list is in
* advance of the dirty objects on the active lists.
*
* The retirement sequence is thus:
* 1. Inactive objects (already retired)
* 2. Clean active objects
* 3. Flushing list
* 4. Dirty active objects.
*
* On each list, the oldest objects lie at the HEAD with the freshest
* object on the TAIL.
*/
 
INIT_LIST_HEAD(&unwind_list);
if (start != 0 || end != vm->total) {
drm_mm_init_scan_with_range(&vm->mm, min_size,
alignment, cache_level,
start, end);
} else
drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
search_again:
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(vma, &vm->inactive_list, mm_list) {
if (mark_free(vma, &unwind_list))
goto found;
}
 
if (flags & PIN_NONBLOCK)
goto none;
 
/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(vma, &vm->active_list, mm_list) {
if (mark_free(vma, &unwind_list))
goto found;
}
 
none:
/* Nothing found, clean up and bail out! */
while (!list_empty(&unwind_list)) {
vma = list_first_entry(&unwind_list,
struct i915_vma,
exec_list);
ret = drm_mm_scan_remove_block(&vma->node);
BUG_ON(ret);
 
list_del_init(&vma->exec_list);
}
 
/* Can we unpin some objects such as idle hw contexts,
* or pending flips?
*/
if (flags & PIN_NONBLOCK)
return -ENOSPC;
 
/* Only idle the GPU and repeat the search once */
if (pass++ == 0) {
ret = i915_gpu_idle(dev);
if (ret)
return ret;
 
i915_gem_retire_requests(dev);
goto search_again;
}
 
/* If we still have pending pageflip completions, drop
* back to userspace to give our workqueues time to
* acquire our locks and unpin the old scanouts.
*/
return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
 
found:
/* drm_mm doesn't allow any other operations while
* scanning, therefore store to be evicted objects on a
* temporary list. */
INIT_LIST_HEAD(&eviction_list);
while (!list_empty(&unwind_list)) {
vma = list_first_entry(&unwind_list,
struct i915_vma,
exec_list);
if (drm_mm_scan_remove_block(&vma->node)) {
list_move(&vma->exec_list, &eviction_list);
drm_gem_object_reference(&vma->obj->base);
continue;
}
list_del_init(&vma->exec_list);
}
 
/* Unbinding will emit any required flushes */
while (!list_empty(&eviction_list)) {
struct drm_gem_object *obj;
vma = list_first_entry(&eviction_list,
struct i915_vma,
exec_list);
 
obj = &vma->obj->base;
list_del_init(&vma->exec_list);
if (ret == 0)
ret = i915_vma_unbind(vma);
 
drm_gem_object_unreference(obj);
}
 
return ret;
}
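The search above follows the LRU ordering spelled out in the comment: inactive vmas are offered to the scanner first, then active ones, and the candidates are unwound in LIFO order once a hole is found. A toy, standalone model of that scan-and-unwind shape follows; arrays stand in for drm_mm nodes, and selection is simplified to "everything scanned", whereas the real drm_mm scanner decides which blocks actually form the hole:

/*
 * Toy model of the scan-and-unwind eviction pattern (not the drm_mm API).
 * Candidates are pushed in LRU order until enough space has been gathered,
 * then the stack is unwound and the chosen vmas are evicted.
 */
#include <stdio.h>

#define N 6

int main(void)
{
	/* vma sizes in pages, index 0 = oldest (head of the inactive list) */
	int size[N] = { 2, 1, 4, 3, 2, 5 };
	int unwind[N];
	int selected[N] = { 0 };
	int top = 0, found = 0, need = 6;

	/* phase 1: add candidates oldest-first until the request would fit */
	for (int i = 0; i < N && found < need; i++) {
		unwind[top++] = i;
		found += size[i];
	}

	/* phase 2: unwind in LIFO order; here every scanned vma is "selected" */
	while (top > 0)
		selected[unwind[--top]] = 1;

	/* phase 3: evict the selected vmas (the driver unbinds them here) */
	for (int i = 0; i < N; i++)
		if (selected[i])
			printf("evict vma %d (%d pages)\n", i, size[i]);

	return 0;
}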
 
/**
* i915_gem_evict_vm - Evict all idle vmas from a vm
*
* @vm: Address space to cleanse
* @do_idle: Boolean directing whether to idle first.
*
* This function evicts all idle vmas from a vm. If all unpinned vmas should be
* evicted, @do_idle needs to be set to true.
*
* This is used by the execbuf code as a last-ditch effort to defragment the
* address space.
*
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
struct i915_vma *vma, *next;
int ret;
 
trace_i915_gem_evict_vm(vm);
 
if (do_idle) {
ret = i915_gpu_idle(vm->dev);
if (ret)
return ret;
 
i915_gem_retire_requests(vm->dev);
}
 
list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
if (vma->pin_count == 0)
WARN_ON(i915_vma_unbind(vma));
 
return 0;
}
 
/**
* i915_gem_evict_everything - Try to evict all objects
* @dev: Device to evict objects for
*
* This function tries to evict all gem objects from all address spaces. Used
* by the shrinker as a last-ditch effort and for suspend, before releasing the
* backing storage of all unbound objects.
*/
int
i915_gem_evict_everything(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm;
bool lists_empty = true;
int ret;
 
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
lists_empty = (list_empty(&vm->inactive_list) &&
list_empty(&vm->active_list));
if (!lists_empty)
lists_empty = false;
}
 
if (lists_empty)
return -ENOSPC;
 
trace_i915_gem_evict_everything(dev);
 
/* The gpu_idle will flush everything in the write domain to the
* active list. Then we must move everything off the active list
* with retire requests.
*/
ret = i915_gpu_idle(dev);
if (ret)
return ret;
 
i915_gem_retire_requests(dev);
 
/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
WARN_ON(i915_gem_evict_vm(vm, false));
 
return 0;
}
/drivers/video/drm/i915/i915_gem_execbuffer.c
31,17 → 31,13
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
//#include <linux/dma_remapping.h>
#include <linux/dma_remapping.h>
 
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
 
static unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
memcpy(to, from, n);
return 0;
}
#define BATCH_OFFSET_BIAS (256*1024)
 
static unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
105,6 → 101,7
struct i915_address_space *vm,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
struct drm_i915_gem_object *obj;
struct list_head objects;
int i, ret;
139,7 → 136,21
i = 0;
while (!list_empty(&objects)) {
struct i915_vma *vma;
struct i915_address_space *bind_vm = vm;
 
if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
USES_FULL_PPGTT(vm->dev)) {
ret = -EINVAL;
goto err;
}
 
/* If we have secure dispatch, or the userspace assures us that
* they know what they're doing, use the GGTT VM.
*/
if (((args->flags & I915_EXEC_SECURE) &&
(i == (args->buffer_count - 1))))
bind_vm = &dev_priv->gtt.base;
 
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
obj_exec_link);
152,7 → 163,7
* from the (obj, vm) we don't run the risk of creating
* duplicated vmas for the same vm.
*/
vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
if (IS_ERR(vma)) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
231,7 → 242,7
i915_gem_object_unpin_fence(obj);
 
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
i915_gem_object_unpin(obj);
vma->pin_count--;
 
entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
261,11 → 272,13
 
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
struct drm_i915_gem_relocation_entry *reloc)
struct drm_i915_gem_relocation_entry *reloc,
uint64_t target_offset)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t page_offset = offset_in_page(reloc->offset);
uint64_t delta = reloc->delta + target_offset;
char *vaddr;
int ret;
 
273,19 → 286,32
if (ret)
return ret;
 
vaddr = dev_priv->gtt.mappable+4096;
vaddr = (char*)dev_priv->gtt.mappable+4096;
MapPage(vaddr,(addr_t)i915_gem_object_get_page(obj,reloc->offset >> PAGE_SHIFT), PG_SW);
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
 
if (INTEL_INFO(dev)->gen >= 8) {
page_offset = offset_in_page(page_offset + sizeof(uint32_t));
 
if (page_offset == 0) {
MapPage(vaddr,(addr_t)i915_gem_object_get_page(obj,
(reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT), PG_SW);
}
 
*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
}
 
return 0;
}
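On gen8 and later the relocation value written above is 64 bits wide, so it is emitted as two dwords, and when the first dword occupies the final slot of a page the second one lands at offset 0 of the following page, which is why the code re-maps before the second store. A standalone sketch of that split, where plain buffers stand in for the object's pages and the lower/upper helpers are local stand-ins for the kernel macros:

/*
 * Standalone sketch of the 64-bit relocation split: the delta is written as
 * two 32-bit halves, and when the first half sits in the last dword of a page
 * the second half starts at offset 0 of the next page. Not the driver API.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u

static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

static void write_reloc(uint8_t *pages[], uint64_t offset, uint64_t delta)
{
	uint32_t page = offset / PAGE_SIZE;
	uint32_t page_offset = offset % PAGE_SIZE;
	uint32_t lo = lower_32(delta), hi = upper_32(delta);

	memcpy(pages[page] + page_offset, &lo, sizeof(lo));

	/* second dword: may wrap to the start of the next page */
	page_offset = (page_offset + sizeof(uint32_t)) % PAGE_SIZE;
	if (page_offset == 0)
		page++;
	memcpy(pages[page] + page_offset, &hi, sizeof(hi));
}

int main(void)
{
	static uint8_t p0[PAGE_SIZE], p1[PAGE_SIZE];
	uint8_t *pages[2] = { p0, p1 };

	/* relocation whose first dword is the last slot of page 0 */
	write_reloc(pages, PAGE_SIZE - 4, 0x0000000123456789ull);

	printf("lo @ p0[%u] = 0x%08x\n", PAGE_SIZE - 4, *(uint32_t *)(p0 + PAGE_SIZE - 4));
	printf("hi @ p1[0]  = 0x%08x\n", *(uint32_t *)p1);
	return 0;
}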
 
static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
struct drm_i915_gem_relocation_entry *reloc)
struct drm_i915_gem_relocation_entry *reloc,
uint64_t target_offset)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t delta = reloc->delta + target_offset;
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
int ret;
305,8 → 331,9
reloc_page = dev_priv->gtt.mappable;
reloc_entry = (uint32_t __iomem *)
(reloc_page + offset_in_page(reloc->offset));
iowrite32(reloc->delta, reloc_entry);
iowrite32(lower_32_bits(delta), reloc_entry);
 
 
return 0;
}
 
313,14 → 340,13
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_vmas *eb,
struct drm_i915_gem_relocation_entry *reloc,
struct i915_address_space *vm)
struct drm_i915_gem_relocation_entry *reloc)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_i915_obj;
struct i915_vma *target_vma;
uint32_t target_offset;
uint64_t target_offset;
int ret;
 
/* we've already hold a reference to all valid objects */
338,8 → 364,10
if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
!target_i915_obj->has_global_gtt_mapping)) {
i915_gem_gtt_bind_object(target_i915_obj,
target_i915_obj->cache_level);
struct i915_vma *vma =
list_first_entry(&target_i915_obj->vma_list,
typeof(*vma), vma_link);
vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
}
 
/* Validate that the target is in a valid r/w GPU domain */
394,11 → 422,10
 
/* We can't wait for rendering with pagefaults disabled */
 
reloc->delta += target_offset;
if (use_cpu_reloc(obj))
ret = relocate_entry_cpu(obj, reloc);
ret = relocate_entry_cpu(obj, reloc, target_offset);
else
ret = relocate_entry_gtt(obj, reloc);
ret = relocate_entry_gtt(obj, reloc, target_offset);
 
if (ret)
return ret;
434,8 → 461,7
do {
u64 offset = r->presumed_offset;
 
ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
vma->vm);
ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
if (ret)
return ret;
 
464,8 → 490,7
int i, ret;
 
for (i = 0; i < entry->relocation_count; i++) {
ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
vma->vm);
ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
if (ret)
return ret;
}
507,24 → 532,31
 
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct intel_ring_buffer *ring,
struct intel_engine_cs *ring,
bool *need_reloc)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence, need_mappable;
struct drm_i915_gem_object *obj = vma->obj;
bool need_fence;
uint64_t flags;
int ret;
 
flags = 0;
 
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(vma);
if (need_fence || need_reloc_mappable(vma))
flags |= PIN_MAPPABLE;
 
ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
false);
if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
flags |= PIN_GLOBAL;
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
 
ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
if (ret)
return ret;
 
543,14 → 575,6
}
}
 
/* Ensure ppgtt mapping exists if needed */
if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, obj->cache_level);
 
obj->has_aliasing_ppgtt_mapping = 1;
}
 
if (entry->offset != vma->node.start) {
entry->offset = vma->node.start;
*need_reloc = true;
561,15 → 585,41
obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
}
 
if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
!obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(obj, obj->cache_level);
 
return 0;
}
 
static bool
eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
{
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
struct drm_i915_gem_object *obj = vma->obj;
bool need_fence, need_mappable;
 
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(vma);
 
WARN_ON((need_mappable || need_fence) &&
!i915_is_ggtt(vma->vm));
 
if (entry->alignment &&
vma->node.start & (entry->alignment - 1))
return true;
 
if (need_mappable && !obj->map_and_fenceable)
return true;
 
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
vma->node.start < BATCH_OFFSET_BIAS)
return true;
 
return false;
}
 
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
struct list_head *vmas,
bool *need_relocs)
{
583,6 → 633,8
if (list_empty(vmas))
return 0;
 
i915_gem_retire_requests_ring(ring);
 
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 
INIT_LIST_HEAD(&ordered_vmas);
629,26 → 681,10
 
/* Unbind any ill-fitting objects or pin. */
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool need_fence, need_mappable;
 
obj = vma->obj;
 
if (!drm_mm_node_allocated(&vma->node))
continue;
 
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(vma);
 
WARN_ON((need_mappable || need_fence) &&
!i915_is_ggtt(vma->vm));
 
if ((entry->alignment &&
vma->node.start & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
if (eb_vma_misplaced(vma, has_fenced_gpu_access))
ret = i915_vma_unbind(vma);
else
ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
674,7 → 710,7
list_for_each_entry(vma, vmas, exec_list)
i915_gem_execbuffer_unreserve_vma(vma);
 
// ret = i915_gem_evict_vm(vm, true);
ret = i915_gem_evict_vm(vm, true);
if (ret)
return ret;
} while (1);
684,7 → 720,7
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct intel_engine_cs *ring,
struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec)
{
749,7 → 785,7
* relocations were valid.
*/
for (j = 0; j < exec[i].relocation_count; j++) {
if (copy_to_user(&user_relocs[j].presumed_offset,
if (__copy_to_user(&user_relocs[j].presumed_offset,
&invalid_offset,
sizeof(invalid_offset))) {
ret = -EFAULT;
800,7 → 836,7
}
 
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
struct list_head *vmas)
{
struct i915_vma *vma;
876,27 → 912,32
return 0;
}
 
static int
static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
const u32 ctx_id)
struct intel_engine_cs *ring, const u32 ctx_id)
{
struct intel_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;
 
hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
if (IS_ERR(hs))
return PTR_ERR(hs);
if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
return ERR_PTR(-EINVAL);
 
ctx = i915_gem_context_get(file->driver_priv, ctx_id);
if (IS_ERR(ctx))
return ctx;
 
hs = &ctx->hang_stats;
if (hs->banned) {
DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
return -EIO;
return ERR_PTR(-EIO);
}
 
return 0;
return ctx;
}
 
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
struct i915_vma *vma;
 
915,8 → 956,11
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
if (obj->pin_count) /* check for potential scanout */
intel_mark_fb_busy(obj, ring);
 
intel_fb_obj_invalidate(obj, ring);
 
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
}
 
trace_i915_gem_object_change_domain(obj, old_read, old_write);
926,7 → 970,7
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct intel_engine_cs *ring,
struct drm_i915_gem_object *obj)
{
/* Unconditionally force add_request to emit a full flush. */
938,13 → 982,15
 
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
 
if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
return 0;
if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
 
ret = intel_ring_begin(ring, 4 * 3);
if (ret)
962,141 → 1008,273
}
 
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec,
struct i915_address_space *vm)
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 mask, flags;
int ret, mode, i;
bool need_relocs;
struct drm_i915_private *dev_priv = dev->dev_private;
u64 exec_len;
int instp_mode;
u32 instp_mask;
int i, ret = 0;
 
if (!i915_gem_check_execbuffer(args))
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
 
ret = validate_exec_list(exec, args->buffer_count);
if (ret)
return ret;
if (INTEL_INFO(dev)->gen >= 5) {
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
return -EINVAL;
}
 
flags = 0;
if (args->flags & I915_EXEC_SECURE) {
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
return -EINVAL;
}
 
flags |= I915_DISPATCH_SECURE;
cliprects = kcalloc(args->num_cliprects,
sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto error;
}
if (args->flags & I915_EXEC_IS_PINNED)
flags |= I915_DISPATCH_PINNED;
 
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
ring = &dev_priv->ring[RCS];
break;
case I915_EXEC_BSD:
ring = &dev_priv->ring[VCS];
if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
return -EPERM;
if (copy_from_user(cliprects,
to_user_ptr(args->cliprects_ptr),
sizeof(*cliprects)*args->num_cliprects)) {
ret = -EFAULT;
goto error;
}
break;
case I915_EXEC_BLT:
ring = &dev_priv->ring[BCS];
if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
return -EPERM;
} else {
if (args->DR4 == 0xffffffff) {
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
args->DR4 = 0;
}
break;
case I915_EXEC_VEBOX:
ring = &dev_priv->ring[VECS];
if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
return -EPERM;
}
break;
 
default:
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
if (args->DR1 || args->DR4 || args->cliprects_ptr) {
DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
return -EINVAL;
}
if (!intel_ring_initialized(ring)) {
DRM_DEBUG("execbuf with invalid ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
 
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
mask = I915_EXEC_CONSTANTS_MASK;
switch (mode) {
ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
if (ret)
goto error;
 
ret = i915_switch_context(ring, ctx);
if (ret)
goto error;
 
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
switch (instp_mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
if (INTEL_INFO(dev)->gen < 4)
return -EINVAL;
if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
ret = -EINVAL;
goto error;
}
 
if (instp_mode != dev_priv->relative_constants_mode) {
if (INTEL_INFO(dev)->gen < 4) {
DRM_DEBUG("no rel constants on pre-gen4\n");
ret = -EINVAL;
goto error;
}
 
if (INTEL_INFO(dev)->gen > 5 &&
mode == I915_EXEC_CONSTANTS_REL_SURFACE)
return -EINVAL;
instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
ret = -EINVAL;
goto error;
}
 
/* The HW changed the meaning on this bit on gen6 */
if (INTEL_INFO(dev)->gen >= 6)
mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
}
break;
default:
DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
return -EINVAL;
DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
ret = -EINVAL;
goto error;
}
 
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
if (ring == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
if (ret)
goto error;
 
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, INSTPM);
intel_ring_emit(ring, instp_mask << 16 | instp_mode);
intel_ring_advance(ring);
 
dev_priv->relative_constants_mode = instp_mode;
}
 
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(dev, ring);
if (ret)
goto error;
}
 
if (INTEL_INFO(dev)->gen >= 5) {
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(dev, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto error;
 
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
flags);
if (ret)
goto error;
}
} else {
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
flags);
if (ret)
return ret;
}
 
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
 
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 
error:
kfree(cliprects);
return ret;
}
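The INSTPM update in legacy_ringbuffer_submission above follows the masked-register convention used throughout the driver: the high 16 bits of the emitted dword select which of the low 16 bits the hardware latches, which is why instp_mask << 16 | instp_mode is written rather than the mode alone. A small illustrative helper under that assumption (masked_field is a hypothetical name, not a driver function):

#include <stdint.h>

/* Build a masked-register payload: high half = write-enable mask,
 * low half = new values for the enabled bits only. */
static uint32_t masked_field(uint16_t mask, uint16_t value)
{
	return ((uint32_t)mask << 16) | (uint32_t)(value & mask);
}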
 
/**
* Find one BSD ring to dispatch the corresponding BSD command.
* The Ring ID is returned.
*/
static int gen8_dispatch_bsd_ring(struct drm_device *dev,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
 
/* Check whether the file_priv is using one ring */
if (file_priv->bsd_ring)
return file_priv->bsd_ring->id;
else {
/* If no, use the ping-pong mechanism to select one ring */
int ring_id;
 
mutex_lock(&dev->struct_mutex);
if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
ring_id = VCS;
dev_priv->mm.bsd_ring_dispatch_index = 1;
} else {
ring_id = VCS2;
dev_priv->mm.bsd_ring_dispatch_index = 0;
}
file_priv->bsd_ring = &dev_priv->ring[ring_id];
mutex_unlock(&dev->struct_mutex);
return ring_id;
}
}
 
static struct drm_i915_gem_object *
eb_get_batch(struct eb_vmas *eb)
{
struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
 
/*
* SNA is doing fancy tricks with compressing batch buffers, which leads
* to negative relocation deltas. Usually that works out ok since the
* relocate address is still positive, except when the batch is placed
* very low in the GTT. Ensure this doesn't happen.
*
* Note that actual hangs have only been observed on gen7, but for
* paranoia do it everywhere.
*/
vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
 
return vma->obj;
}
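To make the negative-delta comment in eb_get_batch concrete: the value patched into the batch is the target's GTT offset plus the relocation delta, and when the target is the batch itself with a negative delta, the sum can wrap unless the batch sits high enough in the GTT. A worked example with illustrative numbers only (the 256 KiB bias is an assumption for the sketch, not a value taken from the headers):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	int64_t  delta        = -0x3000;	/* negative delta from a compressed batch */
	uint64_t low_batch    = 0x2000;		/* batch placed very low in the GTT       */
	uint64_t biased_batch = 256 * 1024;	/* batch placed at or above the bias      */

	/* Without the bias the patched dword wraps past zero. */
	assert((uint32_t)(low_batch + delta)    == 0xfffff000u);
	assert((uint32_t)(biased_batch + delta) == 256 * 1024 - 0x3000);
	return 0;
}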
 
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct intel_engine_cs *ring;
struct intel_context *ctx;
struct i915_address_space *vm;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u64 exec_start = args->batch_start_offset;
u32 flags;
int ret;
bool need_relocs;
 
if (!i915_gem_check_execbuffer(args))
return -EINVAL;
 
ret = validate_exec_list(exec, args->buffer_count);
if (ret)
return ret;
 
flags = 0;
if (args->flags & I915_EXEC_SECURE) {
 
flags |= I915_DISPATCH_SECURE;
}
if (args->flags & I915_EXEC_IS_PINNED)
flags |= I915_DISPATCH_PINNED;
 
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
 
cliprects = kcalloc(args->num_cliprects,
sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto pre_mutex_err;
if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
ring = &dev_priv->ring[RCS];
else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
if (HAS_BSD2(dev)) {
int ring_id;
ring_id = gen8_dispatch_bsd_ring(dev, file);
ring = &dev_priv->ring[ring_id];
} else
ring = &dev_priv->ring[VCS];
} else
ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
 
if (!intel_ring_initialized(ring)) {
DRM_DEBUG("execbuf with invalid ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
 
if (copy_from_user(cliprects,
to_user_ptr(args->cliprects_ptr),
sizeof(*cliprects)*args->num_cliprects)) {
ret = -EFAULT;
goto pre_mutex_err;
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
}
 
intel_runtime_pm_get(dev_priv);
 
1110,14 → 1288,22
goto pre_mutex_err;
}
 
ret = i915_gem_validate_context(dev, file, ctx_id);
if (ret) {
ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
ret = PTR_ERR(ctx);
goto pre_mutex_err;
}
 
i915_gem_context_reference(ctx);
 
vm = ctx->vm;
if (!USES_FULL_PPGTT(dev))
vm = &dev_priv->gtt.base;
 
eb = eb_create(args);
if (eb == NULL) {
i915_gem_context_unreference(ctx);
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
goto pre_mutex_err;
1129,7 → 1315,7
goto err;
 
/* take note of the batch buffer before we might reorder the lists */
batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
batch_obj = eb_get_batch(eb);
 
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1157,79 → 1343,55
goto err;
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
#if 0
if (i915_needs_cmd_parser(ring)) {
ret = i915_parse_cmds(ring,
batch_obj,
args->batch_start_offset,
file->is_master);
if (ret)
goto err;
 
/*
* XXX: Actually do this when enabling batch copy...
*
* Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
* from MI_BATCH_BUFFER_START commands issued in the
* dispatch_execbuffer implementations. We specifically don't
* want that set when the command parser is enabled.
*/
}
#endif
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
 
ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
if (ret)
goto err;
 
ret = i915_switch_context(ring, file, ctx_id);
if (ret)
goto err;
 
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
if (ret)
goto err;
 
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, INSTPM);
intel_ring_emit(ring, mask << 16 | mode);
intel_ring_advance(ring);
 
dev_priv->relative_constants_mode = mode;
if (flags & I915_DISPATCH_SECURE &&
!batch_obj->has_global_gtt_mapping) {
/* When we have multiple VMs, we'll need to make sure that we
* allocate space first */
struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
BUG_ON(!vma);
vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
}
 
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(dev, ring);
if (ret)
goto err;
}
if (flags & I915_DISPATCH_SECURE)
exec_start += i915_gem_obj_ggtt_offset(batch_obj);
else
exec_start += i915_gem_obj_offset(batch_obj, vm);
 
exec_start = i915_gem_obj_offset(batch_obj, vm) +
args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(dev, &cliprects[i],
args->DR1, args->DR4);
ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
args, &eb->vmas, batch_obj, exec_start, flags);
if (ret)
goto err;
 
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
flags);
if (ret)
goto err;
}
} else {
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
flags);
if (ret)
goto err;
}
 
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
 
i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 
err:
/* the request owns the ref now */
i915_gem_context_unreference(ctx);
eb_destroy(eb);
 
mutex_unlock(&dev->struct_mutex);
 
pre_mutex_err:
kfree(cliprects);
 
/* intel_gpu_busy should also get a ref, so it will free when the device
* is really idle. */
intel_runtime_pm_put(dev_priv);
1245,7 → 1407,6
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
1301,23 → 1462,25
exec2.flags = I915_EXEC_RENDER;
i915_execbuffer2_set_context_id(exec2, 0);
 
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
&dev_priv->gtt.base);
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
struct drm_i915_gem_exec_object __user *user_exec_list =
to_user_ptr(args->buffers_ptr);
 
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
exec_list[i].offset = exec2_list[i].offset;
/* ... and back out to userspace */
ret = copy_to_user(to_user_ptr(args->buffers_ptr),
exec_list,
sizeof(*exec_list) * args->buffer_count);
for (i = 0; i < args->buffer_count; i++) {
ret = __copy_to_user(&user_exec_list[i].offset,
&exec2_list[i].offset,
sizeof(user_exec_list[i].offset));
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
break;
}
}
}
 
drm_free_large(exec_list);
drm_free_large(exec2_list);
1329,7 → 1492,6
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
1340,6 → 1502,11
return -EINVAL;
}
 
if (args->rsvd2 != 0) {
DRM_DEBUG("dirty rvsd2 field\n");
return -EINVAL;
}
 
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL) {
1358,20 → 1525,26
return -EFAULT;
}
 
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
&dev_priv->gtt.base);
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user(to_user_ptr(args->buffers_ptr),
exec2_list,
sizeof(*exec2_list) * args->buffer_count);
struct drm_i915_gem_exec_object2 __user *user_exec_list =
to_user_ptr(args->buffers_ptr);
int i;
 
for (i = 0; i < args->buffer_count; i++) {
ret = __copy_to_user(&user_exec_list[i].offset,
&exec2_list[i].offset,
sizeof(user_exec_list[i].offset));
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
"back to user\n",
args->buffer_count);
break;
}
}
}
 
kfree(exec2_list);
return ret;
/drivers/video/drm/i915/i915_gem_gtt.c
1,5 → 1,6
/*
* Copyright © 2010 Daniel Vetter
* Copyright © 2011-2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
35,58 → 36,75
#include "i915_trace.h"
#include "intel_drv.h"
 
#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
typedef uint64_t gen8_gtt_pte_t;
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
 
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
bool intel_enable_ppgtt(struct drm_device *dev, bool full)
{
if (i915.enable_ppgtt == 0)
return false;
 
#define GEN6_PDE_VALID (1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
if (i915.enable_ppgtt == 1 && full)
return false;
 
#define GEN6_PTE_VALID (1 << 0)
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
return true;
}
 
/* Cacheability Control is a 4-bit value. The low three bits are stored in *
* bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
*/
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
(((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
return 0;
 
#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
#define GEN8_LEGACY_PDPS 4
if (enable_ppgtt == 1)
return 1;
 
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
if (enable_ppgtt == 2 && HAS_PPGTT(dev))
return 2;
 
#ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
DRM_INFO("Disabling PPGTT because VT-d is on\n");
return 0;
}
#endif
 
/* Early VLV doesn't have this */
if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
dev->pdev->revision < 0xb) {
DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
return 0;
}
 
return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
}
 
 
static void ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
static void ppgtt_unbind_vma(struct i915_vma *vma);
static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
 
static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
{
gen8_gtt_pte_t pte = valid ? 1 | 2 : 0;
gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
pte |= addr;
if (level != I915_CACHE_NONE)
 
switch (level) {
case I915_CACHE_NONE:
pte |= PPAT_UNCACHED_INDEX;
break;
case I915_CACHE_WT:
pte |= PPAT_DISPLAY_ELLC_INDEX;
break;
default:
pte |= PPAT_CACHED_INDEX;
else
pte |= PPAT_UNCACHED_INDEX;
break;
}
 
return pte;
}
 
105,7 → 123,7
 
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
127,7 → 145,7
 
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
149,12 → 167,9
return pte;
}
 
#define BYT_PTE_WRITEABLE (1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
 
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
bool valid, u32 flags)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
162,6 → 177,7
/* Mark the page as writeable. Other platforms don't have a
* setting for read-only/writable, so this matches that behavior.
*/
if (!(flags & PTE_READ_ONLY))
pte |= BYT_PTE_WRITEABLE;
 
if (level != I915_CACHE_NONE)
172,7 → 188,7
 
static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
185,7 → 201,7
 
static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
205,13 → 221,20
}
 
/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
uint64_t val)
static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
uint64_t val, bool synchronous)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
int ret;
 
BUG_ON(entry >= 4);
 
if (synchronous) {
I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
return 0;
}
 
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
227,48 → 250,37
return 0;
}
 
static int gen8_ppgtt_enable(struct drm_device *dev)
static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring,
bool synchronous)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i, j, ret;
int i, ret;
 
/* bit of a hack to find the actual last used pd */
int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
 
for_each_ring(ring, dev_priv, j) {
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
 
for (i = used_pd - 1; i >= 0; i--) {
dma_addr_t addr = ppgtt->pd_dma_addr[i];
for_each_ring(ring, dev_priv, j) {
ret = gen8_write_pdp(ring, i, addr);
ret = gen8_write_pdp(ring, i, addr, synchronous);
if (ret)
goto err_out;
return ret;
}
}
 
return 0;
 
err_out:
for_each_ring(ring, dev_priv, j)
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
return ret;
}
 
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_entry,
unsigned num_entries,
uint64_t start,
uint64_t length,
bool use_scratch)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen8_gtt_pte_t *pt_vaddr, scratch_pte;
unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
unsigned num_entries = length >> PAGE_SHIFT;
unsigned last_pte, i;
 
pt_vaddr = (gen8_gtt_pte_t*)AllocKernelSpace(4096);
279,34 → 291,42
I915_CACHE_LLC, use_scratch);
 
while (num_entries) {
struct page *page_table = &ppgtt->gen8_pt_pages[act_pt];
struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
 
last_pte = first_pte + num_entries;
last_pte = pte + num_entries;
if (last_pte > GEN8_PTES_PER_PAGE)
last_pte = GEN8_PTES_PER_PAGE;
 
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
MapPage(pt_vaddr,(addr_t)page_table, PG_SW);
 
for (i = first_pte; i < last_pte; i++)
for (i = pte; i < last_pte; i++) {
pt_vaddr[i] = scratch_pte;
num_entries--;
}
 
num_entries -= last_pte - first_pte;
first_pte = 0;
act_pt++;
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
 
pte = 0;
if (++pde == GEN8_PDES_PER_PAGE) {
pdpe++;
pde = 0;
}
}
FreeKernelSpace(pt_vaddr);
}
 
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
unsigned first_entry,
enum i915_cache_level cache_level)
uint64_t start,
enum i915_cache_level cache_level, u32 unused)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen8_gtt_pte_t *pt_vaddr;
unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
struct sg_page_iter sg_iter;
 
pt_vaddr = AllocKernelSpace(4096);
313,157 → 333,317
if(pt_vaddr == NULL)
return;
 
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
MapPage(pt_vaddr,(addr_t)(ppgtt->gen8_pt_pages[pdpe][pde]), 3);
 
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
break;
 
pt_vaddr[act_pte] =
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
if (++act_pte == GEN8_PTES_PER_PAGE) {
act_pt++;
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
act_pte = 0;
if (++pte == GEN8_PTES_PER_PAGE) {
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
if (++pde == GEN8_PDES_PER_PAGE) {
pdpe++;
pde = 0;
}
pte = 0;
MapPage(pt_vaddr,(addr_t)(ppgtt->gen8_pt_pages[pdpe][pde]), 3);
}
}
FreeKernelSpace(pt_vaddr);
}
 
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
static void gen8_free_page_tables(struct page **pt_pages)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
int i, j;
int i;
 
drm_mm_takedown(&vm->mm);
if (pt_pages == NULL)
return;
 
// for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
// if (pt_pages[i])
// __free_pages(pt_pages[i], 0);
}
 
static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
{
int i;
 
for (i = 0; i < ppgtt->num_pd_pages ; i++) {
if (ppgtt->pd_dma_addr[i]) {
pci_unmap_page(ppgtt->base.dev->pdev,
ppgtt->pd_dma_addr[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
kfree(ppgtt->gen8_pt_pages[i]);
kfree(ppgtt->gen8_pt_dma_addr[i]);
}
 
// __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
}
 
static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
struct pci_dev *hwdev = ppgtt->base.dev->pdev;
int i, j;
 
for (i = 0; i < ppgtt->num_pd_pages; i++) {
/* TODO: In the future we'll support sparse mappings, so this
* will have to change. */
if (!ppgtt->pd_dma_addr[i])
continue;
 
pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
 
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
if (addr)
pci_unmap_page(ppgtt->base.dev->pdev,
addr,
PAGE_SIZE,
pci_unmap_page(hwdev, addr, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
}
}
}
 
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
 
list_del(&vm->global_link);
drm_mm_takedown(&vm->mm);
 
gen8_ppgtt_unmap_pages(ppgtt);
gen8_ppgtt_free(ppgtt);
}
 
static struct page **__gen8_alloc_page_tables(void)
{
struct page **pt_pages;
int i;
 
pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
if (!pt_pages)
return ERR_PTR(-ENOMEM);
 
for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
pt_pages[i] = alloc_page(GFP_KERNEL);
if (!pt_pages[i])
goto bail;
}
kfree(ppgtt->gen8_pt_dma_addr[i]);
 
return pt_pages;
 
bail:
gen8_free_page_tables(pt_pages);
kfree(pt_pages);
return ERR_PTR(-ENOMEM);
}
 
// __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
// __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
const int max_pdp)
{
struct page **pt_pages[GEN8_LEGACY_PDPS];
int i, ret;
 
for (i = 0; i < max_pdp; i++) {
pt_pages[i] = __gen8_alloc_page_tables();
if (IS_ERR(pt_pages[i])) {
ret = PTR_ERR(pt_pages[i]);
goto unwind_out;
}
}
 
/**
* GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a
* net effect resembling a 2-level page table in normal x86 terms. Each PDP
* represents 1GB of memory
* 4 * 512 * 512 * 4096 = 4GB legacy 32b address space.
*
* TODO: Do something with the size parameter
**/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
/* NB: Avoid touching gen8_pt_pages until last to keep the allocation,
* "atomic" - for cleanup purposes.
*/
for (i = 0; i < max_pdp; i++)
ppgtt->gen8_pt_pages[i] = pt_pages[i];
 
return 0;
 
unwind_out:
while (i--) {
gen8_free_page_tables(pt_pages[i]);
kfree(pt_pages[i]);
}
 
return ret;
}
 
static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
{
struct page *pt_pages;
int i, j, ret = -ENOMEM;
const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
int i;
 
if (size % (1<<30))
DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
for (i = 0; i < ppgtt->num_pd_pages; i++) {
ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
sizeof(dma_addr_t),
GFP_KERNEL);
if (!ppgtt->gen8_pt_dma_addr[i])
return -ENOMEM;
}
 
/* FIXME: split allocation into smaller pieces. For now we only ever do
* this once, but with full PPGTT, the multiple contiguous allocations
* will be bad.
*/
ppgtt->pd_pages = AllocPages(max_pdp);
return 0;
}
 
static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
const int max_pdp)
{
// ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
if (!ppgtt->pd_pages)
return -ENOMEM;
 
pt_pages = AllocPages(num_pt_pages);
if (!pt_pages) {
// ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
 
return 0;
}
 
static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
const int max_pdp)
{
int ret;
 
ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
if (ret)
return ret;
 
ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
if (ret) {
// __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
return -ENOMEM;
return ret;
}
 
ppgtt->gen8_pt_pages = pt_pages;
ppgtt->num_pd_pages = max_pdp;
ppgtt->num_pt_pages = num_pt_pages;
ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
ppgtt->enable = gen8_ppgtt_enable;
ppgtt->base.clear_range = gen8_ppgtt_clear_range;
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
ppgtt->base.start = 0;
ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;
 
BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
ret = gen8_ppgtt_allocate_dma(ppgtt);
if (ret)
gen8_ppgtt_free(ppgtt);
 
/*
* - Create a mapping for the page directories.
* - For each page directory:
* allocate space for page table mappings.
* map each page table
*/
for (i = 0; i < max_pdp; i++) {
dma_addr_t temp;
temp = pci_map_page(ppgtt->base.dev->pdev,
&ppgtt->pd_pages[i], 0,
return ret;
}
 
static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
const int pd)
{
dma_addr_t pd_addr;
int ret;
 
pd_addr = pci_map_page(ppgtt->base.dev->pdev,
&ppgtt->pd_pages[pd], 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 
ppgtt->pd_dma_addr[i] = temp;
// ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
// if (ret)
// return ret;
 
ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL);
if (!ppgtt->gen8_pt_dma_addr[i])
goto err_out;
ppgtt->pd_dma_addr[pd] = pd_addr;
 
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
temp = pci_map_page(ppgtt->base.dev->pdev,
p, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
return 0;
}
 
ppgtt->gen8_pt_dma_addr[i][j] = temp;
static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
const int pd,
const int pt)
{
dma_addr_t pt_addr;
struct page *p;
int ret;
 
p = ppgtt->gen8_pt_pages[pd][pt];
pt_addr = pci_map_page(ppgtt->base.dev->pdev,
p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
// ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
// if (ret)
// return ret;
 
ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
 
return 0;
}
 
/**
* GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
* PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of legacy 32b address
* space.
*
* FIXME: split allocation into smaller pieces. For now we only ever do this
* once, but with full PPGTT, the multiple contiguous allocations will be bad.
* TODO: Do something with the size parameter
*/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
{
const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
int i, j, ret;
gen8_ppgtt_pde_t *pd_vaddr;
 
if (size % (1<<30))
DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
 
/* 1. Do all our allocations for page directories and page tables. */
ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
if (ret)
return ret;
 
/*
* 2. Create DMA mappings for the page directories and page tables.
*/
for (i = 0; i < max_pdp; i++) {
ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
if (ret)
goto bail;
 
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
if (ret)
goto bail;
}
}
 
/* For now, the PPGTT helper functions all require that the PDEs are
/*
* 3. Map all the page directory entries to point to the page tables
* we've allocated.
*
* For now, the PPGTT helper functions all require that the PDEs are
* plugged in correctly. So we do that now/here. For aliasing PPGTT, we
* will never need to touch the PDEs again */
* will never need to touch the PDEs again.
*/
 
gen8_ppgtt_pde_t *pd_vaddr;
pd_vaddr = AllocKernelSpace(4096);
 
for (i = 0; i < max_pdp; i++) {
MapPage(pd_vaddr,(addr_t)(ppgtt->pd_pages[i]), 3);
MapPage(pd_vaddr,(addr_t)(&ppgtt->pd_pages[i]), 3);
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
I915_CACHE_LLC);
}
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
}
FreeKernelSpace(pd_vaddr);
 
ppgtt->base.clear_range(&ppgtt->base, 0,
ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,
true);
ppgtt->enable = gen8_ppgtt_enable;
ppgtt->switch_mm = gen8_mm_switch;
ppgtt->base.clear_range = gen8_ppgtt_clear_range;
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
ppgtt->base.start = 0;
ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;
 
ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
 
DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
ppgtt->num_pt_pages,
(ppgtt->num_pt_pages - num_pt_pages) +
size % (1<<30));
ppgtt->num_pd_entries,
(ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
return 0;
 
err_out:
ppgtt->base.cleanup(&ppgtt->base);
bail:
gen8_ppgtt_unmap_pages(ppgtt);
gen8_ppgtt_free(ppgtt);
return ret;
}
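As a reading aid for gen8_ppgtt_clear_range/insert_entries and the layout summarized above (4 PDPs x 512 PDEs x 512 PTEs x 4096-byte pages = 4GB), this standalone sketch shows how a GPU virtual address splits into the three indices. The shifts and masks are derived from that 1GB / 2MB / 4KB granularity rather than copied from the driver headers:

#include <stdint.h>

struct gen8_addr_index {
	unsigned pdpe;	/* which 1GB page directory pointer */
	unsigned pde;	/* which 2MB page directory entry   */
	unsigned pte;	/* which 4KB page table entry       */
};

static struct gen8_addr_index gen8_decompose(uint64_t gpu_addr)
{
	struct gen8_addr_index idx;

	idx.pdpe = (unsigned)((gpu_addr >> 30) & 0x3);		/* 4 PDPs cover 4GB */
	idx.pde  = (unsigned)((gpu_addr >> 21) & 0x1ff);	/* 512 PDEs per PDP */
	idx.pte  = (unsigned)((gpu_addr >> 12) & 0x1ff);	/* 512 PTEs per PDE */
	return idx;
}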
 
489,38 → 669,163
readl(pd_addr);
}
 
static int gen6_ppgtt_enable(struct drm_device *dev)
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i;
 
BUG_ON(ppgtt->pd_offset & 0x3f);
 
gen6_write_pdes(ppgtt);
return (ppgtt->pd_offset / 64) << 16;
}
 
pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
pd_offset <<= 16;
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring,
bool synchronous)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
if (INTEL_INFO(dev)->gen == 6) {
uint32_t ecochk, gab_ctl, ecobits;
/* If we're in reset, we can assume the GPU is sufficiently idle to
* manually frob these bits. Ideally we could use the ring functions,
* except our error handling makes it quite difficult (can't use
* intel_ring_begin, ring->flush, or intel_ring_advance)
*
* FIXME: We should try not to special case reset
*/
if (synchronous ||
i915_reset_in_progress(&dev_priv->gpu_error)) {
WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
POSTING_READ(RING_PP_DIR_BASE(ring));
return 0;
}
 
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
ECOBITS_PPGTT_CACHE64B);
/* NB: TLBs must be flushed and invalidated before a switch */
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
 
gab_ctl = I915_READ(GAB_CTL);
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
 
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit(ring, PP_DIR_DCLV_2G);
intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit(ring, get_pd_offset(ppgtt));
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
 
return 0;
}
 
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring,
bool synchronous)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
/* If we're in reset, we can assume the GPU is sufficiently idle to
* manually frob these bits. Ideally we could use the ring functions,
* except our error handling makes it quite difficult (can't use
* intel_ring_begin, ring->flush, or intel_ring_advance)
*
* FIXME: We should try not to special case reset
*/
if (synchronous ||
i915_reset_in_progress(&dev_priv->gpu_error)) {
WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
POSTING_READ(RING_PP_DIR_BASE(ring));
return 0;
}
 
/* NB: TLBs must be flushed and invalidated before a switch */
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
 
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
 
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit(ring, PP_DIR_DCLV_2G);
intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit(ring, get_pd_offset(ppgtt));
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
 
/* XXX: RCS is the only one to auto invalidate the TLBs? */
if (ring->id != RCS) {
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
 
return 0;
}
 
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring,
bool synchronous)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!synchronous)
return 0;
 
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
 
POSTING_READ(RING_PP_DIR_DCLV(ring));
 
return 0;
}
 
static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
int j, ret;
 
for_each_ring(ring, dev_priv, j) {
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
/* We promise to do a switch later with FULL PPGTT. If this is
* aliasing, this is the one and only switch we'll do */
if (USES_FULL_PPGTT(dev))
continue;
 
ret = ppgtt->switch_mm(ppgtt, ring, true);
if (ret)
goto err_out;
}
 
return 0;
 
err_out:
for_each_ring(ring, dev_priv, j)
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
return ret;
}
 
static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
uint32_t ecochk, ecobits;
int i;
 
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
533,34 → 838,71
ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
}
I915_WRITE(GAM_ECOCHK, ecochk);
/* GFX_MODE is per-ring on gen7+ */
}
 
for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 7)
int ret;
/* GFX_MODE is per-ring on gen7+ */
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
/* We promise to do a switch later with FULL PPGTT. If this is
* aliasing, this is the one and only switch we'll do */
if (USES_FULL_PPGTT(dev))
continue;
 
ret = ppgtt->switch_mm(ppgtt, ring, true);
if (ret)
return ret;
}
 
return 0;
}
 
static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
uint32_t ecochk, gab_ctl, ecobits;
int i;
 
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
ECOBITS_PPGTT_CACHE64B);
 
gab_ctl = I915_READ(GAB_CTL);
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
 
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
 
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
for_each_ring(ring, dev_priv, i) {
int ret = ppgtt->switch_mm(ppgtt, ring, true);
if (ret)
return ret;
}
 
return 0;
}
 
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_entry,
unsigned num_entries,
uint64_t start,
uint64_t length,
bool use_scratch)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr, scratch_pte;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
 
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
 
pt_vaddr = AllocKernelSpace(4096);
 
587,18 → 929,17
 
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
unsigned first_entry,
enum i915_cache_level cache_level)
uint64_t start,
enum i915_cache_level cache_level, u32 flags)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
struct sg_page_iter sg_iter;
dma_addr_t page_addr;
 
 
pt_vaddr = AllocKernelSpace(4096);
 
if(pt_vaddr == NULL)
609,25 → 950,21
 
pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
cache_level, true, flags);
 
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
act_pt++;
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
act_pte = 0;
 
}
}
FreeKernelSpace(pt_vaddr);
}
 
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
int i;
 
drm_mm_takedown(&ppgtt->base.mm);
 
if (ppgtt->pt_dma_addr) {
for (i = 0; i < ppgtt->num_pd_entries; i++)
pci_unmap_page(ppgtt->base.dev->pdev,
634,52 → 971,120
ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
}
 
static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
int i;
 
kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++)
__free_page(ppgtt->pt_pages[i]);
kfree(ppgtt->pt_pages);
kfree(ppgtt);
}
 
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
 
list_del(&vm->global_link);
drm_mm_takedown(&ppgtt->base.mm);
drm_mm_remove_node(&ppgtt->node);
 
gen6_ppgtt_unmap_pages(ppgtt);
gen6_ppgtt_free(ppgtt);
}
 
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned first_pd_entry_in_global_pt;
int i;
int ret = -ENOMEM;
bool retried = false;
int ret;
 
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
alloc:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
&ppgtt->node, GEN6_PD_SIZE,
GEN6_PD_ALIGN, 0,
0, dev_priv->gtt.base.total,
DRM_MM_TOPDOWN);
if (ret == -ENOSPC && !retried) {
ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
I915_CACHE_NONE,
0, dev_priv->gtt.base.total,
0);
if (ret)
return ret;
 
ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
retried = true;
goto alloc;
}
 
if (ppgtt->node.start < dev_priv->gtt.mappable_end)
DRM_DEBUG("Forced to use aperture for PDEs\n");
 
ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
ppgtt->enable = gen6_ppgtt_enable;
ppgtt->base.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
ppgtt->base.start = 0;
ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
return ret;
}
 
static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
int i;
 
ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
GFP_KERNEL);
 
if (!ppgtt->pt_pages)
return -ENOMEM;
 
for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
if (!ppgtt->pt_pages[i])
goto err_pt_alloc;
if (!ppgtt->pt_pages[i]) {
gen6_ppgtt_free(ppgtt);
return -ENOMEM;
}
}
 
return 0;
}
 
static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
int ret;
 
ret = gen6_ppgtt_allocate_page_directories(ppgtt);
if (ret)
return ret;
 
ret = gen6_ppgtt_allocate_page_tables(ppgtt);
if (ret) {
drm_mm_remove_node(&ppgtt->node);
return ret;
}
 
ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
GFP_KERNEL);
if (!ppgtt->pt_dma_addr)
goto err_pt_alloc;
if (!ppgtt->pt_dma_addr) {
drm_mm_remove_node(&ppgtt->node);
gen6_ppgtt_free(ppgtt);
return -ENOMEM;
}
 
return 0;
}
 
static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
int i;
 
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
 
686,44 → 1091,72
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
PCI_DMA_BIDIRECTIONAL);
 
// if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
// gen6_ppgtt_unmap_pages(ppgtt);
// return -EIO;
// }
 
ppgtt->pt_dma_addr[i] = pt_addr;
}
 
ppgtt->base.clear_range(&ppgtt->base, 0,
ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
return 0;
}
 
ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
return 0;
ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
if (IS_GEN6(dev)) {
ppgtt->enable = gen6_ppgtt_enable;
ppgtt->switch_mm = gen6_mm_switch;
} else if (IS_HASWELL(dev)) {
ppgtt->enable = gen7_ppgtt_enable;
ppgtt->switch_mm = hsw_mm_switch;
} else if (IS_GEN7(dev)) {
ppgtt->enable = gen7_ppgtt_enable;
ppgtt->switch_mm = gen7_mm_switch;
} else
BUG();
 
err_pd_pin:
if (ppgtt->pt_dma_addr) {
for (i--; i >= 0; i--)
pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
err_pt_alloc:
kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
if (ppgtt->pt_pages[i])
__free_page(ppgtt->pt_pages[i]);
}
kfree(ppgtt->pt_pages);
ret = gen6_ppgtt_alloc(ppgtt);
if (ret)
return ret;
 
ret = gen6_ppgtt_setup_page_tables(ppgtt);
if (ret) {
gen6_ppgtt_free(ppgtt);
return ret;
}
 
static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
ppgtt->base.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.start = 0;
ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
// ppgtt->debug_dump = gen6_dump_ppgtt;
 
ppgtt->pd_offset =
ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
 
ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
 
DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
ppgtt->node.size >> 20,
ppgtt->node.start / PAGE_SIZE);
 
return 0;
}
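The pd_offset computation in gen6_ppgtt_init converts the GGTT node reserved for the page directory into a byte offset within the GTT entry table: the GGTT keeps one PTE slot per page of address space, so node.start / PAGE_SIZE slots of sizeof(gen6_gtt_pte_t) bytes each lie below the node. A worked example under the assumptions that a gen6 GTT entry is 4 bytes and the node landed at the very top of a 2GB GGTT (numbers are illustrative only):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t ggtt_total = 2ull << 30;			/* 2GB GGTT, assumed      */
	uint64_t node_start = ggtt_total - (2ull << 20);	/* 2MB node at the top    */
	uint32_t pte_size   = 4;				/* sizeof(gen6_gtt_pte_t) */
	uint32_t page_size  = 4096;

	uint32_t pd_offset = (uint32_t)(node_start / page_size * pte_size);

	assert(pd_offset == 523776u * 4u);	/* (2GB - 2MB) / 4KB pages, 4 bytes each */
	return 0;
}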
 
int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt;
int ret;
int ret = 0;
 
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return -ENOMEM;
 
ppgtt->base.dev = dev;
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
 
if (INTEL_INFO(dev)->gen < 8)
ret = gen6_ppgtt_init(ppgtt);
732,44 → 1165,40
else
BUG();
 
if (ret)
kfree(ppgtt);
else {
dev_priv->mm.aliasing_ppgtt = ppgtt;
if (!ret) {
struct drm_i915_private *dev_priv = dev->dev_private;
kref_init(&ppgtt->ref);
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
ppgtt->base.total);
i915_init_vm(dev_priv, &ppgtt->base);
if (INTEL_INFO(dev)->gen < 8) {
gen6_write_pdes(ppgtt);
DRM_DEBUG("Adding PPGTT at offset %x\n",
ppgtt->pd_offset << 10);
}
}
 
return ret;
}
 
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
static void
ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
/* Currently applicable only to VLV */
if (vma->obj->gt_ro)
flags |= PTE_READ_ONLY;
 
if (!ppgtt)
return;
 
ppgtt->base.cleanup(&ppgtt->base);
dev_priv->mm.aliasing_ppgtt = NULL;
vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
cache_level, flags);
}
 
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
static void ppgtt_unbind_vma(struct i915_vma *vma)
{
ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
cache_level);
}
 
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj)
{
ppgtt->base.clear_range(&ppgtt->base,
i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT,
vma->vm->clear_range(vma->vm,
vma->node.start,
vma->obj->base.size,
true);
}
 
814,7 → 1243,7
void i915_check_and_clear_faults(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
int i;
 
if (INTEL_INFO(dev)->gen < 6)
853,9 → 1282,9
i915_check_and_clear_faults(dev);
 
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
dev_priv->gtt.base.start / PAGE_SIZE,
dev_priv->gtt.base.total / PAGE_SIZE,
false);
dev_priv->gtt.base.start,
dev_priv->gtt.base.total,
true);
}
 
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
862,20 → 1291,52
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
 
i915_check_and_clear_faults(dev);
 
/* First fill our portion of the GTT with scratch pages */
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
dev_priv->gtt.base.start / PAGE_SIZE,
dev_priv->gtt.base.total / PAGE_SIZE,
dev_priv->gtt.base.start,
dev_priv->gtt.base.total,
true);
 
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
struct i915_vma *vma = i915_gem_obj_to_vma(obj,
&dev_priv->gtt.base);
if (!vma)
continue;
 
i915_gem_clflush_object(obj, obj->pin_display);
i915_gem_gtt_bind_object(obj, obj->cache_level);
/* The bind_vma code tries to be smart about tracking mappings.
* Unfortunately above, we've just wiped out the mappings
* without telling our object about it. So we need to fake it.
*/
obj->has_global_gtt_mapping = 0;
vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
}
 
 
if (INTEL_INFO(dev)->gen >= 8) {
if (IS_CHERRYVIEW(dev))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
 
return;
}
 
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
/* TODO: Perhaps it shouldn't be gen6 specific */
if (i915_is_ggtt(vm)) {
if (dev_priv->mm.aliasing_ppgtt)
gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
continue;
}
 
gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
}
 
i915_gem_chipset_flush(dev);
}
 
904,15 → 1365,16
 
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int first_entry,
enum i915_cache_level level)
uint64_t start,
enum i915_cache_level level, u32 unused)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
gen8_gtt_pte_t __iomem *gtt_entries =
(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr;
dma_addr_t addr = 0; /* shut up gcc */
 
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_dma_address(sg_iter.sg) +
949,19 → 1411,20
*/
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int first_entry,
enum i915_cache_level level)
uint64_t start,
enum i915_cache_level level, u32 flags)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
gen6_gtt_pte_t __iomem *gtt_entries =
(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr;
dma_addr_t addr = 0;
 
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
i++;
}
 
971,9 → 1434,10
* of NUMA access patterns. Therefore, even with the way we assume
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1]) !=
vm->pte_encode(addr, level, true));
if (i != 0) {
unsigned long gtt = readl(&gtt_entries[i-1]);
WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
}
 
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
984,11 → 1448,13
}
 
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries,
uint64_t start,
uint64_t length,
bool use_scratch)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
1008,11 → 1474,13
}
 
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries,
uint64_t start,
uint64_t length,
bool use_scratch)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
1023,7 → 1491,7
first_entry, num_entries, max_entries))
num_entries = max_entries;
 
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
 
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
1030,55 → 1498,109
readl(gtt_base);
}
 
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level)
 
static void i915_ggtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
{
const unsigned long entry = vma->node.start >> PAGE_SHIFT;
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
 
intel_gtt_insert_sg_entries(st, pg_start, flags);
 
BUG_ON(!i915_is_ggtt(vma->vm));
intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
vma->obj->has_global_gtt_mapping = 1;
}
 
static void i915_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries,
uint64_t start,
uint64_t length,
bool unused)
{
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
intel_gtt_clear_range(first_entry, num_entries);
}
 
static void i915_ggtt_unbind_vma(struct i915_vma *vma)
{
const unsigned int first = vma->node.start >> PAGE_SHIFT;
const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
 
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
BUG_ON(!i915_is_ggtt(vma->vm));
vma->obj->has_global_gtt_mapping = 0;
intel_gtt_clear_range(first, size);
}
 
static void ggtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
struct drm_device *dev = obj->base.dev;
struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
struct drm_i915_gem_object *obj = vma->obj;
 
dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
entry,
cache_level);
/* Currently applicable only to VLV */
if (obj->gt_ro)
flags |= PTE_READ_ONLY;
 
/* If there is no aliasing PPGTT, or the caller needs a global mapping,
* or we have a global mapping already but the cacheability flags have
* changed, set the global PTEs.
*
* If there is an aliasing PPGTT it is anecdotally faster, so use that
* instead if none of the above hold true.
*
* NB: A global mapping should only be needed for special regions like
* "gtt mappable", SNB errata, or if specified via special execbuf
* flags. At all other times, the GPU will use the aliasing PPGTT.
*/
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
if (!obj->has_global_gtt_mapping ||
(cache_level != obj->cache_level)) {
vma->vm->insert_entries(vma->vm, obj->pages,
vma->node.start,
cache_level, flags);
obj->has_global_gtt_mapping = 1;
}
}
 
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
if (dev_priv->mm.aliasing_ppgtt &&
(!obj->has_aliasing_ppgtt_mapping ||
(cache_level != obj->cache_level))) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
vma->obj->pages,
vma->node.start,
cache_level, flags);
vma->obj->has_aliasing_ppgtt_mapping = 1;
}
}
 
static void ggtt_unbind_vma(struct i915_vma *vma)
{
struct drm_device *dev = obj->base.dev;
struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
struct drm_i915_gem_object *obj = vma->obj;
 
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
entry,
obj->base.size >> PAGE_SHIFT,
if (obj->has_global_gtt_mapping) {
vma->vm->clear_range(vma->vm,
vma->node.start,
obj->base.size,
true);
 
obj->has_global_gtt_mapping = 0;
}
 
if (obj->has_aliasing_ppgtt_mapping) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.clear_range(&appgtt->base,
vma->node.start,
obj->base.size,
true);
obj->has_aliasing_ppgtt_mapping = 0;
}
}
 
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
1158,31 → 1680,16
 
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
ggtt_vm->clear_range(ggtt_vm, hole_start,
hole_end - hole_start, true);
}
 
/* And finally clear the reserved guard page */
ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
}
 
static bool
intel_enable_ppgtt(struct drm_device *dev)
{
if (i915_enable_ppgtt >= 0)
return i915_enable_ppgtt;
 
#ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
return false;
#endif
 
return true;
}
 
void i915_gem_init_global_gtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
1191,28 → 1698,8
gtt_size = dev_priv->gtt.base.total;
mappable_size = dev_priv->gtt.mappable_end;
 
if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
int ret;
 
if (INTEL_INFO(dev)->gen <= 7) {
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */
gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
}
 
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
 
ret = i915_gem_init_aliasing_ppgtt(dev);
if (!ret)
return;
 
DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
drm_mm_takedown(&dev_priv->gtt.base.mm);
if (INTEL_INFO(dev)->gen < 8)
gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE;
}
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
 
static int setup_scratch_page(struct drm_device *dev)
{
1265,14 → 1752,27
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
if (bdw_gmch_ctl)
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
if (bdw_gmch_ctl > 4) {
WARN_ON(!i915_preliminary_hw_support);
return 4<<20;
}
 
#ifdef CONFIG_X86_32
/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
if (bdw_gmch_ctl > 4)
bdw_gmch_ctl = 4;
#endif
 
return bdw_gmch_ctl << 20;
}
 
static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
gmch_ctrl &= SNB_GMCH_GGMS_MASK;
 
if (gmch_ctrl)
return 1 << (20 + gmch_ctrl);
 
return 0;
}
 
static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
1287,6 → 1787,24
return bdw_gmch_ctl << 25; /* 32 MB units */
}
 
static size_t chv_get_stolen_size(u16 gmch_ctrl)
{
gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
gmch_ctrl &= SNB_GMCH_GMS_MASK;
 
/*
* 0x0 to 0x10: 32MB increments starting at 0MB
* 0x11 to 0x16: 4MB increments starting at 8MB
* 0x17 to 0x1d: 4MB increments starting at 36MB
*/
if (gmch_ctrl < 0x11)
return gmch_ctrl << 25;
else if (gmch_ctrl < 0x17)
return (gmch_ctrl - 0x11 + 2) << 22;
else
return (gmch_ctrl - 0x17 + 9) << 22;
}
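
/*
 * Worked decode examples (illustrative only, not part of this change),
 * following the table above:
 *   gmch_ctrl = 0x02 -> 0x02 << 25              =  64MB (32MB steps from 0MB)
 *   gmch_ctrl = 0x12 -> (0x12 - 0x11 + 2) << 22 =  12MB (4MB steps from 8MB)
 *   gmch_ctrl = 0x18 -> (0x18 - 0x17 + 9) << 22 =  40MB (4MB steps from 36MB)
 */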
 
static int ggtt_probe_common(struct drm_device *dev,
size_t gtt_size)
{
1317,19 → 1835,8
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
* bits. When using advanced contexts each context stores its own PAT, but
* writing this data shouldn't be harmful even in those cases. */
static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
{
#define GEN8_PPAT_UC (0<<0)
#define GEN8_PPAT_WC (1<<0)
#define GEN8_PPAT_WT (2<<0)
#define GEN8_PPAT_WB (3<<0)
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
/* FIXME(BDW): Bspec is completely confused about cache control bits. */
#define GEN8_PPAT_LLC (1<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_AGE(x) (x<<4)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
uint64_t pat;
 
pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
1347,6 → 1854,33
I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
}
 
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
{
uint64_t pat;
 
/*
* Map WB on BDW to snooped on CHV.
*
* Only the snoop bit has meaning for CHV, the rest is
* ignored.
*
* Note that the hardware enforces snooping for all page
* table accesses. The snoop bit is actually ignored for
* PDEs.
*/
pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
GEN8_PPAT(1, 0) |
GEN8_PPAT(2, 0) |
GEN8_PPAT(3, 0) |
GEN8_PPAT(4, CHV_PPAT_SNOOP) |
GEN8_PPAT(5, CHV_PPAT_SNOOP) |
GEN8_PPAT(6, CHV_PPAT_SNOOP) |
GEN8_PPAT(7, CHV_PPAT_SNOOP);
 
I915_WRITE(GEN8_PRIVATE_PAT, pat);
I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
}
 
static int gen8_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
size_t *stolen,
1367,12 → 1901,20
 
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
if (IS_CHERRYVIEW(dev)) {
*stolen = chv_get_stolen_size(snb_gmch_ctl);
gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
} else {
*stolen = gen8_get_stolen_size(snb_gmch_ctl);
gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
}
 
gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
*gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
 
gen8_setup_private_ppat(dev_priv);
if (IS_CHERRYVIEW(dev))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
 
ret = ggtt_probe_common(dev, gtt_size);
 
1426,6 → 1968,11
{
 
struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
 
if (drm_mm_initialized(&vm->mm)) {
drm_mm_takedown(&vm->mm);
list_del(&vm->global_link);
}
iounmap(gtt->gsm);
teardown_scratch_page(vm->dev);
}
1449,7 → 1996,6
 
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
 
if (unlikely(dev_priv->gtt.do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
1501,10 → 2047,81
gtt->base.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped)
DRM_INFO("VT-d active for gfx access\n");
#endif
/*
* i915.enable_ppgtt is read-only, so do an early pass to validate the
* user's requested state against the hardware/driver capabilities. We
* do this now so that we can print out any log messages once rather
* than every time we check intel_enable_ppgtt().
*/
i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
 
return 0;
}
 
static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
 
INIT_LIST_HEAD(&vma->vma_link);
INIT_LIST_HEAD(&vma->mm_list);
INIT_LIST_HEAD(&vma->exec_list);
vma->vm = vm;
vma->obj = obj;
 
switch (INTEL_INFO(vm->dev)->gen) {
case 8:
case 7:
case 6:
if (i915_is_ggtt(vm)) {
vma->unbind_vma = ggtt_unbind_vma;
vma->bind_vma = ggtt_bind_vma;
} else {
vma->unbind_vma = ppgtt_unbind_vma;
vma->bind_vma = ppgtt_bind_vma;
}
break;
case 5:
case 4:
case 3:
case 2:
BUG_ON(!i915_is_ggtt(vm));
vma->unbind_vma = i915_ggtt_unbind_vma;
vma->bind_vma = i915_ggtt_bind_vma;
break;
default:
BUG();
}
 
/* Keep GGTT vmas first to make debug easier */
if (i915_is_ggtt(vm))
list_add(&vma->vma_link, &obj->vma_list);
else
list_add_tail(&vma->vma_link, &obj->vma_list);
 
return vma;
}
 
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
struct i915_vma *vma;
 
vma = i915_gem_obj_to_vma(obj, vm);
if (!vma)
vma = __i915_gem_vma_create(obj, vm);
 
return vma;
}
 
struct scatterlist *sg_next(struct scatterlist *sg)
{
if (sg_is_last(sg))
1519,7 → 2136,7
 
 
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
sg_free_fn *free_fn)
bool skip_first_chunk, sg_free_fn *free_fn)
{
struct scatterlist *sgl, *next;
 
1547,16 → 2164,18
}
 
table->orig_nents -= sg_size;
if (!skip_first_chunk) {
kfree(sgl);
skip_first_chunk = false;
}
sgl = next;
}
 
table->sgl = NULL;
}
 
void sg_free_table(struct sg_table *table)
{
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL);
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, NULL);
}
 
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
1622,7 → 2241,7
return 0;
 
err:
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL);
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, NULL);
 
return -ENOMEM;
}
/drivers/video/drm/i915/i915_gem_gtt.h
0,0 → 1,285
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please try to maintain the following order within this file unless it makes
* sense to do otherwise. From top to bottom:
* 1. typedefs
* 2. #defines, and macros
* 3. structure definitions
* 4. function prototypes
*
* Within each section, please try to order by generation in ascending order,
* from top to bottom (ie. gen6 on the top, gen8 on the bottom).
*/
 
#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__
 
typedef uint32_t gen6_gtt_pte_t;
typedef uint64_t gen8_gtt_pte_t;
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
 
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID (1 << 0)
 
#define GEN6_PPGTT_PD_ENTRIES 512
#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
#define GEN6_PDE_VALID (1 << 0)
 
#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
 
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
#define BYT_PTE_WRITEABLE (1 << 1)
 
/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
* 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
*/
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
(((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED (0)
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
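
/*
 * Worked expansion (illustrative only, not part of this change): the macro
 * places index bits 2:0 in PTE bits 3:1 and index bit 3 in PTE bit 11, e.g.
 *   HSW_WB_LLC_AGE3      = HSW_CACHEABILITY_CONTROL(0x2) = 0x004
 *   HSW_WB_ELLC_LLC_AGE3 = HSW_CACHEABILITY_CONTROL(0x8) = 0x800
 *   HSW_WB_ELLC_LLC_AGE0 = HSW_CACHEABILITY_CONTROL(0xb) = 0x806
 */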
 
/* GEN8 legacy style address is defined as a 3 level page table:
* 31:30 | 29:21 | 20:12 | 11:0
* PDPE | PDE | PTE | offset
* The difference as compared to normal x86 3 level page table is the PDPEs are
* programmed via register.
*/
#define GEN8_PDPE_SHIFT 30
#define GEN8_PDPE_MASK 0x3
#define GEN8_PDE_SHIFT 21
#define GEN8_PDE_MASK 0x1ff
#define GEN8_PTE_SHIFT 12
#define GEN8_PTE_MASK 0x1ff
#define GEN8_LEGACY_PDPS 4
#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
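
/*
 * Illustrative sketch (not part of this change): how a GPU virtual address
 * breaks down with the shifts/masks above. gen8_gtt_index() is a
 * hypothetical helper used only for this example.
 */
static inline void gen8_gtt_index(uint64_t addr, unsigned int *pdpe,
unsigned int *pde, unsigned int *pte)
{
*pdpe = (addr >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK; /* bits 31:30 */
*pde = (addr >> GEN8_PDE_SHIFT) & GEN8_PDE_MASK; /* bits 29:21 */
*pte = (addr >> GEN8_PTE_SHIFT) & GEN8_PTE_MASK; /* bits 20:12 */
}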
 
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
 
#define CHV_PPAT_SNOOP (1<<6)
#define GEN8_PPAT_AGE(x) (x<<4)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLC (1<<2)
#define GEN8_PPAT_WB (3<<0)
#define GEN8_PPAT_WT (2<<0)
#define GEN8_PPAT_WC (1<<0)
#define GEN8_PPAT_UC (0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
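
/*
 * Worked expansion (illustrative only, not part of this change): each
 * GEN8_PPAT() entry occupies one byte of the 64-bit PAT value, e.g.
 *   GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) = 0x07
 *   GEN8_PPAT(4, CHV_PPAT_SNOOP)               = (uint64_t)0x40 << 32
 */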
 
enum i915_cache_level;
/**
* A VMA represents a GEM BO that is bound into an address space. Therefore, a
* VMA's presence cannot be guaranteed before binding, or after unbinding the
* object into/from the address space.
*
* To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
* will always be <= an object's lifetime. So object refcounting should cover us.
*/
struct i915_vma {
struct drm_mm_node node;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
 
/** This object's place on the active/inactive lists */
struct list_head mm_list;
 
struct list_head vma_link; /* Link in the object's VMA list */
 
/** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
 
/**
* Used for performing relocations during execbuffer insertion.
*/
struct hlist_node exec_node;
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
 
/**
* How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
* (via user_pin_count), execbuffer (objects are not allowed multiple
* times for the same batchbuffer), and the framebuffer code. When
* switching/pageflipping, the framebuffer code has at most two buffers
* pinned per crtc.
*
* In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
* bits with absolutely no headroom. So use 4 bits. */
unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 
/** Unmap an object from an address space. This usually consists of
* setting the valid PTE entries to a reserved scratch page. */
void (*unbind_vma)(struct i915_vma *vma);
/* Map an object into an address space with the given cache flags. */
#define GLOBAL_BIND (1<<0)
#define PTE_READ_ONLY (1<<1)
void (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
};
 
struct i915_address_space {
struct drm_mm mm;
struct drm_device *dev;
struct list_head global_link;
unsigned long start; /* Start offset always 0 for dri2 */
size_t total; /* size addr space maps (ex. 2GB for ggtt) */
 
struct {
dma_addr_t addr;
struct page *page;
} scratch;
 
/**
* List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
*/
struct list_head active_list;
 
/**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
* last_rendering_seqno is 0 while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
* freed, and we'll pull it off the list in the free path.
*/
struct list_head inactive_list;
 
/* FIXME: Need a more generic return type */
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 flags); /* Create a valid PTE */
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
bool use_scratch);
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
enum i915_cache_level cache_level, u32 flags);
void (*cleanup)(struct i915_address_space *vm);
};
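
/*
 * Illustrative usage (not part of this change): with this interface callers
 * pass byte offsets and lengths; the gen-specific implementations convert
 * them to PTE indices internally, e.g. as in ppgtt_unbind_vma() in
 * i915_gem_gtt.c:
 *
 * vma->vm->clear_range(vma->vm, vma->node.start, vma->obj->base.size, true);
 */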
 
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
* portion of the GTT which can be mapped by the CPU and remain both coherent
* and correct (in cases like swizzling). That region is referred to as GMADR in
* the spec.
*/
struct i915_gtt {
struct i915_address_space base;
size_t stolen_size; /* Total size of stolen memory */
 
unsigned long mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
 
/** "Graphics Stolen Memory" holds the global PTEs */
void __iomem *gsm;
 
bool do_idle_maps;
 
int mtrr;
 
/* global gtt ops */
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
size_t *stolen, phys_addr_t *mappable_base,
unsigned long *mappable_end);
};
 
struct i915_hw_ppgtt {
struct i915_address_space base;
struct kref ref;
struct drm_mm_node node;
unsigned num_pd_entries;
unsigned num_pd_pages; /* gen8+ */
union {
struct page **pt_pages;
struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
};
struct page *pd_pages;
union {
uint32_t pd_offset;
dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
};
union {
dma_addr_t *pt_dma_addr;
dma_addr_t *gen8_pt_dma_addr[4];
};
 
struct intel_context *ctx;
 
int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring,
bool synchronous);
// void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
 
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
unsigned long mappable_end, unsigned long end);
 
bool intel_enable_ppgtt(struct drm_device *dev, bool full);
int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
 
void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
 
#endif
/drivers/video/drm/i915/i915_gem_render_state.c
0,0 → 1,169
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Mika Kuoppala <mika.kuoppala@intel.com>
*
*/
 
#include "i915_drv.h"
#include "intel_renderstate.h"
 
struct render_state {
const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
u64 ggtt_offset;
int gen;
};
 
static const struct intel_renderstate_rodata *
render_state_get_rodata(struct drm_device *dev, const int gen)
{
switch (gen) {
case 6:
return &gen6_null_state;
case 7:
return &gen7_null_state;
case 8:
return &gen8_null_state;
}
 
return NULL;
}
 
static int render_state_init(struct render_state *so, struct drm_device *dev)
{
int ret;
 
so->gen = INTEL_INFO(dev)->gen;
so->rodata = render_state_get_rodata(dev, so->gen);
if (so->rodata == NULL)
return 0;
 
if (so->rodata->batch_items * 4 > 4096)
return -EINVAL;
 
so->obj = i915_gem_alloc_object(dev, 4096);
if (so->obj == NULL)
return -ENOMEM;
 
ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
if (ret)
goto free_gem;
 
so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
return 0;
 
free_gem:
drm_gem_object_unreference(&so->obj->base);
return ret;
}
 
static int render_state_setup(struct render_state *so)
{
const struct intel_renderstate_rodata *rodata = so->rodata;
unsigned int i = 0, reloc_index = 0;
struct page *page;
u32 *d;
int ret;
 
ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
if (ret)
return ret;
 
page = sg_page(so->obj->pages->sgl);
d = kmap(page);
 
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
 
if (i * 4 == rodata->reloc[reloc_index]) {
u64 r = s + so->ggtt_offset;
s = lower_32_bits(r);
if (so->gen >= 8) {
if (i + 1 >= rodata->batch_items ||
rodata->batch[i + 1] != 0)
return -EINVAL;
 
d[i++] = s;
s = upper_32_bits(r);
}
 
reloc_index++;
}
 
d[i++] = s;
}
FreeKernelSpace(d);
 
ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
if (ret)
return ret;
 
if (rodata->reloc[reloc_index] != -1) {
DRM_ERROR("only %d relocs resolved\n", reloc_index);
return -EINVAL;
}
 
return 0;
}
 
static void render_state_fini(struct render_state *so)
{
i915_gem_object_ggtt_unpin(so->obj);
drm_gem_object_unreference(&so->obj->base);
}
 
int i915_gem_render_state_init(struct intel_engine_cs *ring)
{
struct render_state so;
int ret;
 
if (WARN_ON(ring->id != RCS))
return -ENOENT;
 
ret = render_state_init(&so, ring->dev);
if (ret)
return ret;
 
if (so.rodata == NULL)
return 0;
 
ret = render_state_setup(&so);
if (ret)
goto out;
 
ret = ring->dispatch_execbuffer(ring,
so.ggtt_offset,
so.rodata->batch_items * 4,
I915_DISPATCH_SECURE);
if (ret)
goto out;
 
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
 
ret = __i915_add_request(ring, NULL, so.obj, NULL);
/* __i915_add_request moves object to inactive if it fails */
out:
render_state_fini(&so);
return ret;
}
/drivers/video/drm/i915/i915_gem_stolen.c
73,7 → 73,52
 
if (base == 0)
return 0;
 
/* make sure we don't clobber the GTT if it's within stolen memory */
if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
struct {
u32 start, end;
} stolen[2] = {
{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
};
u64 gtt_start, gtt_end;
 
gtt_start = I915_READ(PGTBL_CTL);
if (IS_GEN4(dev))
gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
gtt_start &= PGTBL_ADDRESS_LO_MASK;
gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
 
if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
stolen[0].end = gtt_start;
if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
stolen[1].start = gtt_end;
 
/* pick the larger of the two chunks */
if (stolen[0].end - stolen[0].start >
stolen[1].end - stolen[1].start) {
base = stolen[0].start;
dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
} else {
base = stolen[1].start;
dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
}
 
if (stolen[0].start != stolen[1].start ||
stolen[0].end != stolen[1].end) {
DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
(unsigned long long) gtt_start,
(unsigned long long) gtt_end - 1);
DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
base, base + (u32) dev_priv->gtt.stolen_size - 1);
}
}
 
#if 0
 
/* Verify that nothing else uses this physical address. Stolen
* memory should be reserved by the BIOS and hidden from the
* kernel. So if the region is already marked as busy, something
90,30 → 135,68
return base;
}
 
static int i915_setup_compression(struct drm_device *dev, int size)
static int find_compression_threshold(struct drm_device *dev,
struct drm_mm_node *node,
int size,
int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
int compression_threshold = 1;
int ret;
 
compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
if (!compressed_fb)
goto err_llb;
/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.
*
* The enable_fbc code will attempt to use one of our 2 compression
* thresholds, therefore, in that case, we only have 1 resort.
*/
 
/* Try to over-allocate to reduce reallocations and fragmentation */
ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
/* Try to over-allocate to reduce reallocations and fragmentation. */
ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
if (ret)
ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
if (ret == 0)
return compression_threshold;
 
again:
/* HW's ability to limit the CFB is 1:4 */
if (compression_threshold > 4 ||
(fb_cpp == 2 && compression_threshold == 2))
return 0;
 
ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
size >>= 1, 4096,
DRM_MM_SEARCH_DEFAULT);
if (ret)
if (ret && INTEL_INFO(dev)->gen <= 4) {
return 0;
} else if (ret) {
compression_threshold <<= 1;
goto again;
} else {
return compression_threshold;
}
}
 
static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *uninitialized_var(compressed_llb);
int ret;
 
ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
size, fb_cpp);
if (!ret)
goto err_llb;
else if (ret > 1) {
DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
 
}
 
dev_priv->fbc.threshold = ret;
 
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
127,13 → 210,12
dev_priv->fbc.compressed_llb = compressed_llb;
 
I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + compressed_fb->start);
dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start);
}
 
dev_priv->fbc.compressed_fb = compressed_fb;
dev_priv->fbc.size = size;
dev_priv->fbc.size = size / dev_priv->fbc.threshold;
 
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
142,14 → 224,12
 
err_fb:
kfree(compressed_llb);
drm_mm_remove_node(compressed_fb);
drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
err_llb:
kfree(compressed_fb);
// pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
 
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
162,7 → 242,7
/* Release any current block */
i915_gem_stolen_cleanup_compression(dev);
 
return i915_setup_compression(dev, size);
return i915_setup_compression(dev, size, fb_cpp);
}
 
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
172,10 → 252,7
if (dev_priv->fbc.size == 0)
return;
 
if (dev_priv->fbc.compressed_fb) {
drm_mm_remove_node(dev_priv->fbc.compressed_fb);
kfree(dev_priv->fbc.compressed_fb);
}
drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
 
if (dev_priv->fbc.compressed_llb) {
drm_mm_remove_node(dev_priv->fbc.compressed_llb);
201,6 → 278,13
struct drm_i915_private *dev_priv = dev->dev_private;
int bios_reserved = 0;
 
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
return 0;
}
#endif
 
if (dev_priv->gtt.stolen_size == 0)
return 0;
 
272,9 → 356,20
kfree(obj->pages);
}
 
 
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
if (obj->stolen) {
drm_mm_remove_node(obj->stolen);
kfree(obj->stolen);
obj->stolen = NULL;
}
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
.release = i915_gem_object_release_stolen,
};
 
static struct drm_i915_gem_object *
432,13 → 527,3
drm_gem_object_unreference(&obj->base);
return NULL;
}
 
void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
if (obj->stolen) {
drm_mm_remove_node(obj->stolen);
kfree(obj->stolen);
obj->stolen = NULL;
}
}
/drivers/video/drm/i915/i915_gem_tiling.c
87,7 → 87,7
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
294,7 → 294,7
struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int ret = 0;
 
308,7 → 308,7
return -EINVAL;
}
 
if (obj->pin_count || obj->framebuffer_references) {
if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
}
415,7 → 415,7
struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
 
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
/drivers/video/drm/i915/i915_gpu_error.c
42,6 → 42,7
case VCS: return "bsd";
case BCS: return "blt";
case VECS: return "vebox";
case VCS2: return "bsd2";
default: return "";
}
}
146,7 → 147,10
va_list tmp;
 
va_copy(tmp, args);
if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
len = vsnprintf(NULL, 0, f, tmp);
va_end(tmp);
 
if (!__i915_error_seek(e, len))
return;
}
 
201,6 → 205,7
err_puts(m, tiling_flag(err->tiling));
err_puts(m, dirty_flag(err->dirty));
err_puts(m, purgeable_flag(err->purgeable));
err_puts(m, err->userptr ? " userptr" : "");
err_puts(m, err->ring != -1 ? " " : "");
err_puts(m, ring_str(err->ring));
err_puts(m, i915_cache_level_str(err->cache_level));
235,50 → 240,62
 
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
struct drm_i915_error_state *error,
unsigned ring)
struct drm_i915_error_ring *ring)
{
BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
if (!error->ring[ring].valid)
if (!ring->valid)
return;
 
err_printf(m, "%s command stream:\n", ring_str(ring));
err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
err_printf(m, " HEAD: 0x%08x\n", ring->head);
err_printf(m, " TAIL: 0x%08x\n", ring->tail);
err_printf(m, " CTL: 0x%08x\n", ring->ctl);
err_printf(m, " HWS: 0x%08x\n", ring->hws);
err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone);
if (INTEL_INFO(dev)->gen >= 4) {
err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr[ring]);
err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]);
err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate);
err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
}
err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
lower_32_bits(ring->faddr));
if (INTEL_INFO(dev)->gen >= 6) {
err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);
err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
error->semaphore_mboxes[ring][0],
error->semaphore_seqno[ring][0]);
ring->semaphore_mboxes[0],
ring->semaphore_seqno[0]);
err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
error->semaphore_mboxes[ring][1],
error->semaphore_seqno[ring][1]);
ring->semaphore_mboxes[1],
ring->semaphore_seqno[1]);
if (HAS_VEBOX(dev)) {
err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
error->semaphore_mboxes[ring][2],
error->semaphore_seqno[ring][2]);
ring->semaphore_mboxes[2],
ring->semaphore_seqno[2]);
}
}
err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
if (USES_PPGTT(dev)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);
 
if (INTEL_INFO(dev)->gen >= 8) {
int i;
for (i = 0; i < 4; i++)
err_printf(m, " PDP%d: 0x%016llx\n",
i, ring->vm_info.pdp[i]);
} else {
err_printf(m, " PP_DIR_BASE: 0x%08x\n",
ring->vm_info.pp_dir_base);
}
}
err_printf(m, " seqno: 0x%08x\n", ring->seqno);
err_printf(m, " waiting: %s\n", yesno(ring->waiting));
err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
err_printf(m, " hangcheck: %s [%d]\n",
hangcheck_action_to_str(error->hangcheck_action[ring]),
error->hangcheck_score[ring]);
hangcheck_action_to_str(ring->hangcheck_action),
ring->hangcheck_score);
}
 
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
290,13 → 307,28
va_end(args);
}
 
static void print_error_obj(struct drm_i915_error_state_buf *m,
struct drm_i915_error_object *obj)
{
int page, offset, elt;
 
for (page = offset = 0; page < obj->page_count; page++) {
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
err_printf(m, "%08x : %08x\n", offset,
obj->pages[page][elt]);
offset += 4;
}
}
}
 
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_error_state_file_priv *error_priv)
{
struct drm_device *dev = error_priv->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error = error_priv->error;
int i, j, page, offset, elt;
int i, j, offset, elt;
int max_hangcheck_score;
 
if (!error) {
err_printf(m, "no error state collected\n");
303,9 → 335,26
goto out;
}
 
err_printf(m, "%s\n", error->error_msg);
err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
max_hangcheck_score = 0;
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
if (error->ring[i].hangcheck_score > max_hangcheck_score)
max_hangcheck_score = error->ring[i].hangcheck_score;
}
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
if (error->ring[i].hangcheck_score == max_hangcheck_score &&
error->ring[i].pid != -1) {
err_printf(m, "Active process (on ring %s): %s [%d]\n",
ring_str(i),
error->ring[i].comm,
error->ring[i].pid);
}
}
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
330,8 → 379,10
if (INTEL_INFO(dev)->gen == 7)
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
for (i = 0; i < ARRAY_SIZE(error->ring); i++)
i915_ring_error_state(m, dev, error, i);
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
err_printf(m, "%s command stream:\n", ring_str(i));
i915_ring_error_state(m, dev, &error->ring[i]);
}
 
if (error->active_bo)
print_error_buffers(m, "Active",
346,19 → 397,24
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
struct drm_i915_error_object *obj;
 
if ((obj = error->ring[i].batchbuffer)) {
err_printf(m, "%s --- gtt_offset = 0x%08x\n",
dev_priv->ring[i].name,
obj = error->ring[i].batchbuffer;
if (obj) {
err_puts(m, dev_priv->ring[i].name);
if (error->ring[i].pid != -1)
err_printf(m, " (submitted by %s [%d])",
error->ring[i].comm,
error->ring[i].pid);
err_printf(m, " --- gtt_offset = 0x%08x\n",
obj->gtt_offset);
offset = 0;
for (page = 0; page < obj->page_count; page++) {
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
err_printf(m, "%08x : %08x\n", offset,
obj->pages[page][elt]);
offset += 4;
print_error_obj(m, obj);
}
 
obj = error->ring[i].wa_batchbuffer;
if (obj) {
err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
dev_priv->ring[i].name, obj->gtt_offset);
print_error_obj(m, obj);
}
}
 
if (error->ring[i].num_requests) {
err_printf(m, "%s --- %d requests\n",
376,19 → 432,11
err_printf(m, "%s --- ringbuffer = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
offset = 0;
for (page = 0; page < obj->page_count; page++) {
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
err_printf(m, "%08x : %08x\n",
offset,
obj->pages[page][elt]);
offset += 4;
print_error_obj(m, obj);
}
}
}
 
if ((obj = error->ring[i].ctx)) {
err_printf(m, "%s --- HW Context = 0x%08x\n",
if ((obj = error->ring[i].hws_page)) {
err_printf(m, "%s --- HW Status = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
offset = 0;
402,7 → 450,14
offset += 16;
}
}
 
if ((obj = error->ring[i].ctx)) {
err_printf(m, "%s --- HW Context = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
print_error_obj(m, obj);
}
}
 
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
469,6 → 524,7
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
i915_error_object_free(error->ring[i].batchbuffer);
i915_error_object_free(error->ring[i].ringbuffer);
i915_error_object_free(error->ring[i].hws_page);
i915_error_object_free(error->ring[i].ctx);
kfree(error->ring[i].requests);
}
482,6 → 538,7
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src,
struct i915_address_space *vm,
const int num_pages)
{
struct drm_i915_error_object *dst;
495,7 → 552,7
if (dst == NULL)
return NULL;
 
reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm);
for (i = 0; i < num_pages; i++) {
unsigned long flags;
void *d;
505,8 → 562,10
goto unwind;
 
local_irq_save(flags);
if (reloc_offset < dev_priv->gtt.mappable_end &&
src->has_global_gtt_mapping) {
if (src->cache_level == I915_CACHE_NONE &&
reloc_offset < dev_priv->gtt.mappable_end &&
src->has_global_gtt_mapping &&
i915_is_ggtt(vm)) {
void __iomem *s;
 
/* Simply ignore tiling or any overlapping fence.
556,10 → 615,14
kfree(dst);
return NULL;
}
#define i915_error_object_create(dev_priv, src) \
i915_error_object_create_sized((dev_priv), (src), \
#define i915_error_object_create(dev_priv, src, vm) \
i915_error_object_create_sized((dev_priv), (src), (vm), \
(src)->base.size>>PAGE_SHIFT)
 
#define i915_error_ggtt_object_create(dev_priv, src) \
i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \
(src)->base.size>>PAGE_SHIFT)
 
static void capture_bo(struct drm_i915_error_buffer *err,
struct drm_i915_gem_object *obj)
{
572,7 → 635,7
err->write_domain = obj->base.write_domain;
err->fence_reg = obj->fence_reg;
err->pinned = 0;
if (obj->pin_count > 0)
if (i915_gem_obj_is_pinned(obj))
err->pinned = 1;
if (obj->user_pin_count > 0)
err->pinned = -1;
579,6 → 642,7
err->tiling = obj->tiling_mode;
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
err->ring = obj->ring ? obj->ring->id : -1;
err->cache_level = obj->cache_level;
}
605,7 → 669,7
int i = 0;
 
list_for_each_entry(obj, head, global_list) {
if (obj->pin_count == 0)
if (!i915_gem_obj_is_pinned(obj))
continue;
 
capture_bo(err++, obj);
616,6 → 680,39
return i;
}
 
/* Generate a semi-unique error code. The code is not meant to have meaning. The
* code's only purpose is to try to prevent false duplicated bug reports by
* grossly estimating a GPU error state.
*
* TODO Ideally, hashing the batchbuffer would be a very nice way to determine
* the hang if we could strip the GTT offset information from it.
*
* It's only a small step better than a random number in its current form.
*/
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
int *ring_id)
{
uint32_t error_code = 0;
int i;
 
/* IPEHR would be an ideal way to detect errors, as it's the gross
* measure of "the command that hung." However, it has some very common
* synchronization commands which almost always appear in cases that are
* strictly a client bug. Use instdone to help differentiate those.
*/
for (i = 0; i < I915_NUM_RINGS; i++) {
if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
if (ring_id)
*ring_id = i;
 
return error->ring[i].ipehr ^ error->ring[i].instdone;
}
}
 
return error_code;
}
 
static void i915_gem_record_fences(struct drm_device *dev,
struct drm_i915_error_state *error)
{
649,111 → 746,120
}
}
 
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
struct i915_address_space *vm;
struct i915_vma *vma;
struct drm_i915_gem_object *obj;
u32 seqno;
 
if (!ring->get_seqno)
return NULL;
 
if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
u32 acthd = I915_READ(ACTHD);
 
if (WARN_ON(ring->id != RCS))
return NULL;
 
obj = ring->scratch.obj;
if (obj != NULL &&
acthd >= i915_gem_obj_ggtt_offset(obj) &&
acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
return i915_error_object_create(dev_priv, obj);
}
 
seqno = ring->get_seqno(ring, false);
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
list_for_each_entry(vma, &vm->active_list, mm_list) {
obj = vma->obj;
if (obj->ring != ring)
continue;
 
if (i915_seqno_passed(seqno, obj->last_read_seqno))
continue;
 
if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
continue;
 
/* We need to copy these to an anonymous buffer as the simplest
* method to avoid being overwritten by userspace.
*/
return i915_error_object_create(dev_priv, obj);
}
}
 
return NULL;
}
 
static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_error_state *error,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring,
struct drm_i915_error_ring *ering)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (INTEL_INFO(dev)->gen >= 6) {
error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
error->semaphore_mboxes[ring->id][0]
ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
ering->semaphore_mboxes[0]
= I915_READ(RING_SYNC_0(ring->mmio_base));
error->semaphore_mboxes[ring->id][1]
ering->semaphore_mboxes[1]
= I915_READ(RING_SYNC_1(ring->mmio_base));
error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
}
 
if (HAS_VEBOX(dev)) {
error->semaphore_mboxes[ring->id][2] =
ering->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(ring->mmio_base));
error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2];
ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
}
 
if (INTEL_INFO(dev)->gen >= 4) {
error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base));
if (INTEL_INFO(dev)->gen >= 8)
error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
if (INTEL_INFO(dev)->gen >= 8) {
ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
}
ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
} else {
error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
error->ipeir[ring->id] = I915_READ(IPEIR);
error->ipehr[ring->id] = I915_READ(IPEHR);
error->instdone[ring->id] = I915_READ(INSTDONE);
ering->faddr = I915_READ(DMA_FADD_I8XX);
ering->ipeir = I915_READ(IPEIR);
ering->ipehr = I915_READ(IPEHR);
ering->instdone = I915_READ(INSTDONE);
}
 
error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
error->seqno[ring->id] = ring->get_seqno(ring, false);
error->acthd[ring->id] = intel_ring_get_active_head(ring);
error->head[ring->id] = I915_READ_HEAD(ring);
error->tail[ring->id] = I915_READ_TAIL(ring);
error->ctl[ring->id] = I915_READ_CTL(ring);
ering->waiting = waitqueue_active(&ring->irq_queue);
ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
ering->seqno = ring->get_seqno(ring, false);
ering->acthd = intel_ring_get_active_head(ring);
ering->head = I915_READ_HEAD(ring);
ering->tail = I915_READ_TAIL(ring);
ering->ctl = I915_READ_CTL(ring);
 
error->cpu_ring_head[ring->id] = ring->head;
error->cpu_ring_tail[ring->id] = ring->tail;
if (I915_NEED_GFX_HWS(dev)) {
int mmio;
 
error->hangcheck_score[ring->id] = ring->hangcheck.score;
error->hangcheck_action[ring->id] = ring->hangcheck.action;
if (IS_GEN7(dev)) {
switch (ring->id) {
default:
case RCS:
mmio = RENDER_HWS_PGA_GEN7;
break;
case BCS:
mmio = BLT_HWS_PGA_GEN7;
break;
case VCS:
mmio = BSD_HWS_PGA_GEN7;
break;
case VECS:
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
} else if (IS_GEN6(ring->dev)) {
mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
mmio = RING_HWS_PGA(ring->mmio_base);
}
 
ering->hws = I915_READ(mmio);
}
 
static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
ering->cpu_ring_head = ring->buffer->head;
ering->cpu_ring_tail = ring->buffer->tail;
 
ering->hangcheck_score = ring->hangcheck.score;
ering->hangcheck_action = ring->hangcheck.action;
 
if (USES_PPGTT(dev)) {
int i;
 
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
 
switch (INTEL_INFO(dev)->gen) {
case 8:
for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] =
I915_READ(GEN8_RING_PDP_UDW(ring, i));
ering->vm_info.pdp[i] <<= 32;
ering->vm_info.pdp[i] |=
I915_READ(GEN8_RING_PDP_LDW(ring, i));
}
break;
case 7:
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(ring));
break;
case 6:
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(ring));
break;
}
}
}
 
 
static void i915_gem_record_active_context(struct intel_engine_cs *ring,
struct drm_i915_error_state *error,
struct drm_i915_error_ring *ering)
{
766,8 → 872,7
 
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
ering->ctx = i915_error_object_create_sized(dev_priv,
obj, 1);
ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
break;
}
}
781,21 → 886,56
int i, count;
 
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_ring_buffer *ring = &dev_priv->ring[i];
struct intel_engine_cs *ring = &dev_priv->ring[i];
 
error->ring[i].pid = -1;
 
if (ring->dev == NULL)
continue;
 
error->ring[i].valid = true;
 
i915_record_ring_state(dev, error, ring);
i915_record_ring_state(dev, ring, &error->ring[i]);
 
request = i915_gem_find_active_request(ring);
if (request) {
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
* by userspace.
*/
error->ring[i].batchbuffer =
i915_error_first_batchbuffer(dev_priv, ring);
i915_error_object_create(dev_priv,
request->batch_obj,
request->ctx ?
request->ctx->vm :
&dev_priv->gtt.base);
 
if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
ring->scratch.obj)
error->ring[i].wa_batchbuffer =
i915_error_ggtt_object_create(dev_priv,
ring->scratch.obj);
 
if (request->file_priv) {
struct task_struct *task;
 
rcu_read_lock();
task = pid_task(request->file_priv->file->pid,
PIDTYPE_PID);
if (task) {
strcpy(error->ring[i].comm, task->comm);
error->ring[i].pid = task->pid;
}
rcu_read_unlock();
}
}
 
error->ring[i].ringbuffer =
i915_error_object_create(dev_priv, ring->obj);
i915_error_ggtt_object_create(dev_priv, ring->buffer->obj);
 
if (ring->status_page.obj)
error->ring[i].hws_page =
i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
 
i915_gem_record_active_context(ring, error, &error->ring[i]);
 
842,7 → 982,7
i++;
error->active_bo_count[ndx] = i;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (obj->pin_count)
if (i915_gem_obj_is_pinned(obj))
i++;
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
 
876,11 → 1016,6
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
cnt++;
 
if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
cnt = 1;
 
vm = &dev_priv->gtt.base;
 
error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
892,6 → 1027,105
i915_gem_capture_vm(dev_priv, error, vm, i++);
}
 
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
struct drm_device *dev = dev_priv->dev;
 
/* General organization
* 1. Registers specific to a single generation
* 2. Registers which belong to multiple generations
* 3. Feature specific registers.
* 4. Everything else
* Please try to follow the order.
*/
 
/* 1: Registers specific to a single generation */
if (IS_VALLEYVIEW(dev)) {
error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
error->forcewake = I915_READ(FORCEWAKE_VLV);
}
 
if (IS_GEN7(dev))
error->err_int = I915_READ(GEN7_ERR_INT);
 
if (IS_GEN6(dev)) {
error->forcewake = I915_READ(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL);
error->gfx_mode = I915_READ(GFX_MODE);
}
 
/* 2: Registers which belong to multiple generations */
if (INTEL_INFO(dev)->gen >= 7)
error->forcewake = I915_READ(FORCEWAKE_MT);
 
if (INTEL_INFO(dev)->gen >= 6) {
error->derrmr = I915_READ(DERRMR);
error->error = I915_READ(ERROR_GEN6);
error->done_reg = I915_READ(DONE_REG);
}
 
/* 3: Feature specific registers */
if (IS_GEN6(dev) || IS_GEN7(dev)) {
error->gam_ecochk = I915_READ(GAM_ECOCHK);
error->gac_eco = I915_READ(GAC_ECO_BITS);
}
 
/* 4: Everything else */
if (HAS_HW_CONTEXTS(dev))
error->ccid = I915_READ(CCID);
 
if (HAS_PCH_SPLIT(dev))
error->ier = I915_READ(DEIER) | I915_READ(GTIER);
else {
if (IS_GEN2(dev))
error->ier = I915_READ16(IER);
else
error->ier = I915_READ(IER);
}
 
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
 
i915_get_extra_instdone(dev, error->extra_instdone);
}
 
static void i915_error_capture_msg(struct drm_device *dev,
struct drm_i915_error_state *error,
bool wedged,
const char *error_msg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ecode;
int ring_id = -1, len;
 
ecode = i915_error_generate_code(dev_priv, error, &ring_id);
 
len = scnprintf(error->error_msg, sizeof(error->error_msg),
"GPU HANG: ecode %d:0x%08x", ring_id, ecode);
 
if (ring_id != -1 && error->ring[ring_id].pid != -1)
len += scnprintf(error->error_msg + len,
sizeof(error->error_msg) - len,
", in %s [%d]",
error->ring[ring_id].comm,
error->ring[ring_id].pid);
 
scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
", reason: %s, action: %s",
error_msg,
wedged ? "reset" : "continue");
}
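/*
 * Illustrative only (values hypothetical): a message assembled above
 * would read something like
 *   "GPU HANG: ecode 0:0x8f2b3c11, in Xorg [419], reason: Ring hung, action: reset"
 * i.e. the ring id and generated error code, the process that owned the
 * active request (when known), the caller-supplied reason, and whether
 * a reset will be attempted.
 */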
 
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
error->reset_count = i915_reset_count(&dev_priv->gpu_error);
error->suspend_count = dev_priv->suspend_count;
}
 
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
901,19 → 1135,14
* out a structure which becomes available in debugfs for user level tools
* to pick up.
*/
void i915_capture_error_state(struct drm_device *dev)
void i915_capture_error_state(struct drm_device *dev, bool wedged,
const char *error_msg)
{
static bool warned;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
int pipe;
 
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
error = dev_priv->gpu_error.first_error;
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
return;
 
/* Account for pipe specific data like PIPE*STAT */
error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
921,52 → 1150,10
return;
}
 
DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
dev->primary->index);
DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
 
kref_init(&error->ref);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
if (HAS_HW_CONTEXTS(dev))
error->ccid = I915_READ(CCID);
 
if (HAS_PCH_SPLIT(dev))
error->ier = I915_READ(DEIER) | I915_READ(GTIER);
else if (IS_VALLEYVIEW(dev))
error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
else if (IS_GEN2(dev))
error->ier = I915_READ16(IER);
else
error->ier = I915_READ(IER);
 
if (INTEL_INFO(dev)->gen >= 6)
error->derrmr = I915_READ(DERRMR);
 
if (IS_VALLEYVIEW(dev))
error->forcewake = I915_READ(FORCEWAKE_VLV);
else if (INTEL_INFO(dev)->gen >= 7)
error->forcewake = I915_READ(FORCEWAKE_MT);
else if (INTEL_INFO(dev)->gen == 6)
error->forcewake = I915_READ(FORCEWAKE);
 
if (!HAS_PCH_SPLIT(dev))
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
 
if (INTEL_INFO(dev)->gen >= 6) {
error->error = I915_READ(ERROR_GEN6);
error->done_reg = I915_READ(DONE_REG);
}
 
if (INTEL_INFO(dev)->gen == 7)
error->err_int = I915_READ(GEN7_ERR_INT);
 
i915_get_extra_instdone(dev, error->extra_instdone);
 
i915_capture_gen_state(dev_priv, error);
i915_capture_reg_state(dev_priv, error);
i915_gem_capture_buffers(dev_priv, error);
i915_gem_record_fences(dev, error);
i915_gem_record_rings(dev, error);
976,6 → 1163,9
error->overlay = intel_overlay_capture_error_state(dev);
error->display = intel_display_capture_error_state(dev);
 
i915_error_capture_msg(dev, error, wedged, error_msg);
DRM_INFO("%s\n", error->error_msg);
 
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
if (dev_priv->gpu_error.first_error == NULL) {
dev_priv->gpu_error.first_error = error;
983,10 → 1173,21
}
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 
if (error)
if (error) {
i915_error_state_free(&error->ref);
return;
}
 
if (!warned) {
DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
warned = true;
}
}
 
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv)
{
/drivers/video/drm/i915/i915_irq.c
80,7 → 80,56
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
 
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
POSTING_READ(GEN8_##type##_IMR(which)); \
I915_WRITE(GEN8_##type##_IER(which), 0); \
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
POSTING_READ(GEN8_##type##_IIR(which)); \
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)
 
#define GEN5_IRQ_RESET(type) do { \
I915_WRITE(type##IMR, 0xffffffff); \
POSTING_READ(type##IMR); \
I915_WRITE(type##IER, 0); \
I915_WRITE(type##IIR, 0xffffffff); \
POSTING_READ(type##IIR); \
I915_WRITE(type##IIR, 0xffffffff); \
POSTING_READ(type##IIR); \
} while (0)
 
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
u32 val = I915_READ(reg); \
if (val) { \
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
(reg), val); \
I915_WRITE((reg), 0xffffffff); \
POSTING_READ(reg); \
I915_WRITE((reg), 0xffffffff); \
POSTING_READ(reg); \
} \
} while (0)
 
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)
 
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
I915_WRITE(type##IMR, (imr_val)); \
I915_WRITE(type##IER, (ier_val)); \
POSTING_READ(type##IER); \
} while (0)
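/*
 * Usage sketch for the helpers above (local names such as gt_irqs are
 * assumptions, not part of this file): the macros rely on token pasting,
 * so GEN5_IRQ_RESET(DE) touches DEIMR/DEIER/DEIIR and
 * GEN8_IRQ_RESET_NDX(GT, 0) touches GEN8_GT_IMR(0)/IER(0)/IIR(0).
 * A preinstall hook would typically do
 *
 *   GEN5_IRQ_RESET(GT);
 *   GEN5_IRQ_RESET(DE);
 *
 * and the matching postinstall would re-arm with
 *
 *   GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
 *
 * which first runs GEN5_ASSERT_IIR_IS_ZERO(GTIIR) and then programs
 * IMR/IER.  IIR is cleared (and posted) twice in the reset macros
 * because it can latch a second event behind the first.
 */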
 
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 
94,15 → 143,12
 
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
 
if (dev_priv->pc8.irqs_disabled) {
WARN(1, "IRQs disabled\n");
dev_priv->pc8.regsave.deimr &= ~mask;
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
}
 
if ((dev_priv->irq_mask & mask) != 0) {
dev_priv->irq_mask &= ~mask;
112,15 → 158,12
}
 
static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
 
if (dev_priv->pc8.irqs_disabled) {
WARN(1, "IRQs disabled\n");
dev_priv->pc8.regsave.deimr |= mask;
if (!intel_irqs_enabled(dev_priv))
return;
}
 
if ((dev_priv->irq_mask & mask) != mask) {
dev_priv->irq_mask |= mask;
141,13 → 184,8
{
assert_spin_locked(&dev_priv->irq_lock);
 
if (dev_priv->pc8.irqs_disabled) {
WARN(1, "IRQs disabled\n");
dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
interrupt_mask);
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
}
 
dev_priv->gt_irq_mask &= ~interrupt_mask;
dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
155,12 → 193,12
POSTING_READ(GTIMR);
}
 
void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
ilk_update_gt_irq(dev_priv, mask, mask);
}
 
void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
ilk_update_gt_irq(dev_priv, mask, 0);
}
179,13 → 217,8
 
assert_spin_locked(&dev_priv->irq_lock);
 
if (dev_priv->pc8.irqs_disabled) {
WARN(1, "IRQs disabled\n");
dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
interrupt_mask);
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
}
 
new_val = dev_priv->pm_irq_mask;
new_val &= ~interrupt_mask;
198,12 → 231,12
}
}
 
void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
snb_update_pm_irq(dev_priv, mask, mask);
}
 
void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
snb_update_pm_irq(dev_priv, mask, 0);
}
226,6 → 259,46
return true;
}
 
/**
* bdw_update_pm_irq - update GT interrupt 2
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*
* Copied from the snb function, updated with relevant register offsets
*/
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
{
uint32_t new_val;
 
assert_spin_locked(&dev_priv->irq_lock);
 
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
 
new_val = dev_priv->pm_irq_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
 
if (new_val != dev_priv->pm_irq_mask) {
dev_priv->pm_irq_mask = new_val;
I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
POSTING_READ(GEN8_GT_IMR(2));
}
}
 
void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
bdw_update_pm_irq(dev_priv, mask, mask);
}
 
void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
bdw_update_pm_irq(dev_priv, mask, 0);
}
 
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
244,6 → 317,53
return true;
}
 
void i9xx_check_fifo_underruns(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
 
for_each_intel_crtc(dev, crtc) {
u32 reg = PIPESTAT(crtc->pipe);
u32 pipestat;
 
if (crtc->cpu_fifo_underrun_disabled)
continue;
 
pipestat = I915_READ(reg) & 0xffff0000;
if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
continue;
 
I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg);
 
DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
}
 
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
 
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
bool enable, bool old)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0xffff0000;
 
assert_spin_locked(&dev_priv->irq_lock);
 
if (enable) {
I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg);
} else {
if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
}
}
 
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
258,7 → 378,8
}
 
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
enum pipe pipe,
bool enable, bool old)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (enable) {
269,14 → 390,11
 
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
} else {
bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
 
/* Change the state _after_ we've read out the current one. */
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
 
if (!was_enabled &&
(I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
if (old &&
I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
DRM_ERROR("uncleared fifo underrun on pipe %c\n",
pipe_name(pipe));
}
}
313,14 → 431,8
 
assert_spin_locked(&dev_priv->irq_lock);
 
if (dev_priv->pc8.irqs_disabled &&
(interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
WARN(1, "IRQs disabled\n");
dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
interrupt_mask);
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
}
 
I915_WRITE(SDEIMR, sdeimr);
POSTING_READ(SDEIMR);
346,7 → 458,7
 
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable)
bool enable, bool old)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
359,15 → 471,11
 
ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
} else {
uint32_t tmp = I915_READ(SERR_INT);
bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
 
/* Change the state _after_ we've read out the current one. */
ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
 
if (!was_enabled &&
(tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
if (old && I915_READ(SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
transcoder_name(pch_transcoder));
}
}
387,36 → 495,55
*
* Returns the previous state of underrun reporting.
*/
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool ret;
bool old;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
assert_spin_locked(&dev_priv->irq_lock);
 
ret = !intel_crtc->cpu_fifo_underrun_disabled;
 
if (enable == ret)
goto done;
 
old = !intel_crtc->cpu_fifo_underrun_disabled;
intel_crtc->cpu_fifo_underrun_disabled = !enable;
 
if (IS_GEN5(dev) || IS_GEN6(dev))
if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (IS_GEN5(dev) || IS_GEN6(dev))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (IS_GEN8(dev))
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
 
done:
return old;
}
 
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
bool ret;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
return ret;
}
 
static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
return !intel_crtc->cpu_fifo_underrun_disabled;
}
 
/**
* intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
* @dev: drm device
439,7 → 566,7
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool ret;
bool old;
 
/*
* NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
452,63 → 579,132
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
 
ret = !intel_crtc->pch_fifo_underrun_disabled;
 
if (enable == ret)
goto done;
 
old = !intel_crtc->pch_fifo_underrun_disabled;
intel_crtc->pch_fifo_underrun_disabled = !enable;
 
if (HAS_PCH_IBX(dev))
ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
else
cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);
 
done:
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return ret;
return old;
}
 
 
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask, u32 status_mask)
{
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
 
assert_spin_locked(&dev_priv->irq_lock);
 
if ((pipestat & mask) == mask)
if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
status_mask & ~PIPESTAT_INT_STATUS_MASK,
"pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
pipe_name(pipe), enable_mask, status_mask))
return;
 
if ((pipestat & enable_mask) == enable_mask)
return;
 
dev_priv->pipestat_irq_mask[pipe] |= status_mask;
 
/* Enable the interrupt, clear any pending status */
pipestat |= mask | (mask >> 16);
pipestat |= enable_mask | status_mask;
I915_WRITE(reg, pipestat);
POSTING_READ(reg);
}
 
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask, u32 status_mask)
{
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
 
assert_spin_locked(&dev_priv->irq_lock);
 
if ((pipestat & mask) == 0)
if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
status_mask & ~PIPESTAT_INT_STATUS_MASK,
"pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
pipe_name(pipe), enable_mask, status_mask))
return;
 
pipestat &= ~mask;
if ((pipestat & enable_mask) == 0)
return;
 
dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
 
pipestat &= ~enable_mask;
I915_WRITE(reg, pipestat);
POSTING_READ(reg);
}
 
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
u32 enable_mask = status_mask << 16;
 
/*
* On pipe A we don't support the PSR interrupt yet,
* on pipe B and C the same bit MBZ.
*/
if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
return 0;
/*
* On pipe B and C we don't support the PSR interrupt yet, on pipe
* A the same bit is for perf counters which we don't use either.
*/
if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
return 0;
 
enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
SPRITE0_FLIP_DONE_INT_EN_VLV |
SPRITE1_FLIP_DONE_INT_EN_VLV);
if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
 
return enable_mask;
}
 
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 status_mask)
{
u32 enable_mask;
 
if (IS_VALLEYVIEW(dev_priv->dev))
enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
status_mask);
else
enable_mask = status_mask << 16;
__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
 
void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 status_mask)
{
u32 enable_mask;
 
if (IS_VALLEYVIEW(dev_priv->dev))
enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
status_mask);
else
enable_mask = status_mask << 16;
__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
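/*
 * Illustrative use of the helpers above (the status bit name is assumed
 * from i915_reg.h): callers pass only the status bit and let the enable
 * bit be derived, e.g. to get vblank interrupts on pipe A:
 *
 *   spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 *   i915_enable_pipestat(dev_priv, PIPE_A,
 *                        PIPE_START_VBLANK_INTERRUPT_STATUS);
 *   spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 *
 * On most platforms enable_mask is simply status_mask << 16; on VLV the
 * PSR and sprite flip-done bits need the fixups performed in
 * vlv_get_pipestat_enable_mask().
 */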
 
/**
* i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
*/
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
516,10 → 712,10
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, PIPE_A,
PIPE_LEGACY_BLC_EVENT_ENABLE);
PIPE_LEGACY_BLC_EVENT_STATUS);
 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
536,7 → 732,7
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Locking is horribly broken here, but whatever. */
549,6 → 745,56
}
}
 
/*
* This timing diagram depicts the video signal in and
* around the vertical blanking period.
*
* Assumptions about the fictitious mode used in this example:
* vblank_start >= 3
* vsync_start = vblank_start + 1
* vsync_end = vblank_start + 2
* vtotal = vblank_start + 3
*
* start of vblank:
* latch double buffered registers
* increment frame counter (ctg+)
* generate start of vblank interrupt (gen4+)
* |
* | frame start:
* | generate frame start interrupt (aka. vblank interrupt) (gmch)
* | may be shifted forward 1-3 extra lines via PIPECONF
* | |
* | | start of vsync:
* | | generate vsync interrupt
* | | |
* ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
* . \hs/ . \hs/ \hs/ \hs/ . \hs/
* ----va---> <-----------------vb--------------------> <--------va-------------
* | | <----vs-----> |
* -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
* -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
* -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
* | | |
* last visible pixel first visible pixel
* | increment frame counter (gen3/4)
* pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
*
* x = horizontal active
* _ = horizontal blanking
* hs = horizontal sync
* va = vertical active
* vb = vertical blanking
* vs = vertical sync
* vbs = vblank_start (number)
*
* Summary:
* - most events happen at the start of horizontal sync
* - frame start happens at the start of horizontal blank, 1-4 lines
* (depending on PIPECONF settings) after the start of vblank
* - gen3/4 pixel and frame counter are synchronized with the start
* of horizontal active on the first line of vertical active
*/
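/*
 * Worked example (hypothetical numbers) for the pixel-count conversion
 * in i915_get_vblank_counter() below: with htotal = 2200,
 * hsync_start = 2008 and vblank_start = 1080, the start-of-vblank
 * threshold becomes 1080 * 2200 - (2200 - 2008) = 2375808 pixels,
 * i.e. the pixel counter value at the start of hsync on the last
 * active line, matching the diagram above.
 */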
 
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
/* Gen2 doesn't have a hardware frame counter */
560,10 → 806,10
*/
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
u32 high1, high2, low, pixel, vbl_start;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
 
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
577,18 → 823,28
const struct drm_display_mode *mode =
&intel_crtc->config.adjusted_mode;
 
vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
htotal = mode->crtc_htotal;
hsync_start = mode->crtc_hsync_start;
vbl_start = mode->crtc_vblank_start;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vbl_start = DIV_ROUND_UP(vbl_start, 2);
} else {
enum transcoder cpu_transcoder =
intel_pipe_to_cpu_transcoder(dev_priv, pipe);
u32 htotal;
enum transcoder cpu_transcoder = (enum transcoder) pipe;
 
htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
if ((I915_READ(PIPECONF(cpu_transcoder)) &
PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
vbl_start = DIV_ROUND_UP(vbl_start, 2);
}
 
/* Convert to pixel count */
vbl_start *= htotal;
}
 
/* Start of vblank event occurs at start of hsync */
vbl_start -= htotal - hsync_start;
 
high_frame = PIPEFRAME(pipe);
low_frame = PIPEFRAMEPIXEL(pipe);
 
617,7 → 873,7
 
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = PIPE_FRMCOUNT_GM45(pipe);
 
if (!i915_pipe_enabled(dev, pipe)) {
631,33 → 887,29
 
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
 
static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t status;
const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
enum pipe pipe = crtc->pipe;
int position, vtotal;
 
if (INTEL_INFO(dev)->gen < 7) {
status = pipe == PIPE_A ?
DE_PIPEA_VBLANK :
DE_PIPEB_VBLANK;
} else {
switch (pipe) {
default:
case PIPE_A:
status = DE_PIPEA_VBLANK_IVB;
break;
case PIPE_B:
status = DE_PIPEB_VBLANK_IVB;
break;
case PIPE_C:
status = DE_PIPEC_VBLANK_IVB;
break;
}
}
vtotal = mode->crtc_vtotal;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
 
return __raw_i915_read32(dev_priv, DEISR) & status;
if (IS_GEN2(dev))
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
/*
* See update_scanline_offset() for the details on the
* scanline_offset adjustment.
*/
return (position + crtc->scanline_offset) % vtotal;
}
 
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
669,7 → 921,7
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
int position;
int vbl_start, vbl_end, htotal, vtotal;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
bool in_vbl = true;
int ret = 0;
unsigned long irqflags;
681,6 → 933,7
}
 
htotal = mode->crtc_htotal;
hsync_start = mode->crtc_hsync_start;
vtotal = mode->crtc_vtotal;
vbl_start = mode->crtc_vblank_start;
vbl_end = mode->crtc_vblank_end;
707,48 → 960,8
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
if (IS_GEN2(dev))
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
if (HAS_PCH_SPLIT(dev)) {
/*
* The scanline counter increments at the leading edge
* of hsync, ie. it completely misses the active portion
* of the line. Fix up the counter at both edges of vblank
* to get a more accurate picture whether we're in vblank
* or not.
*/
in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
if ((in_vbl && position == vbl_start - 1) ||
(!in_vbl && position == vbl_end - 1))
position = (position + 1) % vtotal;
position = __intel_get_crtc_scanline(intel_crtc);
} else {
/*
* ISR vblank status bits don't work the way we'd want
* them to work on non-PCH platforms (for
* ilk_pipe_in_vblank_locked()), and there doesn't
* appear any other way to determine if we're currently
* in vblank.
*
* Instead let's assume that we're already in vblank if
* we got called from the vblank interrupt and the
* scanline counter value indicates that we're on the
* line just prior to vblank start. This should result
* in the correct answer, unless the vblank interrupt
* delivery really got delayed for almost exactly one
* full frame/field.
*/
if (flags & DRM_CALLED_FROM_VBLIRQ &&
position == vbl_start - 1) {
position = (position + 1) % vtotal;
 
/* Signal this correction as "applied". */
ret |= 0x8;
}
}
} else {
/* Have access to pixelcount since start of frame.
* We can split this into vertical and horizontal
* scanout position.
759,6 → 972,29
vbl_start *= htotal;
vbl_end *= htotal;
vtotal *= htotal;
 
/*
* In interlaced modes, the pixel counter counts all pixels,
* so one field will have htotal more pixels. To keep the reported
* position from jumping backwards when the pixel counter is beyond
* the length of the shorter field, just clamp the position to the
* length of the shorter field. This
* matches how the scanline counter based position works since
* the scanline counter doesn't count the two half lines.
*/
if (position >= vtotal)
position = vtotal - 1;
 
/*
* Start of vblank interrupt is triggered at start of hsync,
* just prior to the first active line of vblank. However we
* consider lines to start at the leading edge of horizontal
* active. So, should we get here before we've crossed into
* the horizontal active of the first line in vblank, we would
* not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
* always add htotal-hsync_start to the current pixel position.
*/
position = (position + htotal - hsync_start) % vtotal;
}
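/*
 * Worked example (hypothetical numbers) for the pixel-counter path
 * above: with htotal = 2200, hsync_start = 2008 and vtotal = 1125
 * lines, a raw reading of 2375808 taken right when the vblank
 * interrupt fires becomes (2375808 + 192) % 2475000 = 2376000, which
 * equals vbl_start (1080 * 2200), so in_vbl is reported correctly at
 * the interrupt.
 */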
 
 
794,6 → 1030,19
return ret;
}
 
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
unsigned long irqflags;
int position;
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
position = __intel_get_crtc_scanline(crtc);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
return position;
}
 
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
int *max_error,
struct timeval *vblank_time,
839,7 → 1088,7
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
drm_get_connector_name(connector),
connector->name,
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));
 
853,8 → 1102,8
 
static void i915_hotplug_work_func(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
hotplug_work);
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_connector *intel_connector;
865,10 → 1114,6
bool changed = false;
u32 hpd_event_bits;
 
/* HPD irq before everything is fully set up. */
if (!dev_priv->enable_hotplug_processing)
return;
 
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
878,6 → 1123,8
dev_priv->hpd_event_bits = 0;
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
if (!intel_connector->encoder)
continue;
intel_encoder = intel_connector->encoder;
if (intel_encoder->hpd_pin > HPD_NONE &&
dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
884,7 → 1131,7
connector->polled == DRM_CONNECTOR_POLL_HPD) {
DRM_INFO("HPD interrupt storm detected on connector %s: "
"switching from hotplug detection to polling\n",
drm_get_connector_name(connector));
connector->name);
dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
connector->polled = DRM_CONNECTOR_POLL_CONNECT
| DRM_CONNECTOR_POLL_DISCONNECT;
892,22 → 1139,19
}
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
drm_get_connector_name(connector), intel_encoder->hpd_pin);
connector->name, intel_encoder->hpd_pin);
}
}
/* If there were no outputs to poll, polling was disabled;
* therefore make sure it's re-enabled when disabling HPD on
* some connectors. */
if (hpd_disabled) {
drm_kms_helper_poll_enable(dev);
mod_timer(&dev_priv->hotplug_reenable_timer,
GetTimerTicks() + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
}
 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
if (!intel_connector->encoder)
continue;
intel_encoder = intel_connector->encoder;
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
if (intel_encoder->hot_plug)
918,13 → 1162,11
}
mutex_unlock(&mode_config->mutex);
 
if (changed)
drm_kms_helper_hotplug_event(dev);
}
 
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
u8 new_delay;
 
962,9 → 1204,9
}
 
static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
if (ring->obj == NULL)
if (!intel_ring_initialized(ring))
return;
 
trace_i915_gem_request_complete(ring);
972,10 → 1214,135
wake_up_all(&ring->irq_queue);
}
 
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
struct intel_rps_ei *rps_ei)
{
u32 cz_ts, cz_freq_khz;
u32 render_count, media_count;
u32 elapsed_render, elapsed_media, elapsed_time;
u32 residency = 0;
 
cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
 
render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
 
if (rps_ei->cz_clock == 0) {
rps_ei->cz_clock = cz_ts;
rps_ei->render_c0 = render_count;
rps_ei->media_c0 = media_count;
 
return dev_priv->rps.cur_freq;
}
 
elapsed_time = cz_ts - rps_ei->cz_clock;
rps_ei->cz_clock = cz_ts;
 
elapsed_render = render_count - rps_ei->render_c0;
rps_ei->render_c0 = render_count;
 
elapsed_media = media_count - rps_ei->media_c0;
rps_ei->media_c0 = media_count;
 
/* Convert all the counters into a common unit of milliseconds */
elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
elapsed_render /= cz_freq_khz;
elapsed_media /= cz_freq_khz;
 
/*
* Calculate overall C0 residency percentage
* only if the elapsed time is non-zero
*/
if (elapsed_time) {
residency =
((max(elapsed_render, elapsed_media) * 100)
/ elapsed_time);
}
 
return residency;
}
 
/**
* vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
* busy-ness calculated from C0 counters of render & media power wells
* @dev_priv: DRM device private
*
*/
static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
u32 residency_C0_up = 0, residency_C0_down = 0;
u8 new_delay, adj;
 
dev_priv->rps.ei_interrupt_count++;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
 
if (dev_priv->rps.up_ei.cz_clock == 0) {
vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
return dev_priv->rps.cur_freq;
}
 
 
/*
* To throttle down, C0 residency should be less than the down threshold
* for consecutive EI intervals. So calculate the down EI counters
* once every VLV_INT_COUNT_FOR_DOWN_EI interrupts.
*/
if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
 
dev_priv->rps.ei_interrupt_count = 0;
 
residency_C0_down = vlv_c0_residency(dev_priv,
&dev_priv->rps.down_ei);
} else {
residency_C0_up = vlv_c0_residency(dev_priv,
&dev_priv->rps.up_ei);
}
 
new_delay = dev_priv->rps.cur_freq;
 
adj = dev_priv->rps.last_adj;
/* C0 residency is greater than UP threshold. Increase Frequency */
if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
if (adj > 0)
adj *= 2;
else
adj = 1;
 
if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
new_delay = dev_priv->rps.cur_freq + adj;
 
/*
* For better performance, jump directly
* to RPe if we're below it.
*/
if (new_delay < dev_priv->rps.efficient_freq)
new_delay = dev_priv->rps.efficient_freq;
 
} else if (!dev_priv->rps.ei_interrupt_count &&
(residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
if (adj < 0)
adj *= 2;
else
adj = -1;
/*
* This means C0 residency has been below the down threshold for
* a period of VLV_INT_COUNT_FOR_DOWN_EI intervals, so reduce the freq.
*/
if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
new_delay = dev_priv->rps.cur_freq + adj;
}
 
return new_delay;
}
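/*
 * Example of the decision above (values hypothetical): with
 * last_adj = 1 and an up-EI residency of 92% >= VLV_RP_UP_EI_THRESHOLD,
 * adj doubles to 2 and the request becomes cur_freq + 2 (bumped to
 * efficient_freq if still below RPe); a down-EI residency under
 * VLV_RP_DOWN_EI_THRESHOLD instead walks the frequency down with
 * growing negative steps.
 */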
 
static void gen6_pm_rps_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps.work);
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, rps.work);
u32 pm_iir;
int new_delay, adj;
 
982,14 → 1349,18
spin_lock_irq(&dev_priv->irq_lock);
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
/* Make sure not to corrupt PMIMR state used by ringbuffer code */
snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
if (INTEL_INFO(dev_priv->dev)->gen >= 8)
gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
else {
/* Make sure not to corrupt PMIMR state used by ringbuffer */
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
}
spin_unlock_irq(&dev_priv->irq_lock);
 
/* Make sure we didn't queue anything we're not going to process. */
WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);
WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
 
if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
if ((pm_iir & dev_priv->pm_rps_events) == 0)
return;
 
mutex_lock(&dev_priv->rps.hw_lock);
998,30 → 1369,36
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (adj > 0)
adj *= 2;
else
adj = 1;
new_delay = dev_priv->rps.cur_delay + adj;
else {
/* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
}
new_delay = dev_priv->rps.cur_freq + adj;
 
/*
* For better performance, jump directly
* to RPe if we're below it.
*/
if (new_delay < dev_priv->rps.rpe_delay)
new_delay = dev_priv->rps.rpe_delay;
if (new_delay < dev_priv->rps.efficient_freq)
new_delay = dev_priv->rps.efficient_freq;
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
new_delay = dev_priv->rps.rpe_delay;
if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
new_delay = dev_priv->rps.efficient_freq;
else
new_delay = dev_priv->rps.min_delay;
new_delay = dev_priv->rps.min_freq_softlimit;
adj = 0;
} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0)
adj *= 2;
else
adj = -1;
new_delay = dev_priv->rps.cur_delay + adj;
else {
/* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
}
new_delay = dev_priv->rps.cur_freq + adj;
} else { /* unknown event */
new_delay = dev_priv->rps.cur_delay;
new_delay = dev_priv->rps.cur_freq;
}
 
/* sysfs frequency interfaces may have snuck in while servicing the
1028,9 → 1405,11
* interrupt
*/
new_delay = clamp_t(int, new_delay,
dev_priv->rps.min_delay, dev_priv->rps.max_delay);
dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
dev_priv->rps.min_freq_softlimit,
dev_priv->rps.max_freq_softlimit);
 
dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
 
if (IS_VALLEYVIEW(dev_priv->dev))
valleyview_set_rps(dev_priv->dev, new_delay);
else
1051,8 → 1430,8
*/
static void ivybridge_parity_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
l3_parity.error_work);
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, l3_parity.error_work);
u32 error_status, row, bank, subbank;
char *parity_event[6];
uint32_t misccpctl;
1102,7 → 1481,7
out:
WARN_ON(dev_priv->l3_parity.which_slice);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
mutex_unlock(&dev_priv->dev->struct_mutex);
1110,13 → 1489,13
 
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!HAS_L3_DPF(dev))
return;
 
spin_lock(&dev_priv->irq_lock);
ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
spin_unlock(&dev_priv->irq_lock);
 
iir &= GT_PARITY_ERROR(dev);
1156,8 → 1535,8
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
i915_handle_error(dev, false);
i915_handle_error(dev, false, "GT error interrupt 0x%08x",
gt_iir);
}
 
if (gt_iir & GT_PARITY_ERROR(dev))
1164,6 → 1543,19
ivybridge_parity_error_irq_handler(dev, gt_iir);
}
 
static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
if ((pm_iir & dev_priv->pm_rps_events) == 0)
return;
 
spin_lock(&dev_priv->irq_lock);
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
spin_unlock(&dev_priv->irq_lock);
 
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
 
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 master_ctl)
1175,6 → 1567,7
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
tmp = I915_READ(GEN8_GT_IIR(0));
if (tmp) {
I915_WRITE(GEN8_GT_IIR(0), tmp);
ret = IRQ_HANDLED;
rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
1182,31 → 1575,44
notify_ring(dev, &dev_priv->ring[RCS]);
if (bcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[BCS]);
I915_WRITE(GEN8_GT_IIR(0), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
 
if (master_ctl & GEN8_GT_VCS1_IRQ) {
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
tmp = I915_READ(GEN8_GT_IIR(1));
if (tmp) {
I915_WRITE(GEN8_GT_IIR(1), tmp);
ret = IRQ_HANDLED;
vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
I915_WRITE(GEN8_GT_IIR(1), tmp);
vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS2]);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
 
if (master_ctl & GEN8_GT_PM_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(2));
if (tmp & dev_priv->pm_rps_events) {
I915_WRITE(GEN8_GT_IIR(2),
tmp & dev_priv->pm_rps_events);
ret = IRQ_HANDLED;
gen8_rps_irq_handler(dev_priv, tmp);
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
}
 
if (master_ctl & GEN8_GT_VECS_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(3));
if (tmp) {
I915_WRITE(GEN8_GT_IIR(3), tmp);
ret = IRQ_HANDLED;
vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VECS]);
I915_WRITE(GEN8_GT_IIR(3), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
1217,34 → 1623,132
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
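/*
 * With the values above, more than HPD_STORM_THRESHOLD (5) hotplug
 * interrupts on a single pin within HPD_STORM_DETECT_PERIOD (1000 ms)
 * is treated as an interrupt storm: the pin is marked HPD_MARK_DISABLED
 * and the hotplug work function switches that connector over to polled
 * detection.
 */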
 
static int ilk_port_to_hotplug_shift(enum port port)
{
switch (port) {
case PORT_A:
case PORT_E:
default:
return -1;
case PORT_B:
return 0;
case PORT_C:
return 8;
case PORT_D:
return 16;
}
}
 
static int g4x_port_to_hotplug_shift(enum port port)
{
switch (port) {
case PORT_A:
case PORT_E:
default:
return -1;
case PORT_B:
return 17;
case PORT_C:
return 19;
case PORT_D:
return 21;
}
}
 
static inline enum port get_port_from_pin(enum hpd_pin pin)
{
switch (pin) {
case HPD_PORT_B:
return PORT_B;
case HPD_PORT_C:
return PORT_C;
case HPD_PORT_D:
return PORT_D;
default:
return PORT_A; /* no hpd */
}
}
 
static inline void intel_hpd_irq_handler(struct drm_device *dev,
u32 hotplug_trigger,
u32 dig_hotplug_reg,
const u32 *hpd)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
enum port port;
bool storm_detected = false;
bool queue_dig = false, queue_hp = false;
u32 dig_shift;
u32 dig_port_mask = 0;
 
if (!hotplug_trigger)
return;
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
hotplug_trigger, dig_hotplug_reg);
 
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
if (!(hpd[i] & hotplug_trigger))
continue;
 
WARN_ONCE(hpd[i] & hotplug_trigger &&
dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
port = get_port_from_pin(i);
if (port && dev_priv->hpd_irq_port[port]) {
bool long_hpd;
 
if (IS_G4X(dev)) {
dig_shift = g4x_port_to_hotplug_shift(port);
long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
} else {
dig_shift = ilk_port_to_hotplug_shift(port);
long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
}
 
DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
/* For long HPD pulses we want the digital queue to run,
but we still want HPD storm detection to function. */
if (long_hpd) {
dev_priv->long_hpd_port_mask |= (1 << port);
dig_port_mask |= hpd[i];
} else {
/* for short HPD just trigger the digital queue */
dev_priv->short_hpd_port_mask |= (1 << port);
hotplug_trigger &= ~hpd[i];
}
queue_dig = true;
}
}
 
for (i = 1; i < HPD_NUM_PINS; i++) {
if (hpd[i] & hotplug_trigger &&
dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
/*
* On GMCH platforms the interrupt mask bits only
* prevent irq generation, not the setting of the
* hotplug bits themselves. So only WARN about unexpected
* interrupts on saner platforms.
*/
WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
"Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
hotplug_trigger, i, hpd[i]);
 
continue;
}
 
if (!(hpd[i] & hotplug_trigger) ||
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
continue;
 
if (!(dig_port_mask & hpd[i])) {
dev_priv->hpd_event_bits |= (1 << i);
if (!time_in_range(GetTimerTicks(), dev_priv->hpd_stats[i].hpd_last_jiffies,
queue_hp = true;
}
 
if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
dev_priv->hpd_stats[i].hpd_last_jiffies
+ msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
dev_priv->hpd_stats[i].hpd_last_jiffies = GetTimerTicks();
dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
dev_priv->hpd_stats[i].hpd_cnt = 0;
DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1269,12 → 1773,13
* queue for otherwise the flush_work in the pageflip code will
* deadlock.
*/
if (queue_hp)
schedule_work(&dev_priv->hotplug_work);
}
 
static void gmbus_irq_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
 
wake_up_all(&dev_priv->gmbus_wait_queue);
}
1281,7 → 1786,7
 
static void dp_aux_irq_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
 
wake_up_all(&dev_priv->gmbus_wait_queue);
}
1387,10 → 1892,10
* the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
if (pm_iir & GEN6_PM_RPS_EVENTS) {
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
spin_unlock(&dev_priv->irq_lock);
 
queue_work(dev_priv->wq, &dev_priv->rps.work);
1401,58 → 1906,70
notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
 
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
i915_handle_error(dev_priv->dev, false);
i915_handle_error(dev_priv->dev, false,
"VEBOX CS error interrupt 0x%08x",
pm_iir);
}
}
}
 
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 iir, gt_iir, pm_iir;
irqreturn_t ret = IRQ_NONE;
unsigned long irqflags;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pipe_stats[I915_MAX_PIPES] = { };
int pipe;
u32 pipe_stats[I915_MAX_PIPES];
 
atomic_inc(&dev_priv->irq_received);
spin_lock(&dev_priv->irq_lock);
for_each_pipe(pipe) {
int reg;
u32 mask, iir_bit = 0;
 
while (true) {
iir = I915_READ(VLV_IIR);
gt_iir = I915_READ(GTIIR);
pm_iir = I915_READ(GEN6_PMIIR);
/*
* PIPESTAT bits get signalled even when the interrupt is
* disabled with the mask bits, and some of the status bits do
* not generate interrupts at all (like the underrun bit). Hence
* we need to be careful that we only handle what we want to
* handle.
*/
mask = 0;
if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
mask |= PIPE_FIFO_UNDERRUN_STATUS;
 
if (gt_iir == 0 && pm_iir == 0 && iir == 0)
goto out;
switch (pipe) {
case PIPE_A:
iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case PIPE_B:
iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
case PIPE_C:
iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
break;
}
if (iir & iir_bit)
mask |= dev_priv->pipestat_irq_mask[pipe];
 
ret = IRQ_HANDLED;
if (!mask)
continue;
 
snb_gt_irq_handler(dev, dev_priv, gt_iir);
reg = PIPESTAT(pipe);
mask |= PIPESTAT_INT_ENABLE_MASK;
pipe_stats[pipe] = I915_READ(reg) & mask;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg);
 
/*
* Clear the PIPE*STAT regs before the IIR
*/
if (pipe_stats[pipe] & 0x8000ffff) {
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
DRM_DEBUG_DRIVER("pipe %c underrun\n",
pipe_name(pipe));
if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
PIPESTAT_INT_STATUS_MASK))
I915_WRITE(reg, pipe_stats[pipe]);
}
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
spin_unlock(&dev_priv->irq_lock);
 
for_each_pipe(pipe) {
// if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
// drm_handle_vblank(dev, pipe);
 
if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
// intel_prepare_page_flip(dev, pipe);
// intel_finish_page_flip(dev, pipe);
}
1459,48 → 1976,141
 
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
 
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
}
 
/* Consume port. Then clear IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
gmbus_irq_handler(dev);
}
 
static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_status) {
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
/*
* Make sure hotplug status is cleared before we clear IIR, or else we
* may miss hotplug events.
*/
POSTING_READ(PORT_HOTPLUG_STAT);
 
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
if (IS_G4X(dev)) {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
 
if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
dp_aux_irq_handler(dev);
intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
} else {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
 
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
}
 
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
gmbus_irq_handler(dev);
if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
dp_aux_irq_handler(dev);
}
}
 
if (pm_iir)
gen6_rps_irq_handler(dev_priv, pm_iir);
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, gt_iir, pm_iir;
irqreturn_t ret = IRQ_NONE;
 
while (true) {
/* Find, clear, then process each source of interrupt */
 
gt_iir = I915_READ(GTIIR);
if (gt_iir)
I915_WRITE(GTIIR, gt_iir);
 
pm_iir = I915_READ(GEN6_PMIIR);
if (pm_iir)
I915_WRITE(GEN6_PMIIR, pm_iir);
 
iir = I915_READ(VLV_IIR);
if (iir) {
/* Consume port before clearing IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT)
i9xx_hpd_irq_handler(dev);
I915_WRITE(VLV_IIR, iir);
}
 
if (gt_iir == 0 && pm_iir == 0 && iir == 0)
goto out;
 
ret = IRQ_HANDLED;
 
if (gt_iir)
snb_gt_irq_handler(dev, dev_priv, gt_iir);
if (pm_iir)
gen6_rps_irq_handler(dev_priv, pm_iir);
/* Call regardless, as some status bits might not be
* signalled in iir */
valleyview_pipestat_irq_handler(dev, iir);
}
 
out:
return ret;
}
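 
/*
 * Illustrative sketch, not part of the driver: the "find, clear, then
 * process" ordering used by the handlers above, reduced to one interrupt
 * source. The helper name is hypothetical; the registers and callees are
 * the ones already used in this file.
 */
static irqreturn_t example_single_source_irq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir = I915_READ(VLV_IIR);	/* 1. find the pending events */

	if (iir == 0)
		return IRQ_NONE;

	/* 2. clear IIR before processing, so an event arriving while we
	 * work re-latches IIR and raises a new interrupt instead of being
	 * lost */
	I915_WRITE(VLV_IIR, iir);

	/* 3. process what was latched */
	valleyview_pipestat_irq_handler(dev, iir);

	return IRQ_HANDLED;
}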
 
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 master_ctl, iir;
irqreturn_t ret = IRQ_NONE;
 
for (;;) {
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
iir = I915_READ(VLV_IIR);
 
if (master_ctl == 0 && iir == 0)
break;
 
ret = IRQ_HANDLED;
 
I915_WRITE(GEN8_MASTER_IRQ, 0);
 
/* Find, clear, then process each source of interrupt */
 
if (iir) {
/* Consume port before clearing IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT)
i9xx_hpd_irq_handler(dev);
I915_WRITE(VLV_IIR, iir);
}
 
gen8_gt_irq_handler(dev, dev_priv, master_ctl);
 
/* Call regardless, as some status bits might not be
* signalled in iir */
valleyview_pipestat_irq_handler(dev, iir);
 
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
}
 
return ret;
}
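 
/*
 * Illustrative sketch, not part of the driver: the master-control
 * bracketing used by the gen8-style handlers above, shown in isolation.
 * The helper name is hypothetical; the register and enable bit are those
 * used by cherryview_irq_handler.
 */
static void example_with_master_irq_disabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* gate new interrupts while the IIR registers are inspected */
	I915_WRITE(GEN8_MASTER_IRQ, 0);

	/* ... find, clear and process the individual interrupt sources ... */

	/* re-enable and flush the write so new events can assert the line */
	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);
}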
 
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
u32 dig_hotplug_reg;
 
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
 
intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
 
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
1538,12 → 2148,12
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
false))
DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
DRM_ERROR("PCH transcoder A FIFO underrun\n");
 
if (pch_iir & SDE_TRANSB_FIFO_UNDER)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
false))
DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
DRM_ERROR("PCH transcoder B FIFO underrun\n");
}
 
static void ivb_err_int_handler(struct drm_device *dev)
1559,7 → 2169,7
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
false))
DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
DRM_ERROR("Pipe %c FIFO underrun\n",
pipe_name(pipe));
}
 
1585,17 → 2195,17
if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
false))
DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
DRM_ERROR("PCH transcoder A FIFO underrun\n");
 
if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
false))
DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
DRM_ERROR("PCH transcoder B FIFO underrun\n");
 
if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
false))
DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
DRM_ERROR("PCH transcoder C FIFO underrun\n");
 
I915_WRITE(SERR_INT, serr_int);
}
1602,12 → 2212,16
 
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
u32 dig_hotplug_reg;
 
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
 
intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
 
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
SDE_AUDIO_POWER_SHIFT_CPT);
1657,7 → 2271,7
 
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
DRM_ERROR("Pipe %c FIFO underrun\n",
pipe_name(pipe));
 
if (de_iir & DE_PIPE_CRC_DONE(pipe))
1690,7 → 2304,7
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe i;
enum pipe pipe;
 
if (de_iir & DE_ERR_INT_IVB)
ivb_err_int_handler(dev);
1701,14 → 2315,14
if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(dev);
 
for_each_pipe(i) {
// if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
// drm_handle_vblank(dev, i);
for_each_pipe(pipe) {
// if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
// drm_handle_vblank(dev, pipe);
 
/* plane/pipes map 1:1 on ilk+ */
if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
// intel_prepare_page_flip(dev, i);
// intel_finish_page_flip_plane(dev, i);
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
// intel_prepare_page_flip(dev, pipe);
// intel_finish_page_flip_plane(dev, pipe);
}
}
 
1723,15 → 2337,21
}
}
 
/*
* To handle irqs with the minimum potential races with fresh interrupts, we:
* 1 - Disable Master Interrupt Control.
* 2 - Find the source(s) of the interrupt.
* 3 - Clear the Interrupt Identity bits (IIR).
* 4 - Process the interrupt(s) that had bits set in the IIRs.
* 5 - Re-enable Master Interrupt Control.
*/
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
 
atomic_inc(&dev_priv->irq_received);
 
/* We get interrupts on unclaimed registers, so check for this before we
* do any I915_{READ,WRITE}. */
intel_uncore_check_errors(dev);
1752,32 → 2372,34
POSTING_READ(SDEIER);
}
 
/* Find, clear, then process each source of interrupt */
 
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
if (INTEL_INFO(dev)->gen >= 6)
snb_gt_irq_handler(dev, dev_priv, gt_iir);
else
ilk_gt_irq_handler(dev, dev_priv, gt_iir);
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
}
 
de_iir = I915_READ(DEIIR);
if (de_iir) {
I915_WRITE(DEIIR, de_iir);
ret = IRQ_HANDLED;
if (INTEL_INFO(dev)->gen >= 7)
ivb_display_irq_handler(dev, de_iir);
else
ilk_display_irq_handler(dev, de_iir);
I915_WRITE(DEIIR, de_iir);
ret = IRQ_HANDLED;
}
 
if (INTEL_INFO(dev)->gen >= 6) {
u32 pm_iir = I915_READ(GEN6_PMIIR);
if (pm_iir) {
gen6_rps_irq_handler(dev_priv, pm_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
ret = IRQ_HANDLED;
gen6_rps_irq_handler(dev_priv, pm_iir);
}
}
 
1800,8 → 2422,6
uint32_t tmp = 0;
enum pipe pipe;
 
atomic_inc(&dev_priv->irq_received);
 
master_ctl = I915_READ(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
if (!master_ctl)
1810,37 → 2430,37
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
 
/* Find, clear, then process each source of interrupt */
 
ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
 
if (master_ctl & GEN8_DE_MISC_IRQ) {
tmp = I915_READ(GEN8_DE_MISC_IIR);
if (tmp) {
I915_WRITE(GEN8_DE_MISC_IIR, tmp);
ret = IRQ_HANDLED;
if (tmp & GEN8_DE_MISC_GSE)
intel_opregion_asle_intr(dev);
else if (tmp)
else
DRM_ERROR("Unexpected DE Misc interrupt\n");
}
else
DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
 
if (tmp) {
I915_WRITE(GEN8_DE_MISC_IIR, tmp);
ret = IRQ_HANDLED;
}
}
 
if (master_ctl & GEN8_DE_PORT_IRQ) {
tmp = I915_READ(GEN8_DE_PORT_IIR);
if (tmp) {
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
ret = IRQ_HANDLED;
if (tmp & GEN8_AUX_CHANNEL_A)
dp_aux_irq_handler(dev);
else if (tmp)
else
DRM_ERROR("Unexpected DE Port interrupt\n");
}
else
DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
 
if (tmp) {
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
ret = IRQ_HANDLED;
}
}
 
for_each_pipe(pipe) {
uint32_t pipe_iir;
1849,10 → 2469,13
continue;
 
pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
if (pipe_iir) {
ret = IRQ_HANDLED;
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
// if (pipe_iir & GEN8_PIPE_VBLANK)
// drm_handle_vblank(dev, pipe);
// intel_pipe_handle_vblank(dev, pipe);
 
if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
// intel_prepare_page_flip(dev, pipe);
// intel_finish_page_flip_plane(dev, pipe);
}
1863,7 → 2486,7
if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
false))
DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
DRM_ERROR("Pipe %c FIFO underrun\n",
pipe_name(pipe));
}
 
1872,10 → 2495,6
pipe_name(pipe),
pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
}
 
if (pipe_iir) {
ret = IRQ_HANDLED;
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
} else
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
}
1887,14 → 2506,14
* on older pch-split platforms. But this needs testing.
*/
u32 pch_iir = I915_READ(SDEIIR);
 
cpt_irq_handler(dev, pch_iir);
 
if (pch_iir) {
I915_WRITE(SDEIIR, pch_iir);
ret = IRQ_HANDLED;
cpt_irq_handler(dev, pch_iir);
} else
DRM_ERROR("The master control interrupt lied (SDE)!\n");
 
}
}
 
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
1905,7 → 2524,7
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
bool reset_completed)
{
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
int i;
 
/*
1939,8 → 2558,8
{
struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
work);
drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
gpu_error);
struct drm_i915_private *dev_priv =
container_of(error, struct drm_i915_private, gpu_error);
struct drm_device *dev = dev_priv->dev;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2097,10 → 2716,17
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
void i915_handle_error(struct drm_device *dev, bool wedged)
void i915_handle_error(struct drm_device *dev, bool wedged,
const char *fmt, ...)
{
struct drm_i915_private *dev_priv = dev->dev_private;
va_list args;
char error_msg[80];
 
va_start(args, fmt);
vscnprintf(error_msg, sizeof(error_msg), fmt, args);
va_end(args);
 
// i915_capture_error_state(dev);
i915_report_and_clear_eir(dev);
 
2136,7 → 2762,7
#if 0
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj;
2168,8 → 2794,8
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
crtc->y * crtc->fb->pitches[0] +
crtc->x * crtc->fb->bits_per_pixel/8);
crtc->y * crtc->primary->fb->pitches[0] +
crtc->x * crtc->primary->fb->bits_per_pixel/8);
}
 
spin_unlock_irqrestore(&dev->event_lock, flags);
2187,7 → 2813,7
*/
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
if (!i915_pipe_enabled(dev, pipe))
2196,14 → 2822,10
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
PIPE_START_VBLANK_INTERRUPT_STATUS);
else
i915_enable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE);
 
/* maintain vblank delivery even in deep C-states */
if (dev_priv->info->gen == 3)
I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
PIPE_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
return 0;
2211,7 → 2833,7
 
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK(pipe);
2228,22 → 2850,15
 
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
u32 imr;
 
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
imr = I915_READ(VLV_IMR);
if (pipe == PIPE_A)
imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
else
imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
I915_WRITE(VLV_IMR, imr);
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
return 0;
2270,22 → 2885,19
*/
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (dev_priv->info->gen == 3)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
 
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE |
PIPE_START_VBLANK_INTERRUPT_ENABLE);
PIPE_VBLANK_INTERRUPT_STATUS |
PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK(pipe);
2297,19 → 2909,12
 
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
u32 imr;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_disable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
imr = I915_READ(VLV_IMR);
if (pipe == PIPE_A)
imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
else
imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
I915_WRITE(VLV_IMR, imr);
PIPE_START_VBLANK_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
2329,7 → 2934,7
}
 
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
ring_last_seqno(struct intel_engine_cs *ring)
{
return list_entry(ring->request_list.prev,
struct drm_i915_gem_request, list)->seqno;
2336,81 → 2941,160
}
 
static bool
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
return (list_empty(&ring->request_list) ||
i915_seqno_passed(seqno, ring_last_seqno(ring)));
}
 
static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
if (INTEL_INFO(dev)->gen >= 8) {
return (ipehr >> 23) == 0x1c;
} else {
ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER);
}
}
 
static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 cmd, ipehr, acthd, acthd_min;
struct intel_engine_cs *signaller;
int i;
 
if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
for_each_ring(signaller, dev_priv, i) {
if (ring == signaller)
continue;
 
if (offset == signaller->semaphore.signal_ggtt[ring->id])
return signaller;
}
} else {
u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
 
for_each_ring(signaller, dev_priv, i) {
if (ring == signaller)
continue;
 
if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
return signaller;
}
}
 
DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
ring->id, ipehr, offset);
 
return NULL;
}
 
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 cmd, ipehr, head;
u64 offset = 0;
int i, backwards;
 
ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
if ((ipehr & ~(0x3 << 16)) !=
(MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
return NULL;
 
/* ACTHD is likely pointing to the dword after the actual command,
* so scan backwards until we find the MBOX.
/*
* HEAD is likely pointing to the dword after the actual command,
* so scan backwards until we find the MBOX. But limit it to just 3
* or 4 dwords depending on the semaphore wait command size.
* Note that we don't care about ACTHD here since that might
* point at a batch, and semaphores are always emitted into the
* ringbuffer itself.
*/
acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
acthd_min = max((int)acthd - 3 * 4, 0);
do {
cmd = ioread32(ring->virtual_start + acthd);
head = I915_READ_HEAD(ring) & HEAD_ADDR;
backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
 
for (i = backwards; i; --i) {
/*
* Be paranoid and presume the hw has gone off into the wild -
* our ring is smaller than what the hardware (and hence
* HEAD_ADDR) allows. Also handles wrap-around.
*/
head &= ring->buffer->size - 1;
 
/* This here seems to blow up */
cmd = ioread32(ring->buffer->virtual_start + head);
if (cmd == ipehr)
break;
 
acthd -= 4;
if (acthd < acthd_min)
head -= 4;
}
 
if (!i)
return NULL;
} while (1);
 
*seqno = ioread32(ring->virtual_start+acthd+4)+1;
return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
if (INTEL_INFO(ring->dev)->gen >= 8) {
offset = ioread32(ring->buffer->virtual_start + head + 12);
offset <<= 32;
offset |= ioread32(ring->buffer->virtual_start + head + 8); /* combine with the high dword read above */
}
return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}
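 
/*
 * Aside on the scan above: it relies on the ring size being a power of
 * two, so "head &= ring->buffer->size - 1" clamps the offset and handles
 * wrap-around in a single AND. A hypothetical standalone form:
 */
static u32 example_ring_wrap(u32 head, u32 ring_size)
{
	/* ring_size must be a power of two for the mask to be valid */
	return head & (ring_size - 1);
}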
 
static int semaphore_passed(struct intel_ring_buffer *ring)
static int semaphore_passed(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_ring_buffer *signaller;
u32 seqno, ctl;
struct intel_engine_cs *signaller;
u32 seqno;
 
ring->hangcheck.deadlock = true;
ring->hangcheck.deadlock++;
 
signaller = semaphore_waits_for(ring, &seqno);
if (signaller == NULL || signaller->hangcheck.deadlock)
if (signaller == NULL)
return -1;
 
/* Prevent pathological recursion due to driver bugs */
if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
return -1;
 
if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
return 1;
 
/* cursory check for an unkickable deadlock */
ctl = I915_READ_CTL(signaller);
if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
semaphore_passed(signaller) < 0)
return -1;
 
return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
return 0;
}
 
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
int i;
 
for_each_ring(ring, dev_priv, i)
ring->hangcheck.deadlock = false;
ring->hangcheck.deadlock = 0;
}
 
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
 
if (ring->hangcheck.acthd != acthd)
if (acthd != ring->hangcheck.acthd) {
if (acthd > ring->hangcheck.max_acthd) {
ring->hangcheck.max_acthd = acthd;
return HANGCHECK_ACTIVE;
}
 
return HANGCHECK_ACTIVE_LOOP;
}
 
if (IS_GEN2(dev))
return HANGCHECK_HUNG;
 
2421,7 → 3105,8
*/
tmp = I915_READ_CTL(ring);
if (tmp & RING_WAIT) {
DRM_ERROR("Kicking stuck wait on %s\n",
i915_handle_error(dev, false,
"Kicking stuck wait on %s",
ring->name);
I915_WRITE_CTL(ring, tmp);
return HANGCHECK_KICK;
2432,7 → 3117,8
default:
return HANGCHECK_HUNG;
case 1:
DRM_ERROR("Kicking stuck semaphore on %s\n",
i915_handle_error(dev, false,
"Kicking stuck semaphore on %s",
ring->name);
I915_WRITE_CTL(ring, tmp);
return HANGCHECK_KICK;
2455,8 → 3141,8
static void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
int i;
int busy_count = 0, rings_hung = 0;
bool stuck[I915_NUM_RINGS] = { 0 };
2463,13 → 3149,13
#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30
 
if (!i915_enable_hangcheck)
if (!i915.enable_hangcheck)
return;
 
for_each_ring(ring, dev_priv, i) {
u32 seqno, acthd;
u64 acthd;
u32 seqno;
bool busy = true;
 
semaphore_clear_deadlocks(dev_priv);
2479,6 → 3165,8
 
if (ring->hangcheck.seqno == seqno) {
if (ring_idle(ring, seqno)) {
ring->hangcheck.action = HANGCHECK_IDLE;
 
// if (waitqueue_active(&ring->irq_queue)) {
/* Issue a wake-up to catch stuck h/w. */
// DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2508,8 → 3196,9
switch (ring->hangcheck.action) {
case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
case HANGCHECK_ACTIVE:
break;
case HANGCHECK_ACTIVE:
case HANGCHECK_ACTIVE_LOOP:
ring->hangcheck.score += BUSY;
break;
case HANGCHECK_KICK:
2529,6 → 3218,8
*/
if (ring->hangcheck.score > 0)
ring->hangcheck.score--;
 
ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
}
 
ring->hangcheck.seqno = seqno;
2537,7 → 3228,7
}
 
for_each_ring(ring, dev_priv, i) {
if (ring->hangcheck.score > FIRE) {
if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
DRM_INFO("%s on %s\n",
stuck[i] ? "stuck" : "no progress",
ring->name);
2549,8 → 3240,7
// return i915_handle_error(dev, true);
 
}
 
static void ibx_irq_preinstall(struct drm_device *dev)
static void ibx_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
2557,61 → 3247,63
if (HAS_PCH_NOP(dev))
return;
 
/* south display irq */
I915_WRITE(SDEIMR, 0xffffffff);
GEN5_IRQ_RESET(SDE);
 
if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
I915_WRITE(SERR_INT, 0xffffffff);
}
 
/*
* SDEIER is also touched by the interrupt handler to work around missed
* PCH interrupts. Hence we can't update it after the interrupt handler
* is enabled - instead we unconditionally enable all PCH interrupt
* sources here, but then only unmask them as needed with SDEIMR.
* SDEIER is also touched by the interrupt handler to work around missed PCH
* interrupts. Hence we can't update it after the interrupt handler is enabled -
* instead we unconditionally enable all PCH interrupt sources here, but then
* only unmask them as needed with SDEIMR.
*
* This function needs to be called before interrupts are enabled.
*/
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (HAS_PCH_NOP(dev))
return;
 
WARN_ON(I915_READ(SDEIER) != 0);
I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
}
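 
/*
 * Illustrative sketch, assuming the usual IMR semantics: with every SDE
 * source enabled in SDEIER above, individual PCH interrupts are switched
 * purely through SDEIMR, where a cleared bit means unmasked (which is why
 * ibx_irq_postinstall writes ~mask). Hypothetical helper:
 */
static void example_unmask_pch_sources(struct drm_i915_private *dev_priv,
				       u32 bits)
{
	u32 sdeimr = I915_READ(SDEIMR);

	sdeimr &= ~bits;	/* cleared bit == source unmasked */
	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}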
 
static void gen5_gt_irq_preinstall(struct drm_device *dev)
static void gen5_gt_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* and GT */
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
POSTING_READ(GTIER);
 
if (INTEL_INFO(dev)->gen >= 6) {
/* and PM */
I915_WRITE(GEN6_PMIMR, 0xffffffff);
I915_WRITE(GEN6_PMIER, 0x0);
POSTING_READ(GEN6_PMIER);
GEN5_IRQ_RESET(GT);
if (INTEL_INFO(dev)->gen >= 6)
GEN5_IRQ_RESET(GEN6_PM);
}
}
 
/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
static void ironlake_irq_reset(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
 
atomic_set(&dev_priv->irq_received, 0);
I915_WRITE(HWSTAM, 0xffffffff);
 
I915_WRITE(HWSTAM, 0xeffe);
GEN5_IRQ_RESET(DE);
if (IS_GEN7(dev))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
 
I915_WRITE(DEIMR, 0xffffffff);
I915_WRITE(DEIER, 0x0);
POSTING_READ(DEIER);
gen5_gt_irq_reset(dev);
 
gen5_gt_irq_preinstall(dev);
 
ibx_irq_preinstall(dev);
ibx_irq_reset(dev);
}
 
static void valleyview_irq_preinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
atomic_set(&dev_priv->irq_received, 0);
 
/* VLV magic */
I915_WRITE(VLV_IMR, 0);
I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2622,7 → 3314,7
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
 
gen5_gt_irq_preinstall(dev);
gen5_gt_irq_reset(dev);
 
I915_WRITE(DPINVGTT, 0xff);
 
2636,58 → 3328,79
POSTING_READ(VLV_IER);
}
 
static void gen8_irq_preinstall(struct drm_device *dev)
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
GEN8_IRQ_RESET_NDX(GT, 0);
GEN8_IRQ_RESET_NDX(GT, 1);
GEN8_IRQ_RESET_NDX(GT, 2);
GEN8_IRQ_RESET_NDX(GT, 3);
}
 
static void gen8_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
atomic_set(&dev_priv->irq_received, 0);
 
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
 
/* IIR can theoretically queue up two events. Be paranoid */
#define GEN8_IRQ_INIT_NDX(type, which) do { \
I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
POSTING_READ(GEN8_##type##_IMR(which)); \
I915_WRITE(GEN8_##type##_IER(which), 0); \
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
POSTING_READ(GEN8_##type##_IIR(which)); \
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
} while (0)
gen8_gt_irq_reset(dev_priv);
 
#define GEN8_IRQ_INIT(type) do { \
I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
POSTING_READ(GEN8_##type##_IMR); \
I915_WRITE(GEN8_##type##_IER, 0); \
I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
POSTING_READ(GEN8_##type##_IIR); \
I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
} while (0)
for_each_pipe(pipe)
if (intel_display_power_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
 
GEN8_IRQ_INIT_NDX(GT, 0);
GEN8_IRQ_INIT_NDX(GT, 1);
GEN8_IRQ_INIT_NDX(GT, 2);
GEN8_IRQ_INIT_NDX(GT, 3);
GEN5_IRQ_RESET(GEN8_DE_PORT_);
GEN5_IRQ_RESET(GEN8_DE_MISC_);
GEN5_IRQ_RESET(GEN8_PCU_);
 
for_each_pipe(pipe) {
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
ibx_irq_reset(dev);
}
 
GEN8_IRQ_INIT(DE_PORT);
GEN8_IRQ_INIT(DE_MISC);
GEN8_IRQ_INIT(PCU);
#undef GEN8_IRQ_INIT
#undef GEN8_IRQ_INIT_NDX
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
~dev_priv->de_irq_mask[PIPE_B]);
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
~dev_priv->de_irq_mask[PIPE_C]);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
static void cherryview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
 
gen8_gt_irq_reset(dev_priv);
 
GEN5_IRQ_RESET(GEN8_PCU_);
 
POSTING_READ(GEN8_PCU_IIR);
 
ibx_irq_preinstall(dev);
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
 
I915_WRITE(VLV_IMR, 0xffffffff);
I915_WRITE(VLV_IER, 0x0);
I915_WRITE(VLV_IIR, 0xffffffff);
POSTING_READ(VLV_IIR);
}
 
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *intel_encoder;
u32 hotplug_irqs, hotplug, enabled_irqs = 0;
2722,22 → 3435,18
 
static void ibx_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 mask;
 
if (HAS_PCH_NOP(dev))
return;
 
if (HAS_PCH_IBX(dev)) {
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
SDE_TRANSA_FIFO_UNDER | SDE_POISON;
} else {
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
if (HAS_PCH_IBX(dev))
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
else
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
 
I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}
 
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
I915_WRITE(SDEIMR, ~mask);
}
 
2763,22 → 3472,16
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
}
 
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
I915_WRITE(GTIER, gt_irqs);
POSTING_READ(GTIER);
GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
 
if (INTEL_INFO(dev)->gen >= 6) {
pm_irqs |= GEN6_PM_RPS_EVENTS;
pm_irqs |= dev_priv->pm_rps_events;
 
if (HAS_VEBOX(dev))
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
 
dev_priv->pm_irq_mask = 0xffffffff;
I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
I915_WRITE(GEN6_PMIER, pm_irqs);
POSTING_READ(GEN6_PMIER);
GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
}
}
 
2785,7 → 3488,7
static int ironlake_irq_postinstall(struct drm_device *dev)
{
unsigned long irqflags;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 display_mask, extra_mask;
 
if (INTEL_INFO(dev)->gen >= 7) {
2792,30 → 3495,27
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
DE_ERR_INT_IVB);
DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
DE_PIPEA_VBLANK_IVB);
 
I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
DE_AUX_CHANNEL_A |
DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
DE_POISON);
extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
}
 
dev_priv->irq_mask = ~display_mask;
 
/* should always be able to generate an irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));
I915_WRITE(DEIMR, dev_priv->irq_mask);
I915_WRITE(DEIER, display_mask | extra_mask);
POSTING_READ(DEIER);
I915_WRITE(HWSTAM, 0xeffe);
 
ibx_irq_pre_postinstall(dev);
 
GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
 
gen5_gt_irq_postinstall(dev);
 
ibx_irq_postinstall(dev);
2834,44 → 3534,113
return 0;
}
 
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
u32 pipestat_mask;
u32 iir_mask;
 
pipestat_mask = PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS;
 
I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
POSTING_READ(PIPESTAT(PIPE_A));
 
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
 
i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
PIPE_GMBUS_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
 
iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
dev_priv->irq_mask &= ~iir_mask;
 
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
POSTING_READ(VLV_IER);
}
 
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
u32 pipestat_mask;
u32 iir_mask;
 
iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
 
dev_priv->irq_mask |= iir_mask;
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IIR, iir_mask);
POSTING_READ(VLV_IIR);
 
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
 
i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
PIPE_GMBUS_INTERRUPT_STATUS);
i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
 
pipestat_mask = PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS;
I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
POSTING_READ(PIPESTAT(PIPE_A));
}
 
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
assert_spin_locked(&dev_priv->irq_lock);
 
if (dev_priv->display_irqs_enabled)
return;
 
dev_priv->display_irqs_enabled = true;
 
if (dev_priv->dev->irq_enabled)
valleyview_display_irqs_install(dev_priv);
}
 
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
assert_spin_locked(&dev_priv->irq_lock);
 
if (!dev_priv->display_irqs_enabled)
return;
 
dev_priv->display_irqs_enabled = false;
 
if (dev_priv->dev->irq_enabled)
valleyview_display_irqs_uninstall(dev_priv);
}
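 
/*
 * Usage sketch with a hypothetical caller: both helpers above assert that
 * irq_lock is held, so a caller toggling display interrupts is expected to
 * look roughly like this.
 */
static void example_toggle_display_irqs(struct drm_i915_private *dev_priv,
					bool enable)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (enable)
		valleyview_enable_display_irqs(dev_priv);
	else
		valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}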
 
static int valleyview_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
PIPE_CRC_DONE_ENABLE;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
dev_priv->irq_mask = ~0;
 
/*
* Leave vblank interrupts masked initially. enable/disable will
* toggle them based on usage.
*/
dev_priv->irq_mask = (~enable_mask) |
I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
 
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, enable_mask);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(PIPESTAT(0), 0xffff);
I915_WRITE(PIPESTAT(1), 0xffff);
POSTING_READ(VLV_IER);
 
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
if (dev_priv->display_irqs_enabled)
valleyview_display_irqs_install(dev_priv);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
I915_WRITE(VLV_IIR, 0xffffffff);
2905,43 → 3674,33
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
};
 
for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
u32 tmp = I915_READ(GEN8_GT_IIR(i));
if (tmp)
DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
i, tmp);
I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
 
dev_priv->pm_irq_mask = 0xffffffff;
}
POSTING_READ(GEN8_GT_IER(0));
}
 
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
GEN8_PIPE_CDCLK_CRC_DONE |
GEN8_PIPE_FIFO_UNDERRUN |
GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
GEN8_PIPE_FIFO_UNDERRUN;
int pipe;
dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
 
for_each_pipe(pipe) {
u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
if (tmp)
DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
pipe, tmp);
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
}
POSTING_READ(GEN8_DE_PIPE_ISR(0));
for_each_pipe(pipe)
if (intel_display_power_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
de_pipe_enables);
 
I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
POSTING_READ(GEN8_DE_PORT_IER);
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
}
 
static int gen8_irq_postinstall(struct drm_device *dev)
2948,6 → 3707,8
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
ibx_irq_pre_postinstall(dev);
 
gen8_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
 
2959,57 → 3720,65
return 0;
}
 
static void gen8_irq_uninstall(struct drm_device *dev)
static int cherryview_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
unsigned long irqflags;
int pipe;
 
if (!dev_priv)
return;
/*
* Leave vblank interrupts masked initially. enable/disable will
* toggle them based on usage.
*/
dev_priv->irq_mask = ~enable_mask;
 
atomic_set(&dev_priv->irq_received, 0);
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
 
I915_WRITE(GEN8_MASTER_IRQ, 0);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
for_each_pipe(pipe)
i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
#define GEN8_IRQ_FINI_NDX(type, which) do { \
I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
I915_WRITE(GEN8_##type##_IER(which), 0); \
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
} while (0)
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, enable_mask);
 
#define GEN8_IRQ_FINI(type) do { \
I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
I915_WRITE(GEN8_##type##_IER, 0); \
I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
} while (0)
gen8_gt_irq_postinstall(dev_priv);
 
GEN8_IRQ_FINI_NDX(GT, 0);
GEN8_IRQ_FINI_NDX(GT, 1);
GEN8_IRQ_FINI_NDX(GT, 2);
GEN8_IRQ_FINI_NDX(GT, 3);
I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
POSTING_READ(GEN8_MASTER_IRQ);
 
for_each_pipe(pipe) {
GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
return 0;
}
 
GEN8_IRQ_FINI(DE_PORT);
GEN8_IRQ_FINI(DE_MISC);
GEN8_IRQ_FINI(PCU);
#undef GEN8_IRQ_FINI
#undef GEN8_IRQ_FINI_NDX
static void gen8_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
POSTING_READ(GEN8_PCU_IIR);
if (!dev_priv)
return;
 
gen8_irq_reset(dev);
}
 
static void valleyview_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
int pipe;
 
if (!dev_priv)
return;
 
del_timer_sync(&dev_priv->hotplug_reenable_timer);
I915_WRITE(VLV_MASTER_IER, 0);
 
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
3017,8 → 3786,14
I915_WRITE(HWSTAM, 0xffffffff);
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (dev_priv->display_irqs_enabled)
valleyview_display_irqs_uninstall(dev_priv);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
dev_priv->irq_mask = 0;
 
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IMR, 0xffffffff);
I915_WRITE(VLV_IER, 0x0);
3025,35 → 3800,65
POSTING_READ(VLV_IER);
}
 
static void ironlake_irq_uninstall(struct drm_device *dev)
static void cherryview_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
if (!dev_priv)
return;
 
del_timer_sync(&dev_priv->hotplug_reenable_timer);
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
 
I915_WRITE(HWSTAM, 0xffffffff);
#define GEN8_IRQ_FINI_NDX(type, which) \
do { \
I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
I915_WRITE(GEN8_##type##_IER(which), 0); \
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
POSTING_READ(GEN8_##type##_IIR(which)); \
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
} while (0)
 
I915_WRITE(DEIMR, 0xffffffff);
I915_WRITE(DEIER, 0x0);
I915_WRITE(DEIIR, I915_READ(DEIIR));
if (IS_GEN7(dev))
I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
#define GEN8_IRQ_FINI(type) \
do { \
I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
I915_WRITE(GEN8_##type##_IER, 0); \
I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
POSTING_READ(GEN8_##type##_IIR); \
I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
} while (0)
 
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
I915_WRITE(GTIIR, I915_READ(GTIIR));
GEN8_IRQ_FINI_NDX(GT, 0);
GEN8_IRQ_FINI_NDX(GT, 1);
GEN8_IRQ_FINI_NDX(GT, 2);
GEN8_IRQ_FINI_NDX(GT, 3);
 
if (HAS_PCH_NOP(dev))
GEN8_IRQ_FINI(PCU);
 
#undef GEN8_IRQ_FINI
#undef GEN8_IRQ_FINI_NDX
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
 
I915_WRITE(VLV_IMR, 0xffffffff);
I915_WRITE(VLV_IER, 0x0);
I915_WRITE(VLV_IIR, 0xffffffff);
POSTING_READ(VLV_IIR);
}
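 
/*
 * Note on the GEN8_IRQ_FINI* macros above: IIR can latch a second event
 * behind the one currently set, so the macros clear IIR, flush with a
 * posting read, and clear it again. A hypothetical helper doing the same
 * for a single register:
 */
static void example_drain_iir(struct drm_i915_private *dev_priv, u32 reg)
{
	I915_WRITE(reg, 0xffffffff);	/* clear the latched events */
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);	/* clear any event queued behind them */
}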
 
static void ironlake_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!dev_priv)
return;
 
I915_WRITE(SDEIMR, 0xffffffff);
I915_WRITE(SDEIER, 0x0);
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
I915_WRITE(SERR_INT, I915_READ(SERR_INT));
ironlake_irq_reset(dev);
}
 
#if 0
3060,11 → 3865,9
 
static void i8xx_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
atomic_set(&dev_priv->irq_received, 0);
 
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE16(IMR, 0xffff);
3074,7 → 3877,7
 
static int i8xx_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
I915_WRITE16(EMR,
3099,8 → 3902,8
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
return 0;
3112,7 → 3915,7
static bool i8xx_handle_vblank(struct drm_device *dev,
int plane, int pipe, u32 iir)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
 
// if (!drm_handle_vblank(dev, pipe))
3139,8 → 3942,8
 
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u16 iir, new_iir;
u32 pipe_stats[2];
unsigned long irqflags;
3149,8 → 3952,6
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
atomic_inc(&dev_priv->irq_received);
 
iir = I915_READ16(IIR);
if (iir == 0)
return IRQ_NONE;
3163,7 → 3964,9
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
i915_handle_error(dev, false,
"Command parser error, iir 0x%08x",
iir);
 
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
3172,13 → 3975,9
/*
* Clear the PIPE*STAT regs before the IIR
*/
if (pipe_stats[pipe] & 0x8000ffff) {
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
DRM_DEBUG_DRIVER("pipe %c underrun\n",
pipe_name(pipe));
if (pipe_stats[pipe] & 0x8000ffff)
I915_WRITE(reg, pipe_stats[pipe]);
}
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
I915_WRITE16(IIR, iir & ~flip_mask);
3200,6 → 3999,10
 
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
 
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
}
 
iir = new_iir;
3210,7 → 4013,7
 
static void i8xx_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
for_each_pipe(pipe) {
3227,11 → 4030,9
 
static void i915_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
atomic_set(&dev_priv->irq_received, 0);
 
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3247,7 → 4048,7
 
static int i915_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 enable_mask;
unsigned long irqflags;
 
3288,8 → 4089,8
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
return 0;
3301,7 → 4102,7
static bool i915_handle_vblank(struct drm_device *dev,
int plane, int pipe, u32 iir)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
 
// if (!drm_handle_vblank(dev, pipe))
3328,8 → 4129,8
 
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
unsigned long irqflags;
u32 flip_mask =
3337,8 → 4138,6
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
int pipe, ret = IRQ_NONE;
 
atomic_inc(&dev_priv->irq_received);
 
iir = I915_READ(IIR);
do {
bool irq_received = (iir & ~flip_mask) != 0;
3351,7 → 4150,9
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
i915_handle_error(dev, false,
"Command parser error, iir 0x%08x",
iir);
 
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
3359,9 → 4160,6
 
/* Clear the PIPE*STAT regs before the IIR */
if (pipe_stats[pipe] & 0x8000ffff) {
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
DRM_DEBUG_DRIVER("pipe %c underrun\n",
pipe_name(pipe));
I915_WRITE(reg, pipe_stats[pipe]);
irq_received = true;
}
3372,20 → 4170,10
break;
 
/* Consume port. Then clear IIR or we'll miss events */
if ((I915_HAS_HOTPLUG(dev)) &&
(iir & I915_DISPLAY_PORT_INTERRUPT)) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
if (I915_HAS_HOTPLUG(dev) &&
iir & I915_DISPLAY_PORT_INTERRUPT)
i9xx_hpd_irq_handler(dev);
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
 
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
 
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
POSTING_READ(PORT_HOTPLUG_STAT);
}
 
I915_WRITE(IIR, iir & ~flip_mask);
new_iir = I915_READ(IIR); /* Flush posted writes */
 
3406,6 → 4194,10
 
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
 
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
}
 
if (blc_event || (iir & I915_ASLE_INTERRUPT))
3437,11 → 4229,9
 
static void i915_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
del_timer_sync(&dev_priv->hotplug_reenable_timer);
 
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3461,11 → 4251,9
 
static void i965_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
atomic_set(&dev_priv->irq_received, 0);
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
3479,7 → 4267,7
 
static int i965_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 enable_mask;
u32 error_mask;
unsigned long irqflags;
3504,9 → 4292,9
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
/*
3538,7 → 4326,7
 
static void i915_hpd_irq_setup(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *intel_encoder;
u32 hotplug_en;
3569,26 → 4357,22
 
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, new_iir;
u32 pipe_stats[I915_MAX_PIPES];
unsigned long irqflags;
int irq_received;
int ret = IRQ_NONE, pipe;
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
atomic_inc(&dev_priv->irq_received);
 
iir = I915_READ(IIR);
 
for (;;) {
bool irq_received = (iir & ~flip_mask) != 0;
bool blc_event = false;
 
irq_received = (iir & ~flip_mask) != 0;
 
/* Can't rely on pipestat interrupt bit in iir as it might
* have been cleared after the pipestat interrupt was received.
* It doesn't set the bit in iir again, but it still produces
3596,7 → 4380,9
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
i915_handle_error(dev, false,
"Command parser error, iir 0x%08x",
iir);
 
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
3606,11 → 4392,8
* Clear the PIPE*STAT regs before the IIR
*/
if (pipe_stats[pipe] & 0x8000ffff) {
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
DRM_DEBUG_DRIVER("pipe %c underrun\n",
pipe_name(pipe));
I915_WRITE(reg, pipe_stats[pipe]);
irq_received = 1;
irq_received = true;
}
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3621,26 → 4404,9
ret = IRQ_HANDLED;
 
/* Consume port. Then clear IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
HOTPLUG_INT_STATUS_G4X :
HOTPLUG_INT_STATUS_I915);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
i9xx_hpd_irq_handler(dev);
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
 
intel_hpd_irq_handler(dev, hotplug_trigger,
IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
 
if (IS_G4X(dev) &&
(hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
dp_aux_irq_handler(dev);
 
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
}
 
I915_WRITE(IIR, iir & ~flip_mask);
new_iir = I915_READ(IIR); /* Flush posted writes */
 
3659,9 → 4425,12
 
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
 
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
}
 
 
if (blc_event || (iir & I915_ASLE_INTERRUPT))
intel_opregion_asle_intr(dev);
 
3693,14 → 4462,12
 
static void i965_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
if (!dev_priv)
return;
 
del_timer_sync(&dev_priv->hotplug_reenable_timer);
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
3716,9 → 4483,11
I915_WRITE(IIR, I915_READ(IIR));
}
 
static void i915_reenable_hotplug_timer_func(unsigned long data)
static void intel_hpd_irq_reenable(struct work_struct *work)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
hotplug_reenable_work.work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
unsigned long irqflags;
3739,7 → 4508,7
if (intel_connector->encoder->hpd_pin == i) {
if (connector->polled != intel_connector->polled)
DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
drm_get_connector_name(connector));
connector->name);
connector->polled = intel_connector->polled;
if (!connector->polled)
connector->polled = DRM_CONNECTOR_POLL_HPD;
3760,9 → 4529,15
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
(unsigned long) dev_priv);
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev))
/* WaGsvRC0ResidenncyMethod:VLV */
dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
/* Haven't installed the IRQ handler yet */
dev_priv->pm._irqs_disabled = true;
 
if (IS_GEN2(dev)) {
dev->max_vblank_count = 0;
3780,7 → 4555,15
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
}
 
if (IS_VALLEYVIEW(dev)) {
if (IS_CHERRYVIEW(dev)) {
dev->driver->irq_handler = cherryview_irq_handler;
dev->driver->irq_preinstall = cherryview_irq_preinstall;
dev->driver->irq_postinstall = cherryview_irq_postinstall;
dev->driver->irq_uninstall = cherryview_irq_uninstall;
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_VALLEYVIEW(dev)) {
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
3790,7 → 4573,7
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_GEN8(dev)) {
dev->driver->irq_handler = gen8_irq_handler;
dev->driver->irq_preinstall = gen8_irq_preinstall;
dev->driver->irq_preinstall = gen8_irq_reset;
dev->driver->irq_postinstall = gen8_irq_postinstall;
dev->driver->irq_uninstall = gen8_irq_uninstall;
dev->driver->enable_vblank = gen8_enable_vblank;
3798,7 → 4581,7
dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
dev->driver->irq_preinstall = ironlake_irq_reset;
dev->driver->irq_postinstall = ironlake_irq_postinstall;
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ironlake_enable_vblank;
3839,8 → 4622,10
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct intel_connector *intel_connector = to_intel_connector(connector);
connector->polled = intel_connector->polled;
if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
connector->polled = DRM_CONNECTOR_POLL_HPD;
if (intel_connector->mst_port)
connector->polled = DRM_CONNECTOR_POLL_HPD;
}
 
/* Interrupt setup is already guaranteed to be single-threaded, this is
3851,60 → 4636,23
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
/* Disable interrupts so we can allow Package C8+. */
void hsw_pc8_disable_interrupts(struct drm_device *dev)
/* Disable interrupts so we can allow runtime PM. */
void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
 
ironlake_disable_display_irq(dev_priv, 0xffffffff);
ibx_disable_display_interrupt(dev_priv, 0xffffffff);
ilk_disable_gt_irq(dev_priv, 0xffffffff);
snb_disable_pm_irq(dev_priv, 0xffffffff);
 
dev_priv->pc8.irqs_disabled = true;
 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
dev->driver->irq_uninstall(dev);
dev_priv->pm._irqs_disabled = true;
}
 
/* Restore interrupts so we can recover from Package C8+. */
void hsw_pc8_restore_interrupts(struct drm_device *dev)
/* Restore interrupts so we can recover from runtime PM. */
void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
uint32_t val;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
val = I915_READ(DEIMR);
WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);
 
val = I915_READ(SDEIMR);
WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);
 
val = I915_READ(GTIMR);
WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);
 
val = I915_READ(GEN6_PMIMR);
WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);
 
dev_priv->pc8.irqs_disabled = false;
 
ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
dev_priv->pm._irqs_disabled = false;
dev->driver->irq_preinstall(dev);
dev->driver->irq_postinstall(dev);
}
 
 
/drivers/video/drm/i915/i915_params.c
0,0 → 1,169
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
 
#include "i915_drv.h"
 
struct i915_params i915 __read_mostly = {
.modeset = 1,
.panel_ignore_lid = 1,
.powersave = 1,
.semaphores = -1,
.lvds_downclock = 0,
.lvds_channel_mode = 0,
.panel_use_ssc = -1,
.vbt_sdvo_panel_type = -1,
.enable_rc6 = -1,
.enable_fbc = -1,
.enable_hangcheck = true,
.enable_ppgtt = 1,
.enable_psr = 0,
.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
.disable_power_well = 1,
.enable_ips = 1,
.fastboot = 0,
.prefault_disable = 0,
.reset = true,
.invert_brightness = 0,
.disable_display = 0,
.enable_cmd_parser = 0,
.disable_vtd_wa = 0,
.use_mmio_flip = 0,
.mmio_debug = 0,
};
 
module_param_named(modeset, i915.modeset, int, 0400);
MODULE_PARM_DESC(modeset,
"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
"1=on, -1=force vga console preference [default])");
 
module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
 
module_param_named(powersave, i915.powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
 
module_param_named(semaphores, i915.semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync "
"(default: -1 (use per-chip defaults))");
 
module_param_named(enable_rc6, i915.enable_rc6, int, 0400);
MODULE_PARM_DESC(enable_rc6,
"Enable power-saving render C-state 6. "
"Different stages can be selected via bitmask values "
"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
"default: -1 (use per-chip default)");
 
module_param_named(enable_fbc, i915.enable_fbc, int, 0600);
MODULE_PARM_DESC(enable_fbc,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
 
module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");
 
module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
"Specify LVDS channel mode "
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
 
module_param_named(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: auto from VBT)");
 
module_param_named(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
 
module_param_named(reset, i915.reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
 
module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
"Periodically check GPU activity for detecting hangs. "
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
 
module_param_named(enable_ppgtt, i915.enable_ppgtt, int, 0400);
MODULE_PARM_DESC(enable_ppgtt,
"Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
 
module_param_named(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
 
module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support.");
 
module_param_named(disable_power_well, i915.disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
"Disable the power well when possible (default: true)");
 
module_param_named(enable_ips, i915.enable_ips, int, 0600);
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
 
module_param_named(fastboot, i915.fastboot, bool, 0600);
MODULE_PARM_DESC(fastboot,
"Try to skip unnecessary mode sets at boot time (default: false)");
 
module_param_named(prefault_disable, i915.prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
"Disable page prefaulting for pread/pwrite/reloc (default:false). "
"For developers only.");
 
module_param_named(invert_brightness, i915.invert_brightness, int, 0600);
MODULE_PARM_DESC(invert_brightness,
"Invert backlight brightness "
"(-1 force normal, 0 machine defaults, 1 force inversion), please "
"report PCI device ID, subsystem vendor and subsystem device ID "
"to dri-devel@lists.freedesktop.org, if your machine needs it. "
"It will then be included in an upcoming module version.");
 
module_param_named(disable_display, i915.disable_display, bool, 0600);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
 
module_param_named(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
 
module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser,
"Enable command parsing (1=enabled [default], 0=disabled)");
 
module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
MODULE_PARM_DESC(use_mmio_flip,
"use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
 
module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
MODULE_PARM_DESC(mmio_debug,
"Enable the MMIO debug code (default: false). This may negatively "
"affect performance.");
/drivers/video/drm/i915/i915_reg.h
26,10 → 26,11
#define _I915_REG_H_
 
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc))
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
 
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
(pipe) == PIPE_B ? (b) : (c))
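/*
 * Illustrative expansion (added for clarity, not from the original header):
 * _PIPE() interpolates linearly between the pipe A and pipe B addresses,
 * while _PIPE3() picks one of three explicit addresses, e.g.
 *   _PIPE(1, 0x6014, 0x6018) -> 0x6014 + 1*(0x6018 - 0x6014) = 0x6018
 *   _PIPE3(PIPE_C, a, b, c)  -> c
 */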
 
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)
73,11 → 74,11
#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
#define LBB 0xf4
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
 
 
/* Graphics reset regs */
#define I965_GDRST 0xc0 /* PCI config register */
#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
84,6 → 85,13
#define GRDOM_MASK (3<<2)
#define GRDOM_RESET_ENABLE (1<<0)
 
#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
#define ILK_GRDOM_FULL (0<<1)
#define ILK_GRDOM_RENDER (1<<1)
#define ILK_GRDOM_MEDIA (3<<1)
#define ILK_GRDOM_MASK (3<<1)
#define ILK_GRDOM_RESET_ENABLE (1<<0)
 
#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
#define GEN6_MBC_SNPCR_SHIFT 21
#define GEN6_MBC_SNPCR_MASK (3<<21)
92,6 → 100,9
#define GEN6_MBC_SNPCR_LOW (2<<21)
#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
 
#define VLV_G3DCTL 0x9024
#define VLV_GSCKGCTL 0x9028
 
#define GEN6_MBCTL 0x0907c
#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
175,9 → 186,23
#define VGA_CR_DATA_CGA 0x3d5
 
/*
* Instruction field definitions used by the command parser
*/
#define INSTR_CLIENT_SHIFT 29
#define INSTR_CLIENT_MASK 0xE0000000
#define INSTR_MI_CLIENT 0x0
#define INSTR_BC_CLIENT 0x2
#define INSTR_RC_CLIENT 0x3
#define INSTR_SUBCLIENT_SHIFT 27
#define INSTR_SUBCLIENT_MASK 0x18000000
#define INSTR_MEDIA_SUBCLIENT 0x2
 
/*
* Memory interface instructions used by the kernel
*/
#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */
#define MI_GLOBAL_GTT (1<<22)
 
#define MI_NOOP MI_INSTR(0, 0)
#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
215,7 → 240,7
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
#define MI_SEMAPHORE_COMPARE (1<<20)
233,6 → 258,7
#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
#define MI_SEMAPHORE_SYNC_MASK (3<<16)
#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
#define MI_MM_SPACE_GTT (1<<8)
#define MI_MM_SPACE_PHYSICAL (0<<8)
240,6 → 266,11
#define MI_RESTORE_EXT_STATE_EN (1<<2)
#define MI_FORCE_RESTORE (1<<1)
#define MI_RESTORE_INHIBIT (1<<0)
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
#define MI_SEMAPHORE_POLL (1<<15)
#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
250,13 → 281,16
* - One can actually load arbitrarily many arbitrary registers: simply issue x
* address/value pairs. Don't overdo it, though: x <= 2^4 must hold!
* (A minimal emission sketch follows the LRI/SRM defines below.)
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1)
#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1)
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
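/*
 * Illustrative sketch (added for clarity, not part of this diff): emitting a
 * single register write from ring code with MI_LOAD_REGISTER_IMM.  The
 * intel_ring_begin/emit/advance helpers and the intel_engine_cs type are
 * assumed to match this kernel generation.
 */
static int example_lri_write(struct intel_engine_cs *ring, u32 reg, u32 value)
{
	int ret;

	ret = intel_ring_begin(ring, 4);	/* header + 1 addr/value pair + pad */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, reg);		/* register offset */
	intel_ring_emit(ring, value);		/* value to load */
	intel_ring_emit(ring, MI_NOOP);		/* keep the tail qword aligned */
	intel_ring_advance(ring);

	return 0;
}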
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
#define MI_FLUSH_DW_OP_STOREDW (1<<14)
#define MI_FLUSH_DW_OP_MASK (3<<14)
#define MI_FLUSH_DW_NOTIFY (1<<8)
#define MI_INVALIDATE_BSD (1<<7)
#define MI_FLUSH_DW_USE_GTT (1<<2)
#define MI_FLUSH_DW_USE_PPGTT (0<<2)
318,9 → 352,12
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
#define PIPE_CONTROL_MMIO_WRITE (1<<23)
#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
328,6 → 365,7
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
#define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
335,8 → 373,96
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
 
/*
* Commands used only by the command parser
*/
#define MI_SET_PREDICATE MI_INSTR(0x01, 0)
#define MI_ARB_CHECK MI_INSTR(0x05, 0)
#define MI_RS_CONTROL MI_INSTR(0x06, 0)
#define MI_URB_ATOMIC_ALLOC MI_INSTR(0x09, 0)
#define MI_PREDICATE MI_INSTR(0x0C, 0)
#define MI_RS_CONTEXT MI_INSTR(0x0F, 0)
#define MI_TOPOLOGY_FILTER MI_INSTR(0x0D, 0)
#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0)
#define MI_URB_CLEAR MI_INSTR(0x19, 0)
#define MI_UPDATE_GTT MI_INSTR(0x23, 0)
#define MI_CLFLUSH MI_INSTR(0x27, 0)
#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 0)
#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
 
#define PIPELINE_SELECT ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16))
#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \
((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16))
#define GFX_OP_3DSTATE_SO_DECL_LIST \
((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16))
 
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \
((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16))
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \
((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16))
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \
((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16))
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \
((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16))
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \
((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16))
 
#define MFX_WAIT ((0x3<<29)|(0x1<<27)|(0x0<<16))
 
#define COLOR_BLT ((0x2<<29)|(0x40<<22))
#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))
 
/*
* Registers used only by the command parser
*/
#define BCS_SWCTRL 0x22200
 
#define HS_INVOCATION_COUNT 0x2300
#define DS_INVOCATION_COUNT 0x2308
#define IA_VERTICES_COUNT 0x2310
#define IA_PRIMITIVES_COUNT 0x2318
#define VS_INVOCATION_COUNT 0x2320
#define GS_INVOCATION_COUNT 0x2328
#define GS_PRIMITIVES_COUNT 0x2330
#define CL_INVOCATION_COUNT 0x2338
#define CL_PRIMITIVES_COUNT 0x2340
#define PS_INVOCATION_COUNT 0x2348
#define PS_DEPTH_COUNT 0x2350
 
/* There are four 64-bit counter registers, one for each stream output */
#define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8)
 
#define GEN7_SO_PRIM_STORAGE_NEEDED(n) (0x5240 + (n) * 8)
 
#define GEN7_3DPRIM_END_OFFSET 0x2420
#define GEN7_3DPRIM_START_VERTEX 0x2430
#define GEN7_3DPRIM_VERTEX_COUNT 0x2434
#define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
#define GEN7_3DPRIM_START_INSTANCE 0x243C
#define GEN7_3DPRIM_BASE_VERTEX 0x2440
 
#define OACONTROL 0x2360
 
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
#define GEN7_PIPE_DE_LOAD_SL(pipe) _PIPE(pipe, \
_GEN7_PIPEA_DE_LOAD_SL, \
_GEN7_PIPEB_DE_LOAD_SL)
 
/*
* Reset registers
*/
#define DEBUG_RESET_I830 0x6070
358,6 → 484,7
#define IOSF_PORT_PUNIT 0x4
#define IOSF_PORT_NC 0x11
#define IOSF_PORT_DPIO 0x12
#define IOSF_PORT_DPIO_2 0x1a
#define IOSF_PORT_GPIO_NC 0x13
#define IOSF_PORT_CCK 0x14
#define IOSF_PORT_CCU 0xA9
369,22 → 496,35
/* See configdb bunit SB addr map */
#define BUNIT_REG_BISOC 0x11
 
#define PUNIT_OPCODE_REG_READ 6
#define PUNIT_OPCODE_REG_WRITE 7
 
#define PUNIT_REG_DSPFREQ 0x36
#define DSPFREQSTAT_SHIFT 30
#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
#define DSPFREQGUAR_SHIFT 14
#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
 
/* See the PUNIT HAS v0.8 for the below bits */
enum punit_power_well {
PUNIT_POWER_WELL_RENDER = 0,
PUNIT_POWER_WELL_MEDIA = 1,
PUNIT_POWER_WELL_DISP2D = 3,
PUNIT_POWER_WELL_DPIO_CMN_BC = 5,
PUNIT_POWER_WELL_DPIO_TX_B_LANES_01 = 6,
PUNIT_POWER_WELL_DPIO_TX_B_LANES_23 = 7,
PUNIT_POWER_WELL_DPIO_TX_C_LANES_01 = 8,
PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9,
PUNIT_POWER_WELL_DPIO_RX0 = 10,
PUNIT_POWER_WELL_DPIO_RX1 = 11,
 
PUNIT_POWER_WELL_NUM,
};
 
#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
#define PUNIT_CLK_GATE 1
#define PUNIT_PWR_RESET 2
#define PUNIT_PWR_GATE 3
#define RENDER_PWRGT (PUNIT_PWR_GATE << 0)
#define MEDIA_PWRGT (PUNIT_PWR_GATE << 2)
#define DISP2D_PWRGT (PUNIT_PWR_GATE << 6)
#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2))
#define PUNIT_PWRGT_PWR_ON(power_well) (0 << ((power_well) * 2))
#define PUNIT_PWRGT_CLK_GATE(power_well) (1 << ((power_well) * 2))
#define PUNIT_PWRGT_RESET(power_well) (2 << ((power_well) * 2))
#define PUNIT_PWRGT_PWR_GATE(power_well) (3 << ((power_well) * 2))
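/*
 * Illustrative sketch (added for clarity, not part of this diff): gating or
 * ungating one of the wells above through the punit sideband.  The
 * vlv_punit_read/write helpers and rps.hw_lock are assumed to match this
 * kernel generation.
 */
static void example_set_power_well(struct drm_i915_private *dev_priv,
				   enum punit_power_well well, bool enable)
{
	u32 mask = PUNIT_PWRGT_MASK(well);
	u32 state = enable ? PUNIT_PWRGT_PWR_ON(well) : PUNIT_PWRGT_PWR_GATE(well);
	u32 ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
	mutex_unlock(&dev_priv->rps.hw_lock);
}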
 
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
391,10 → 531,21
#define PUNIT_REG_GPU_FREQ_STS 0xd8
#define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_REG_CZ_TIMESTAMP 0xce
 
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
 
#define PUNIT_GPU_STATUS_REG 0xdb
#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16
#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff
#define PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT 8
#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK 0xff
 
#define PUNIT_GPU_DUTYCYCLE_REG 0xdf
#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT 8
#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK 0xff
 
#define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c
#define FB_GFX_MAX_FREQ_FUSE_SHIFT 3
#define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8
406,6 → 557,11
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
 
#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
#define VLV_RP_UP_EI_THRESHOLD 90
#define VLV_RP_DOWN_EI_THRESHOLD 70
#define VLV_INT_COUNT_FOR_DOWN_EI 5
 
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
#define CCK_FUSE_HPLL_FREQ_MASK 0x3
440,17 → 596,97
#define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
#define DISPLAY_TRUNK_FORCE_ON (1 << 17)
#define DISPLAY_TRUNK_FORCE_OFF (1 << 16)
#define DISPLAY_FREQUENCY_STATUS (0x1f << 8)
#define DISPLAY_FREQUENCY_STATUS_SHIFT 8
#define DISPLAY_FREQUENCY_VALUES (0x1f << 0)
 
/*
* DPIO - a special bus for various display related registers to hide behind
/**
* DOC: DPIO
*
* DPIO is VLV only.
* VLV and CHV have slightly peculiar display PHYs for driving DP/HDMI
* ports. DPIO is the name given to such a display PHY. These PHYs
* don't follow the standard programming model using direct MMIO
* registers, and instead their registers must be accessed through IOSF
* sideband. VLV has one such PHY for driving ports B and C, and CHV
* adds another PHY for driving port D. Each PHY responds to a specific
* IOSF-SB port.
*
* Note: digital port B is DDI0, digital port C is DDI1
* Each display PHY is made up of one or two channels. Each channel
* houses a common lane part which contains the PLL and other common
* logic. CH0 common lane also contains the IOSF-SB logic for the
* Common Register Interface (CRI), i.e. the DPIO registers. CRI clock
* must be running when any DPIO registers are accessed.
*
* In addition to having their own registers, the PHYs are also
* controlled through some dedicated signals from the display
* controller. These include PLL reference clock enable, PLL enable,
* and CRI clock selection, for example.
*
* Each channel also has two splines (also called data lanes), and
* each spline is made up of one Physical Access Coding Sub-Layer
* (PCS) block and two TX lanes. So each channel has two PCS blocks
* and four TX lanes. The TX lanes are used as DP lanes or TMDS
* data/clock pairs depending on the output type.
*
* Additionally the PHY also contains an AUX lane with AUX blocks
* for each channel. This is used for DP AUX communication, but
* this fact isn't really relevant for the driver since AUX is
* controlled from the display controller side. No DPIO registers
* need to be accessed during AUX communication.
*
* Generally the common lane corresponds to the pipe and
* the spline (PCS/TX) corresponds to the port.
*
* For dual channel PHY (VLV/CHV):
*
* pipe A == CMN/PLL/REF CH0
*
* pipe B == CMN/PLL/REF CH1
*
* port B == PCS/TX CH0
*
* port C == PCS/TX CH1
*
* This is especially important when we cross the streams
* i.e. drive port B with pipe B, or port C with pipe A.
*
* For single channel PHY (CHV):
*
* pipe C == CMN/PLL/REF CH0
*
* port D == PCS/TX CH0
*
* Note: digital port B is DDI0, digital port C is DDI1,
* digital port D is DDI2
*/
/*
* Dual channel PHY (VLV/CHV)
* ---------------------------------
* | CH0 | CH1 |
* | CMN/PLL/REF | CMN/PLL/REF |
* |---------------|---------------| Display PHY
* | PCS01 | PCS23 | PCS01 | PCS23 |
* |-------|-------|-------|-------|
* |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
* ---------------------------------
* | DDI0 | DDI1 | DP/HDMI ports
* ---------------------------------
*
* Single channel PHY (CHV)
* -----------------
* | CH0 |
* | CMN/PLL/REF |
* |---------------| Display PHY
* | PCS01 | PCS23 |
* |-------|-------|
* |TX0|TX1|TX2|TX3|
* -----------------
* | DDI2 | DP/HDMI port
* -----------------
*/
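/*
 * Illustrative sketch (added for clarity, not part of this diff): reading a
 * channel-0 PCS register of a pipe's PHY through the IOSF sideband.  The
 * vlv_dpio_read helper, dpio_lock and DPIO_CH0 are assumed to match this
 * kernel generation; the CRI clock must be running (see the DOC comment).
 */
static u32 example_read_pcs_dw0(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW0(DPIO_CH0));
	mutex_unlock(&dev_priv->dpio_lock);

	return val;
}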
#define DPIO_DEVFN 0
#define DPIO_OPCODE_REG_WRITE 1
#define DPIO_OPCODE_REG_READ 0
 
#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
527,8 → 763,16
#define DPIO_PCS_TX_LANE1_RESET (1<<7)
#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
 
#define _VLV_PCS01_DW0_CH0 0x200
#define _VLV_PCS23_DW0_CH0 0x400
#define _VLV_PCS01_DW0_CH1 0x2600
#define _VLV_PCS23_DW0_CH1 0x2800
#define VLV_PCS01_DW0(ch) _PORT(ch, _VLV_PCS01_DW0_CH0, _VLV_PCS01_DW0_CH1)
#define VLV_PCS23_DW0(ch) _PORT(ch, _VLV_PCS23_DW0_CH0, _VLV_PCS23_DW0_CH1)
 
#define _VLV_PCS_DW1_CH0 0x8204
#define _VLV_PCS_DW1_CH1 0x8404
#define CHV_PCS_REQ_SOFTRESET_EN (1<<23)
#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22)
#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
535,8 → 779,17
#define DPIO_PCS_CLK_SOFT_RESET (1<<5)
#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
 
#define _VLV_PCS01_DW1_CH0 0x204
#define _VLV_PCS23_DW1_CH0 0x404
#define _VLV_PCS01_DW1_CH1 0x2604
#define _VLV_PCS23_DW1_CH1 0x2804
#define VLV_PCS01_DW1(ch) _PORT(ch, _VLV_PCS01_DW1_CH0, _VLV_PCS01_DW1_CH1)
#define VLV_PCS23_DW1(ch) _PORT(ch, _VLV_PCS23_DW1_CH0, _VLV_PCS23_DW1_CH1)
 
#define _VLV_PCS_DW8_CH0 0x8220
#define _VLV_PCS_DW8_CH1 0x8420
#define CHV_PCS_USEDCLKCHANNEL_OVRRIDE (1 << 20)
#define CHV_PCS_USEDCLKCHANNEL (1 << 21)
#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
 
#define _VLV_PCS01_DW8_CH0 0x0220
550,6 → 803,19
#define _VLV_PCS_DW9_CH1 0x8424
#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
 
#define _CHV_PCS_DW10_CH0 0x8228
#define _CHV_PCS_DW10_CH1 0x8428
#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30)
#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31)
#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
 
#define _VLV_PCS01_DW10_CH0 0x0228
#define _VLV_PCS23_DW10_CH0 0x0428
#define _VLV_PCS01_DW10_CH1 0x2628
#define _VLV_PCS23_DW10_CH1 0x2828
#define VLV_PCS01_DW10(port) _PORT(port, _VLV_PCS01_DW10_CH0, _VLV_PCS01_DW10_CH1)
#define VLV_PCS23_DW10(port) _PORT(port, _VLV_PCS23_DW10_CH0, _VLV_PCS23_DW10_CH1)
 
#define _VLV_PCS_DW11_CH0 0x822c
#define _VLV_PCS_DW11_CH1 0x842c
#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
568,14 → 834,21
 
#define _VLV_TX_DW2_CH0 0x8288
#define _VLV_TX_DW2_CH1 0x8488
#define DPIO_SWING_MARGIN_SHIFT 16
#define DPIO_SWING_MARGIN_MASK (0xff << DPIO_SWING_MARGIN_SHIFT)
#define DPIO_UNIQ_TRANS_SCALE_SHIFT 8
#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
 
#define _VLV_TX_DW3_CH0 0x828c
#define _VLV_TX_DW3_CH1 0x848c
/* The following bit is for the CHV PHY */
#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27)
#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
 
#define _VLV_TX_DW4_CH0 0x8290
#define _VLV_TX_DW4_CH1 0x8490
#define DPIO_SWING_DEEMPH9P5_SHIFT 24
#define DPIO_SWING_DEEMPH9P5_MASK (0xff << DPIO_SWING_DEEMPH9P5_SHIFT)
#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
 
#define _VLV_TX3_DW4_CH0 0x690
595,6 → 868,96
#define _VLV_TX_DW14_CH1 0x84b8
#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1)
 
/* CHV dpPhy registers */
#define _CHV_PLL_DW0_CH0 0x8000
#define _CHV_PLL_DW0_CH1 0x8180
#define CHV_PLL_DW0(ch) _PIPE(ch, _CHV_PLL_DW0_CH0, _CHV_PLL_DW0_CH1)
 
#define _CHV_PLL_DW1_CH0 0x8004
#define _CHV_PLL_DW1_CH1 0x8184
#define DPIO_CHV_N_DIV_SHIFT 8
#define DPIO_CHV_M1_DIV_BY_2 (0 << 0)
#define CHV_PLL_DW1(ch) _PIPE(ch, _CHV_PLL_DW1_CH0, _CHV_PLL_DW1_CH1)
 
#define _CHV_PLL_DW2_CH0 0x8008
#define _CHV_PLL_DW2_CH1 0x8188
#define CHV_PLL_DW2(ch) _PIPE(ch, _CHV_PLL_DW2_CH0, _CHV_PLL_DW2_CH1)
 
#define _CHV_PLL_DW3_CH0 0x800c
#define _CHV_PLL_DW3_CH1 0x818c
#define DPIO_CHV_FRAC_DIV_EN (1 << 16)
#define DPIO_CHV_FIRST_MOD (0 << 8)
#define DPIO_CHV_SECOND_MOD (1 << 8)
#define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0
#define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1)
 
#define _CHV_PLL_DW6_CH0 0x8018
#define _CHV_PLL_DW6_CH1 0x8198
#define DPIO_CHV_GAIN_CTRL_SHIFT 16
#define DPIO_CHV_INT_COEFF_SHIFT 8
#define DPIO_CHV_PROP_COEFF_SHIFT 0
#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
 
#define _CHV_CMN_DW5_CH0 0x8114
#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
#define CHV_BUFRIGHTENA1_FORCE (3 << 20)
#define CHV_BUFRIGHTENA1_MASK (3 << 20)
#define CHV_BUFLEFTENA1_DISABLE (0 << 22)
#define CHV_BUFLEFTENA1_NORMAL (1 << 22)
#define CHV_BUFLEFTENA1_FORCE (3 << 22)
#define CHV_BUFLEFTENA1_MASK (3 << 22)
 
#define _CHV_CMN_DW13_CH0 0x8134
#define _CHV_CMN_DW0_CH1 0x8080
#define DPIO_CHV_S1_DIV_SHIFT 21
#define DPIO_CHV_P1_DIV_SHIFT 13 /* 3 bits */
#define DPIO_CHV_P2_DIV_SHIFT 8 /* 5 bits */
#define DPIO_CHV_K_DIV_SHIFT 4
#define DPIO_PLL_FREQLOCK (1 << 1)
#define DPIO_PLL_LOCK (1 << 0)
#define CHV_CMN_DW13(ch) _PIPE(ch, _CHV_CMN_DW13_CH0, _CHV_CMN_DW0_CH1)
 
#define _CHV_CMN_DW14_CH0 0x8138
#define _CHV_CMN_DW1_CH1 0x8084
#define DPIO_AFC_RECAL (1 << 14)
#define DPIO_DCLKP_EN (1 << 13)
#define CHV_BUFLEFTENA2_DISABLE (0 << 17) /* CL2 DW1 only */
#define CHV_BUFLEFTENA2_NORMAL (1 << 17) /* CL2 DW1 only */
#define CHV_BUFLEFTENA2_FORCE (3 << 17) /* CL2 DW1 only */
#define CHV_BUFLEFTENA2_MASK (3 << 17) /* CL2 DW1 only */
#define CHV_BUFRIGHTENA2_DISABLE (0 << 19) /* CL2 DW1 only */
#define CHV_BUFRIGHTENA2_NORMAL (1 << 19) /* CL2 DW1 only */
#define CHV_BUFRIGHTENA2_FORCE (3 << 19) /* CL2 DW1 only */
#define CHV_BUFRIGHTENA2_MASK (3 << 19) /* CL2 DW1 only */
#define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1)
 
#define _CHV_CMN_DW19_CH0 0x814c
#define _CHV_CMN_DW6_CH1 0x8098
#define CHV_CMN_USEDCLKCHANNEL (1 << 13)
#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
 
#define CHV_CMN_DW30 0x8178
#define DPIO_LRC_BYPASS (1 << 3)
 
#define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \
(lane) * 0x200 + (offset))
 
#define CHV_TX_DW0(ch, lane) _TXLANE(ch, lane, 0x80)
#define CHV_TX_DW1(ch, lane) _TXLANE(ch, lane, 0x84)
#define CHV_TX_DW2(ch, lane) _TXLANE(ch, lane, 0x88)
#define CHV_TX_DW3(ch, lane) _TXLANE(ch, lane, 0x8c)
#define CHV_TX_DW4(ch, lane) _TXLANE(ch, lane, 0x90)
#define CHV_TX_DW5(ch, lane) _TXLANE(ch, lane, 0x94)
#define CHV_TX_DW6(ch, lane) _TXLANE(ch, lane, 0x98)
#define CHV_TX_DW7(ch, lane) _TXLANE(ch, lane, 0x9c)
#define CHV_TX_DW8(ch, lane) _TXLANE(ch, lane, 0xa0)
#define CHV_TX_DW9(ch, lane) _TXLANE(ch, lane, 0xa4)
#define CHV_TX_DW10(ch, lane) _TXLANE(ch, lane, 0xa8)
#define CHV_TX_DW11(ch, lane) _TXLANE(ch, lane, 0xac)
#define DPIO_FRC_LATENCY_SHFIT 8
#define CHV_TX_DW14(ch, lane) _TXLANE(ch, lane, 0xb8)
#define DPIO_UPAR_SHIFT 30
/*
* Fence registers
*/
622,6 → 985,7
#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
#define GEN7_FENCE_MAX_PITCH_VAL 0x0800
 
 
/* control register for cpu gtt access */
#define TILECTL 0x101000
#define TILECTL_SWZCTL (1 << 0)
631,10 → 995,14
/*
* Instruction and interrupt control regs
*/
#define PGTBL_CTL 0x02020
#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
#define PGTBL_ER 0x02024
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
#define GEN6_BSD_RING_BASE 0x12000
#define GEN8_BSD2_RING_BASE 0x1c000
#define VEBOX_RING_BASE 0x1a000
#define BLT_RING_BASE 0x22000
#define RING_TAIL(base) ((base)+0x30)
660,9 → 1028,20
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
#define ARB_MODE 0x04030
 
#define GEN7_WR_WATERMARK 0x4028
#define GEN7_GFX_PRIO_CTRL 0x402C
#define ARB_MODE 0x4030
#define ARB_MODE_SWIZZLE_SNB (1<<4)
#define ARB_MODE_SWIZZLE_IVB (1<<5)
#define GEN7_GFX_PEND_TLB0 0x4034
#define GEN7_GFX_PEND_TLB1 0x4038
/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
#define GEN7_LRA_LIMITS_BASE 0x403C
#define GEN7_LRA_LIMITS_REG_NUM 13
#define GEN7_MEDIA_MAX_REQ_COUNT 0x4070
#define GEN7_GFX_MAX_REQ_COUNT 0x4074
 
#define GAMTARBMODE 0x04a08
#define ARB_MODE_BWGTLB_DISABLE (1<<9)
#define ARB_MODE_SWIZZLE_BDW (1<<1)
678,6 → 1057,7
#define BLT_HWS_PGA_GEN7 (0x04280)
#define VEBOX_HWS_PGA_GEN7 (0x04380)
#define RING_ACTHD(base) ((base)+0x74)
#define RING_ACTHD_UDW(base) ((base)+0x5c)
#define RING_NOPID(base) ((base)+0x94)
#define RING_IMR(base) ((base)+0xa8)
#define RING_TIMESTAMP(base) ((base)+0x358)
696,6 → 1076,9
#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
 
#define GEN7_TLB_RD_ADDR 0x4700
 
#if 0
#define PRB0_TAIL 0x02030
#define PRB0_HEAD 0x02034
719,7 → 1102,9
#define RING_INSTDONE(base) ((base)+0x6c)
#define RING_INSTPS(base) ((base)+0x70)
#define RING_DMA_FADD(base) ((base)+0x78)
#define RING_DMA_FADD_UDW(base) ((base)+0x60) /* gen8+ */
#define RING_INSTPM(base) ((base)+0xc0)
#define RING_MI_MODE(base) ((base)+0x9c)
#define INSTPS 0x02070 /* 965+ only */
#define INSTDONE1 0x0207c /* 965+ only */
#define ACTHD_I965 0x02074
789,15 → 1174,23
#define _3D_CHICKEN3 0x02090
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1)
#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */
#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
 
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 12)
# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
# define MODE_IDLE (1 << 9)
# define STOP_RING (1 << 8)
 
#define GEN6_GT_MODE 0x20d0
#define GEN6_GT_MODE_HI (1 << 9)
#define GEN7_GT_MODE 0x7008
#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7))
#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
#define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16)
#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
 
#define GFX_MODE 0x02520
804,7 → 1197,7
#define GFX_MODE_GEN7 0x0229c
#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
#define GFX_RUN_LIST_ENABLE (1<<15)
#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
#define GFX_SURFACE_FAULT_ENABLE (1<<12)
#define GFX_REPLAY_MODE (1<<11)
#define GFX_PSMI_GRANULARITY (1<<10)
811,7 → 1204,10
#define GFX_PPGTT_ENABLE (1<<9)
 
#define VLV_DISPLAY_BASE 0x180000
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
 
#define VLV_GU_CTL0 (VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 (VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
818,7 → 1214,9
#define IMR 0x020a8
#define ISR 0x020ac
#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060)
#define GINT_DIS (1<<22)
#define GCFG_DIS (1<<8)
#define VLV_GUNIT_CLOCK_GATE2 (VLV_DISPLAY_BASE + 0x2064)
#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084)
#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0)
#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
825,6 → 1223,8
#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
#define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120)
#define VLV_PCBR_ADDR_SHIFT 12
 
#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
#define EIR 0x020b0
#define EMR 0x020b4
837,7 → 1237,7
#define I915_ERROR_INSTRUCTION (1<<0)
#define INSTPM 0x020c0
#define INSTPM_SELF_EN (1<<12) /* 915GM only */
#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
#define INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts
will not assert AGPBUSY# and will only
be delivered when out of C3. */
#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
918,6 → 1318,10
#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
 
#define MI_STATE 0x020e4 /* gen2 only */
#define MI_AGPBUSY_INT_EN (1 << 1) /* 85x only */
#define MI_AGPBUSY_830_MODE (1 << 0) /* 85x only */
 
#define CACHE_MODE_0 0x02120 /* 915+ only */
#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
#define CM0_IZ_OPT_DISABLE (1<<6)
934,13 → 1338,21
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
 
#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */
#define RC_OP_FLUSH_ENABLE (1<<0)
#define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
#define CACHE_MODE_1 0x7004 /* IVB+ */
#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
 
#define GEN6_BLITTER_ECOSKPD 0x221d0
#define GEN6_BLITTER_LOCK_SHIFT 16
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
 
#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
 
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
980,21 → 1392,40
 
/* These are all the "old" interrupts */
#define ILK_BSD_USER_INTERRUPT (1<<5)
 
#define I915_PM_INTERRUPT (1<<31)
#define I915_ISP_INTERRUPT (1<<22)
#define I915_LPE_PIPE_B_INTERRUPT (1<<21)
#define I915_LPE_PIPE_A_INTERRUPT (1<<20)
#define I915_MIPIB_INTERRUPT (1<<19)
#define I915_MIPIA_INTERRUPT (1<<18)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1<<16)
#define I915_MASTER_ERROR_INTERRUPT (1<<15)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1<<14)
#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1<<13)
#define I915_HWB_OOM_INTERRUPT (1<<13)
#define I915_LPE_PIPE_C_INTERRUPT (1<<12)
#define I915_SYNC_STATUS_INTERRUPT (1<<12)
#define I915_MISC_INTERRUPT (1<<11)
#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1<<10)
#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1<<9)
#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1<<8)
#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1<<3)
#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1<<2)
#define I915_DEBUG_INTERRUPT (1<<2)
#define I915_WINVALID_INTERRUPT (1<<1)
#define I915_USER_INTERRUPT (1<<1)
#define I915_ASLE_INTERRUPT (1<<0)
#define I915_BSD_USER_INTERRUPT (1 << 25)
1046,9 → 1477,8
#define FBC_CTL_IDLE_LINE (2<<2)
#define FBC_CTL_IDLE_DEBUG (3<<2)
#define FBC_CTL_CPU_FENCE (1<<1)
#define FBC_CTL_PLANEA (0<<0)
#define FBC_CTL_PLANEB (1<<0)
#define FBC_FENCE_OFF 0x0321b
#define FBC_CTL_PLANE(plane) ((plane)<<0)
#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */
#define FBC_TAG 0x03300
 
#define FBC_LL_SIZE (1536)
1057,9 → 1487,8
#define DPFC_CB_BASE 0x3200
#define DPFC_CONTROL 0x3208
#define DPFC_CTL_EN (1<<31)
#define DPFC_CTL_PLANEA (0<<30)
#define DPFC_CTL_PLANEB (1<<30)
#define IVB_DPFC_CTL_PLANE_SHIFT (29)
#define DPFC_CTL_PLANE(plane) ((plane)<<30)
#define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29)
#define DPFC_CTL_FENCE_EN (1<<29)
#define IVB_DPFC_CTL_FENCE_EN (1<<28)
#define DPFC_CTL_PERSISTENT_MODE (1<<25)
1120,13 → 1549,6
#define FBC_REND_NUKE (1<<2)
#define FBC_REND_CACHE_CLEAN (1<<1)
 
#define _HSW_PIPE_SLICE_CHICKEN_1_A 0x420B0
#define _HSW_PIPE_SLICE_CHICKEN_1_B 0x420B4
#define HSW_BYPASS_FBC_QUEUE (1<<22)
#define HSW_PIPE_SLICE_CHICKEN_1(pipe) _PIPE(pipe, + \
_HSW_PIPE_SLICE_CHICKEN_1_A, + \
_HSW_PIPE_SLICE_CHICKEN_1_B)
 
/*
* GPIO regs
*/
1163,6 → 1585,7
#define GMBUS_PORT_SSC 1
#define GMBUS_PORT_VGADDC 2
#define GMBUS_PORT_PANEL 3
#define GMBUS_PORT_DPD_CHV 3 /* HDMID_CHV */
#define GMBUS_PORT_DPC 4 /* HDMIC */
#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
#define GMBUS_PORT_DPD 6 /* HDMID */
1202,6 → 1625,10
/*
* Clock control & power management
*/
#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
#define DPLL(pipe) _PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
 
#define VGA0 0x6000
#define VGA1 0x6004
1214,9 → 1641,6
#define VGA1_PD_P1_DIV_2 (1 << 13)
#define VGA1_PD_P1_SHIFT 8
#define VGA1_PD_P1_MASK (0x1f << 8)
#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014)
#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_SDVO_HIGH_SPEED (1 << 30)
#define DPLL_DVO_2X_MODE (1 << 30)
1237,10 → 1661,23
#define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
#define DPLL_SSC_REF_CLOCK_CHV (1<<13)
#define DPLL_PORTC_READY_MASK (0xf << 4)
#define DPLL_PORTB_READY_MASK (0xf)
 
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
 
/* Additional CHV pll/phy registers */
#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
#define DPLL_PORTD_READY_MASK (0xf)
#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
#define PHY_COM_LANE_RESET_DEASSERT(phy, val) \
((phy == DPIO_PHY0) ? (val | 1) : (val | 2))
#define PHY_COM_LANE_RESET_ASSERT(phy, val) \
((phy == DPIO_PHY0) ? (val & ~1) : (val & ~2))
#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
#define PHY_POWERGOOD(phy) ((phy == DPIO_PHY0) ? (1<<31) : (1<<30))
 
/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
* this field (only one bit may be set).
1278,7 → 1715,12
#define SDVO_MULTIPLIER_MASK 0x000000ff
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */
 
#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
#define DPLL_MD(pipe) _PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
 
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
*
1315,8 → 1757,6
*/
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */
#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
 
#define _FPA0 0x06040
#define _FPA1 0x06044
1348,7 → 1788,7
#define DSTATE_PLL_D3_OFF (1<<3)
#define DSTATE_GFX_CLOCK_GATING (1<<1)
#define DSTATE_DOT_CLOCK_GATING (1<<0)
#define DSPCLK_GATE_D (dev_priv->info->display_mmio_offset + 0x6200)
#define DSPCLK_GATE_D (dev_priv->info.display_mmio_offset + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
1377,7 → 1817,7
# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */
# define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4)
/**
/*
* This bit must be set on the 830 to prevent hangs when turning off the
* overlay scaler.
*/
1397,12 → 1837,12
# define COLOR_CALCULATOR_CLOCK_GATE_DISABLE (1 << 7)
# define MOTION_COMP_CLOCK_GATE_DISABLE (1 << 6)
# define MAG_CLOCK_GATE_DISABLE (1 << 5)
/** This bit must be unset on 855,865 */
/* This bit must be unset on 855,865 */
# define MECI_CLOCK_GATE_DISABLE (1 << 4)
# define DCMP_CLOCK_GATE_DISABLE (1 << 3)
# define MEC_CLOCK_GATE_DISABLE (1 << 2)
# define MECO_CLOCK_GATE_DISABLE (1 << 1)
/** This bit must be set on 855,865. */
/* This bit must be set on 855,865. */
# define SV_CLOCK_GATE_DISABLE (1 << 0)
# define I915_MPEG_CLOCK_GATE_DISABLE (1 << 16)
# define I915_VLD_IP_PR_CLOCK_GATE_DISABLE (1 << 15)
1423,7 → 1863,7
# define I915_BY_CLOCK_GATE_DISABLE (1 << 0)
 
# define I965_RCZ_CLOCK_GATE_DISABLE (1 << 30)
/** This bit must always be set on 965G/965GM */
/* This bit must always be set on 965G/965GM */
# define I965_RCC_CLOCK_GATE_DISABLE (1 << 29)
# define I965_RCPB_CLOCK_GATE_DISABLE (1 << 28)
# define I965_DAP_CLOCK_GATE_DISABLE (1 << 27)
1430,7 → 1870,7
# define I965_ROC_CLOCK_GATE_DISABLE (1 << 26)
# define I965_GW_CLOCK_GATE_DISABLE (1 << 25)
# define I965_TD_CLOCK_GATE_DISABLE (1 << 24)
/** This bit must always be set on 965G */
/* This bit must always be set on 965G */
# define I965_ISC_CLOCK_GATE_DISABLE (1 << 23)
# define I965_IC_CLOCK_GATE_DISABLE (1 << 22)
# define I965_EU_CLOCK_GATE_DISABLE (1 << 21)
1455,6 → 1895,10
#define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9)
#define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7)
#define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6)
 
#define VDECCLK_GATE_D 0x620C /* g4x only */
#define VCP_UNIT_CLOCK_GATE_DISABLE (1 << 4)
 
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
 
1472,11 → 1916,12
/*
* Palette regs
*/
#define PALETTE_A_OFFSET 0xa000
#define PALETTE_B_OFFSET 0xa800
#define CHV_PALETTE_C_OFFSET 0xc000
#define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \
dev_priv->info.display_mmio_offset)
 
#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000)
#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800)
#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
 
/* MCH MMIO space */
 
/*
1496,7 → 1941,7
/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
 
/** 915-945 and GM965 MCH register controlling DRAM channel access */
/* 915-945 and GM965 MCH register controlling DRAM channel access */
#define DCC 0x10200
#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
1505,15 → 1950,15
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
 
/** Pineview MCH register contains DDR3 setting */
/* Pineview MCH register contains DDR3 setting */
#define CSHRDDR3CTL 0x101a8
#define CSHRDDR3CTL_DDR3 (1 << 2)
 
/** 965 MCH register controlling DRAM channel configuration */
/* 965 MCH register controlling DRAM channel configuration */
#define C0DRB3 0x10206
#define C1DRB3 0x10606
 
/** snb MCH registers for reading the DRAM channel configuration */
/* snb MCH registers for reading the DRAM channel configuration */
#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004)
#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008)
#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C)
1535,7 → 1980,7
#define MAD_DIMM_A_SIZE_SHIFT 0
#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
 
/** snb MCH registers for priority tuning */
/* snb MCH registers for priority tuning */
#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10)
#define MCH_SSKPD_WM0_MASK 0x3f
#define MCH_SSKPD_WM0_VAL 0xc
1839,7 → 2284,7
/* Same as Haswell, but 72064 bytes now. */
#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
 
 
#define CHV_CLK_CTL1 0x101100
#define VLV_CLK_CTL2 0x101104
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
 
1862,7 → 2307,7
*/
 
/* Pipe A CRC regs */
#define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050)
#define _PIPE_CRC_CTL_A 0x60050
#define PIPE_CRC_ENABLE (1 << 31)
/* ivb+ source selection */
#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
1902,11 → 2347,11
#define _PIPE_CRC_RES_4_A_IVB 0x60070
#define _PIPE_CRC_RES_5_A_IVB 0x60074
 
#define _PIPE_CRC_RES_RED_A (dev_priv->info->display_mmio_offset + 0x60060)
#define _PIPE_CRC_RES_GREEN_A (dev_priv->info->display_mmio_offset + 0x60064)
#define _PIPE_CRC_RES_BLUE_A (dev_priv->info->display_mmio_offset + 0x60068)
#define _PIPE_CRC_RES_RES1_A_I915 (dev_priv->info->display_mmio_offset + 0x6006c)
#define _PIPE_CRC_RES_RES2_A_G4X (dev_priv->info->display_mmio_offset + 0x60080)
#define _PIPE_CRC_RES_RED_A 0x60060
#define _PIPE_CRC_RES_GREEN_A 0x60064
#define _PIPE_CRC_RES_BLUE_A 0x60068
#define _PIPE_CRC_RES_RES1_A_I915 0x6006c
#define _PIPE_CRC_RES_RES2_A_G4X 0x60080
 
/* Pipe B CRC regs */
#define _PIPE_CRC_RES_1_B_IVB 0x61064
1915,64 → 2360,76
#define _PIPE_CRC_RES_4_B_IVB 0x61070
#define _PIPE_CRC_RES_5_B_IVB 0x61074
 
#define PIPE_CRC_CTL(pipe) _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000)
#define PIPE_CRC_CTL(pipe) _TRANSCODER2(pipe, _PIPE_CRC_CTL_A)
#define PIPE_CRC_RES_1_IVB(pipe) \
_PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB)
_TRANSCODER2(pipe, _PIPE_CRC_RES_1_A_IVB)
#define PIPE_CRC_RES_2_IVB(pipe) \
_PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB)
_TRANSCODER2(pipe, _PIPE_CRC_RES_2_A_IVB)
#define PIPE_CRC_RES_3_IVB(pipe) \
_PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB)
_TRANSCODER2(pipe, _PIPE_CRC_RES_3_A_IVB)
#define PIPE_CRC_RES_4_IVB(pipe) \
_PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB)
_TRANSCODER2(pipe, _PIPE_CRC_RES_4_A_IVB)
#define PIPE_CRC_RES_5_IVB(pipe) \
_PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB)
_TRANSCODER2(pipe, _PIPE_CRC_RES_5_A_IVB)
 
#define PIPE_CRC_RES_RED(pipe) \
_PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000)
_TRANSCODER2(pipe, _PIPE_CRC_RES_RED_A)
#define PIPE_CRC_RES_GREEN(pipe) \
_PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000)
_TRANSCODER2(pipe, _PIPE_CRC_RES_GREEN_A)
#define PIPE_CRC_RES_BLUE(pipe) \
_PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000)
_TRANSCODER2(pipe, _PIPE_CRC_RES_BLUE_A)
#define PIPE_CRC_RES_RES1_I915(pipe) \
_PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000)
_TRANSCODER2(pipe, _PIPE_CRC_RES_RES1_A_I915)
#define PIPE_CRC_RES_RES2_G4X(pipe) \
_PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000)
_TRANSCODER2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
 
/* Pipe A timing regs */
#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000)
#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004)
#define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008)
#define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c)
#define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010)
#define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014)
#define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c)
#define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020)
#define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028)
#define _HTOTAL_A 0x60000
#define _HBLANK_A 0x60004
#define _HSYNC_A 0x60008
#define _VTOTAL_A 0x6000c
#define _VBLANK_A 0x60010
#define _VSYNC_A 0x60014
#define _PIPEASRC 0x6001c
#define _BCLRPAT_A 0x60020
#define _VSYNCSHIFT_A 0x60028
 
/* Pipe B timing regs */
#define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000)
#define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004)
#define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008)
#define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c)
#define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010)
#define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014)
#define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c)
#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020)
#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028)
#define _HTOTAL_B 0x61000
#define _HBLANK_B 0x61004
#define _HSYNC_B 0x61008
#define _VTOTAL_B 0x6100c
#define _VBLANK_B 0x61010
#define _VSYNC_B 0x61014
#define _PIPEBSRC 0x6101c
#define _BCLRPAT_B 0x61020
#define _VSYNCSHIFT_B 0x61028
 
#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
#define TRANSCODER_A_OFFSET 0x60000
#define TRANSCODER_B_OFFSET 0x61000
#define TRANSCODER_C_OFFSET 0x62000
#define CHV_TRANSCODER_C_OFFSET 0x63000
#define TRANSCODER_EDP_OFFSET 0x6f000
 
#define _TRANSCODER2(pipe, reg) (dev_priv->info.trans_offsets[(pipe)] - \
dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
dev_priv->info.display_mmio_offset)
 
#define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A)
#define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A)
#define HSYNC(trans) _TRANSCODER2(trans, _HSYNC_A)
#define VTOTAL(trans) _TRANSCODER2(trans, _VTOTAL_A)
#define VBLANK(trans) _TRANSCODER2(trans, _VBLANK_A)
#define VSYNC(trans) _TRANSCODER2(trans, _VSYNC_A)
#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
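/*
 * Illustrative expansion (added for clarity, not from the original header),
 * assuming trans_offsets[] is filled with the TRANSCODER_*_OFFSET values:
 *   HTOTAL(TRANSCODER_B) = 0x61000 - 0x60000 + 0x60000 + display_mmio_offset
 *                        = 0x61000 + display_mmio_offset
 */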
 
/* HSW+ eDP PSR registers */
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
#define EDP_PSR_ENABLE (1<<31)
#define BDW_PSR_SINGLE_FRAME (1<<30)
#define EDP_PSR_LINK_DISABLE (0<<27)
#define EDP_PSR_LINK_STANDBY (1<<27)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
2084,7 → 2541,7
 
 
/* Hotplug control (945+ only) */
#define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110)
#define PORT_HOTPLUG_EN (dev_priv->info.display_mmio_offset + 0x61110)
#define PORTB_HOTPLUG_INT_EN (1 << 29)
#define PORTC_HOTPLUG_INT_EN (1 << 28)
#define PORTD_HOTPLUG_INT_EN (1 << 27)
2114,7 → 2571,7
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
 
#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
#define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114)
/*
* HDMI/DP bits are gen4+
*
2130,8 → 2587,14
#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
#define PORTC_HOTPLUG_INT_LONG_PULSE (2 << 19)
#define PORTC_HOTPLUG_INT_SHORT_PULSE (1 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
#define PORTB_HOTPLUG_INT_LONG_PULSE (2 << 17)
#define PORTB_HOTPLUG_INT_SHORT_PLUSE (1 << 17)
/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
2177,6 → 2640,7
#define GEN3_SDVOC 0x61160
#define GEN4_HDMIB GEN3_SDVOB
#define GEN4_HDMIC GEN3_SDVOC
#define CHV_HDMID 0x6116C
#define PCH_SDVOB 0xe1140
#define PCH_HDMIB PCH_SDVOB
#define PCH_HDMIC 0xe1150
2184,7 → 2648,7
 
#define PORT_DFT_I9XX 0x61150
#define DC_BALANCE_RESET (1 << 25)
#define PORT_DFT2_G4X 0x61154
#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154)
#define DC_BALANCE_RESET_VLV (1 << 31)
#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
#define PIPE_B_SCRAMBLE_RESET (1 << 1)
2197,7 → 2661,7
#define SDVO_PIPE_B_SELECT (1 << 30)
#define SDVO_STALL_SELECT (1 << 29)
#define SDVO_INTERRUPT_ENABLE (1 << 26)
/**
/*
* 915G/GM SDVO pixel multiplier.
* Programmed value is multiplier - 1, up to 5x.
* \sa DPLL_MD_UDI_MULTIPLIER_MASK
2237,7 → 2701,11
#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
 
/* CHV SDVO/HDMI bits: */
#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
 
 
/* DVO port control */
#define DVOA 0x61120
#define DVOB 0x61140
2332,9 → 2800,7
#define VIDEO_DIP_CTL 0x61170
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
#define VIDEO_DIP_PORT_B (1 << 29)
#define VIDEO_DIP_PORT_C (2 << 29)
#define VIDEO_DIP_PORT_D (3 << 29)
#define VIDEO_DIP_PORT(port) ((port) << 29)
#define VIDEO_DIP_PORT_MASK (3 << 29)
#define VIDEO_DIP_ENABLE_GCP (1 << 25)
#define VIDEO_DIP_ENABLE_AVI (1 << 21)
2391,7 → 2857,7
#define PP_DIVISOR 0x61210
 
/* Panel fitting */
#define PFIT_CONTROL (dev_priv->info->display_mmio_offset + 0x61230)
#define PFIT_CONTROL (dev_priv->info.display_mmio_offset + 0x61230)
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
#define PFIT_PIPE_SHIFT 29
2409,7 → 2875,7
#define PFIT_SCALING_PROGRAMMED (1 << 26)
#define PFIT_SCALING_PILLAR (2 << 26)
#define PFIT_SCALING_LETTER (3 << 26)
#define PFIT_PGM_RATIOS (dev_priv->info->display_mmio_offset + 0x61234)
#define PFIT_PGM_RATIOS (dev_priv->info.display_mmio_offset + 0x61234)
/* Pre-965 */
#define PFIT_VERT_SCALE_SHIFT 20
#define PFIT_VERT_SCALE_MASK 0xfff00000
2421,25 → 2887,25
#define PFIT_HORIZ_SCALE_SHIFT_965 0
#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
 
#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
#define PFIT_AUTO_RATIOS (dev_priv->info.display_mmio_offset + 0x61238)
 
#define _VLV_BLC_PWM_CTL2_A (dev_priv->info->display_mmio_offset + 0x61250)
#define _VLV_BLC_PWM_CTL2_B (dev_priv->info->display_mmio_offset + 0x61350)
#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250)
#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350)
#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
_VLV_BLC_PWM_CTL2_B)
 
#define _VLV_BLC_PWM_CTL_A (dev_priv->info->display_mmio_offset + 0x61254)
#define _VLV_BLC_PWM_CTL_B (dev_priv->info->display_mmio_offset + 0x61354)
#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254)
#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354)
#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
_VLV_BLC_PWM_CTL_B)
 
#define _VLV_BLC_HIST_CTL_A (dev_priv->info->display_mmio_offset + 0x61260)
#define _VLV_BLC_HIST_CTL_B (dev_priv->info->display_mmio_offset + 0x61360)
#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260)
#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360)
#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
_VLV_BLC_HIST_CTL_B)
 
/* Backlight control */
#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
#define BLC_PWM_CTL2 (dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
#define BLM_PWM_ENABLE (1 << 31)
#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
#define BLM_PIPE_SELECT (1 << 29)
2462,7 → 2928,7
#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
#define BLM_PHASE_IN_INCR_SHIFT (0)
#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
#define BLC_PWM_CTL (dev_priv->info->display_mmio_offset + 0x61254)
#define BLC_PWM_CTL (dev_priv->info.display_mmio_offset + 0x61254)
/*
* This is the most significant 15 bits of the number of backlight cycles in a
* complete cycle of the modulated backlight control.
2484,7 → 2950,7
#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
 
#define BLC_HIST_CTL (dev_priv->info->display_mmio_offset + 0x61260)
#define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260)
 
/* New registers for PCH-split platforms. Safe where new bits show up, the
* register layout matches the gen4 BLC_PWM_CTL[12]. */
2509,65 → 2975,65
 
/* TV port control */
#define TV_CTL 0x68000
/** Enables the TV encoder */
/* Enables the TV encoder */
# define TV_ENC_ENABLE (1 << 31)
/** Sources the TV encoder input from pipe B instead of A. */
/* Sources the TV encoder input from pipe B instead of A. */
# define TV_ENC_PIPEB_SELECT (1 << 30)
/** Outputs composite video (DAC A only) */
/* Outputs composite video (DAC A only) */
# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
/** Outputs SVideo video (DAC B/C) */
/* Outputs SVideo video (DAC B/C) */
# define TV_ENC_OUTPUT_SVIDEO (1 << 28)
/** Outputs Component video (DAC A/B/C) */
/* Outputs Component video (DAC A/B/C) */
# define TV_ENC_OUTPUT_COMPONENT (2 << 28)
/** Outputs Composite and SVideo (DAC A/B/C) */
/* Outputs Composite and SVideo (DAC A/B/C) */
# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28)
# define TV_TRILEVEL_SYNC (1 << 21)
/** Enables slow sync generation (945GM only) */
/* Enables slow sync generation (945GM only) */
# define TV_SLOW_SYNC (1 << 20)
/** Selects 4x oversampling for 480i and 576p */
/* Selects 4x oversampling for 480i and 576p */
# define TV_OVERSAMPLE_4X (0 << 18)
/** Selects 2x oversampling for 720p and 1080i */
/* Selects 2x oversampling for 720p and 1080i */
# define TV_OVERSAMPLE_2X (1 << 18)
/** Selects no oversampling for 1080p */
/* Selects no oversampling for 1080p */
# define TV_OVERSAMPLE_NONE (2 << 18)
/** Selects 8x oversampling */
/* Selects 8x oversampling */
# define TV_OVERSAMPLE_8X (3 << 18)
/** Selects progressive mode rather than interlaced */
/* Selects progressive mode rather than interlaced */
# define TV_PROGRESSIVE (1 << 17)
/** Sets the colorburst to PAL mode. Required for non-M PAL modes. */
/* Sets the colorburst to PAL mode. Required for non-M PAL modes. */
# define TV_PAL_BURST (1 << 16)
/** Field for setting delay of Y compared to C */
/* Field for setting delay of Y compared to C */
# define TV_YC_SKEW_MASK (7 << 12)
/** Enables a fix for 480p/576p standard definition modes on the 915GM only */
/* Enables a fix for 480p/576p standard definition modes on the 915GM only */
# define TV_ENC_SDP_FIX (1 << 11)
/**
/*
* Enables a fix for the 915GM only.
*
* Not sure what it does.
*/
# define TV_ENC_C0_FIX (1 << 10)
/** Bits that must be preserved by software */
/* Bits that must be preserved by software */
# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
# define TV_FUSE_STATE_MASK (3 << 4)
/** Read-only state that reports all features enabled */
/* Read-only state that reports all features enabled */
# define TV_FUSE_STATE_ENABLED (0 << 4)
/** Read-only state that reports that Macrovision is disabled in hardware*/
/* Read-only state that reports that Macrovision is disabled in hardware*/
# define TV_FUSE_STATE_NO_MACROVISION (1 << 4)
/** Read-only state that reports that TV-out is disabled in hardware. */
/* Read-only state that reports that TV-out is disabled in hardware. */
# define TV_FUSE_STATE_DISABLED (2 << 4)
/** Normal operation */
/* Normal operation */
# define TV_TEST_MODE_NORMAL (0 << 0)
/** Encoder test pattern 1 - combo pattern */
/* Encoder test pattern 1 - combo pattern */
# define TV_TEST_MODE_PATTERN_1 (1 << 0)
/** Encoder test pattern 2 - full screen vertical 75% color bars */
/* Encoder test pattern 2 - full screen vertical 75% color bars */
# define TV_TEST_MODE_PATTERN_2 (2 << 0)
/** Encoder test pattern 3 - full screen horizontal 75% color bars */
/* Encoder test pattern 3 - full screen horizontal 75% color bars */
# define TV_TEST_MODE_PATTERN_3 (3 << 0)
/** Encoder test pattern 4 - random noise */
/* Encoder test pattern 4 - random noise */
# define TV_TEST_MODE_PATTERN_4 (4 << 0)
/** Encoder test pattern 5 - linear color ramps */
/* Encoder test pattern 5 - linear color ramps */
# define TV_TEST_MODE_PATTERN_5 (5 << 0)
/**
/*
* This test mode forces the DACs to 50% of full output.
*
* This is used for load detection in combination with TVDAC_SENSE_MASK
2577,7 → 3043,7
 
#define TV_DAC 0x68004
# define TV_DAC_SAVE 0x00ffff00
/**
/*
* Reports that DAC state change logic has reported change (RO).
*
* This gets cleared when TV_DAC_STATE_EN is cleared
2584,13 → 3050,13
*/
# define TVDAC_STATE_CHG (1 << 31)
# define TVDAC_SENSE_MASK (7 << 28)
/** Reports that DAC A voltage is above the detect threshold */
/* Reports that DAC A voltage is above the detect threshold */
# define TVDAC_A_SENSE (1 << 30)
/** Reports that DAC B voltage is above the detect threshold */
/* Reports that DAC B voltage is above the detect threshold */
# define TVDAC_B_SENSE (1 << 29)
/** Reports that DAC C voltage is above the detect threshold */
/* Reports that DAC C voltage is above the detect threshold */
# define TVDAC_C_SENSE (1 << 28)
/**
/*
* Enables DAC state detection logic, for load-based TV detection.
*
* The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
2597,15 → 3063,15
* to off, for load detection to work.
*/
# define TVDAC_STATE_CHG_EN (1 << 27)
/** Sets the DAC A sense value to high */
/* Sets the DAC A sense value to high */
# define TVDAC_A_SENSE_CTL (1 << 26)
/** Sets the DAC B sense value to high */
/* Sets the DAC B sense value to high */
# define TVDAC_B_SENSE_CTL (1 << 25)
/** Sets the DAC C sense value to high */
/* Sets the DAC C sense value to high */
# define TVDAC_C_SENSE_CTL (1 << 24)
/** Overrides the ENC_ENABLE and DAC voltage levels */
/* Overrides the ENC_ENABLE and DAC voltage levels */
# define DAC_CTL_OVERRIDE (1 << 7)
/** Sets the slew rate. Must be preserved in software */
/* Sets the slew rate. Must be preserved in software */
# define ENC_TVDAC_SLEW_FAST (1 << 6)
# define DAC_A_1_3_V (0 << 4)
# define DAC_A_1_1_V (1 << 4)
2620,7 → 3086,7
# define DAC_C_0_7_V (2 << 0)
# define DAC_C_MASK (3 << 0)
 
/**
/*
* CSC coefficients are stored in a floating point format with 9 bits of
* mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n,
* where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
2635,7 → 3101,7
#define TV_CSC_Y2 0x68014
# define TV_BY_MASK 0x07ff0000
# define TV_BY_SHIFT 16
/**
/*
* Y attenuation for component video.
*
* Stored in 1.9 fixed point.
2652,7 → 3118,7
#define TV_CSC_U2 0x6801c
# define TV_BU_MASK 0x07ff0000
# define TV_BU_SHIFT 16
/**
/*
* U attenuation for component video.
*
* Stored in 1.9 fixed point.
2669,7 → 3135,7
#define TV_CSC_V2 0x68024
# define TV_BV_MASK 0x07ff0000
# define TV_BV_SHIFT 16
/**
/*
* V attenuation for component video.
*
* Stored in 1.9 fixed point.
2678,74 → 3144,74
# define TV_AV_SHIFT 0
 
#define TV_CLR_KNOBS 0x68028
/** 2s-complement brightness adjustment */
/* 2s-complement brightness adjustment */
# define TV_BRIGHTNESS_MASK 0xff000000
# define TV_BRIGHTNESS_SHIFT 24
/** Contrast adjustment, as a 2.6 unsigned floating point number */
/* Contrast adjustment, as a 2.6 unsigned floating point number */
# define TV_CONTRAST_MASK 0x00ff0000
# define TV_CONTRAST_SHIFT 16
/** Saturation adjustment, as a 2.6 unsigned floating point number */
/* Saturation adjustment, as a 2.6 unsigned floating point number */
# define TV_SATURATION_MASK 0x0000ff00
# define TV_SATURATION_SHIFT 8
/** Hue adjustment, as an integer phase angle in degrees */
/* Hue adjustment, as an integer phase angle in degrees */
# define TV_HUE_MASK 0x000000ff
# define TV_HUE_SHIFT 0
 
#define TV_CLR_LEVEL 0x6802c
/** Controls the DAC level for black */
/* Controls the DAC level for black */
# define TV_BLACK_LEVEL_MASK 0x01ff0000
# define TV_BLACK_LEVEL_SHIFT 16
/** Controls the DAC level for blanking */
/* Controls the DAC level for blanking */
# define TV_BLANK_LEVEL_MASK 0x000001ff
# define TV_BLANK_LEVEL_SHIFT 0
 
#define TV_H_CTL_1 0x68030
/** Number of pixels in the hsync. */
/* Number of pixels in the hsync. */
# define TV_HSYNC_END_MASK 0x1fff0000
# define TV_HSYNC_END_SHIFT 16
/** Total number of pixels minus one in the line (display and blanking). */
/* Total number of pixels minus one in the line (display and blanking). */
# define TV_HTOTAL_MASK 0x00001fff
# define TV_HTOTAL_SHIFT 0
 
#define TV_H_CTL_2 0x68034
/** Enables the colorburst (needed for non-component color) */
/* Enables the colorburst (needed for non-component color) */
# define TV_BURST_ENA (1 << 31)
/** Offset of the colorburst from the start of hsync, in pixels minus one. */
/* Offset of the colorburst from the start of hsync, in pixels minus one. */
# define TV_HBURST_START_SHIFT 16
# define TV_HBURST_START_MASK 0x1fff0000
/** Length of the colorburst */
/* Length of the colorburst */
# define TV_HBURST_LEN_SHIFT 0
# define TV_HBURST_LEN_MASK 0x0001fff
 
#define TV_H_CTL_3 0x68038
/** End of hblank, measured in pixels minus one from start of hsync */
/* End of hblank, measured in pixels minus one from start of hsync */
# define TV_HBLANK_END_SHIFT 16
# define TV_HBLANK_END_MASK 0x1fff0000
/** Start of hblank, measured in pixels minus one from start of hsync */
/* Start of hblank, measured in pixels minus one from start of hsync */
# define TV_HBLANK_START_SHIFT 0
# define TV_HBLANK_START_MASK 0x0001fff
 
#define TV_V_CTL_1 0x6803c
/** XXX */
/* XXX */
# define TV_NBR_END_SHIFT 16
# define TV_NBR_END_MASK 0x07ff0000
/** XXX */
/* XXX */
# define TV_VI_END_F1_SHIFT 8
# define TV_VI_END_F1_MASK 0x00003f00
/** XXX */
/* XXX */
# define TV_VI_END_F2_SHIFT 0
# define TV_VI_END_F2_MASK 0x0000003f
 
#define TV_V_CTL_2 0x68040
/** Length of vsync, in half lines */
/* Length of vsync, in half lines */
# define TV_VSYNC_LEN_MASK 0x07ff0000
# define TV_VSYNC_LEN_SHIFT 16
/** Offset of the start of vsync in field 1, measured in one less than the
/* Offset of the start of vsync in field 1, measured in one less than the
* number of half lines.
*/
# define TV_VSYNC_START_F1_MASK 0x00007f00
# define TV_VSYNC_START_F1_SHIFT 8
/**
/*
* Offset of the start of vsync in field 2, measured in one less than the
* number of half lines.
*/
2753,17 → 3219,17
# define TV_VSYNC_START_F2_SHIFT 0
 
#define TV_V_CTL_3 0x68044
/** Enables generation of the equalization signal */
/* Enables generation of the equalization signal */
# define TV_EQUAL_ENA (1 << 31)
/** Length of vsync, in half lines */
/* Length of vsync, in half lines */
# define TV_VEQ_LEN_MASK 0x007f0000
# define TV_VEQ_LEN_SHIFT 16
/** Offset of the start of equalization in field 1, measured in one less than
/* Offset of the start of equalization in field 1, measured in one less than
* the number of half lines.
*/
# define TV_VEQ_START_F1_MASK 0x0007f00
# define TV_VEQ_START_F1_SHIFT 8
/**
/*
* Offset of the start of equalization in field 2, measured in one less than
* the number of half lines.
*/
2771,13 → 3237,13
# define TV_VEQ_START_F2_SHIFT 0
 
#define TV_V_CTL_4 0x68048
/**
/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
*/
# define TV_VBURST_START_F1_MASK 0x003f0000
# define TV_VBURST_START_F1_SHIFT 16
/**
/*
* Offset to the end of vertical colorburst, measured in one less than the
* number of lines from the start of NBR.
*/
2785,13 → 3251,13
# define TV_VBURST_END_F1_SHIFT 0
 
#define TV_V_CTL_5 0x6804c
/**
/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
*/
# define TV_VBURST_START_F2_MASK 0x003f0000
# define TV_VBURST_START_F2_SHIFT 16
/**
/*
* Offset to the end of vertical colorburst, measured in one less than the
* number of lines from the start of NBR.
*/
2799,13 → 3265,13
# define TV_VBURST_END_F2_SHIFT 0
 
#define TV_V_CTL_6 0x68050
/**
/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
*/
# define TV_VBURST_START_F3_MASK 0x003f0000
# define TV_VBURST_START_F3_SHIFT 16
/**
/*
* Offset to the end of vertical colorburst, measured in one less than the
* number of lines from the start of NBR.
*/
2813,13 → 3279,13
# define TV_VBURST_END_F3_SHIFT 0
 
#define TV_V_CTL_7 0x68054
/**
/*
* Offset to start of vertical colorburst, measured in one less than the
* number of lines from vertical start.
*/
# define TV_VBURST_START_F4_MASK 0x003f0000
# define TV_VBURST_START_F4_SHIFT 16
/**
/*
* Offset to the end of vertical colorburst, measured in one less than the
* number of lines from the start of NBR.
*/
2827,56 → 3293,56
# define TV_VBURST_END_F4_SHIFT 0
 
#define TV_SC_CTL_1 0x68060
/** Turns on the first subcarrier phase generation DDA */
/* Turns on the first subcarrier phase generation DDA */
# define TV_SC_DDA1_EN (1 << 31)
/** Turns on the second subcarrier phase generation DDA */
/* Turns on the second subcarrier phase generation DDA */
# define TV_SC_DDA2_EN (1 << 30)
/** Turns on the third subcarrier phase generation DDA */
/* Turns on the third subcarrier phase generation DDA */
# define TV_SC_DDA3_EN (1 << 29)
/** Sets the subcarrier DDA to reset frequency every other field */
/* Sets the subcarrier DDA to reset frequency every other field */
# define TV_SC_RESET_EVERY_2 (0 << 24)
/** Sets the subcarrier DDA to reset frequency every fourth field */
/* Sets the subcarrier DDA to reset frequency every fourth field */
# define TV_SC_RESET_EVERY_4 (1 << 24)
/** Sets the subcarrier DDA to reset frequency every eighth field */
/* Sets the subcarrier DDA to reset frequency every eighth field */
# define TV_SC_RESET_EVERY_8 (2 << 24)
/** Sets the subcarrier DDA to never reset the frequency */
/* Sets the subcarrier DDA to never reset the frequency */
# define TV_SC_RESET_NEVER (3 << 24)
/** Sets the peak amplitude of the colorburst.*/
/* Sets the peak amplitude of the colorburst.*/
# define TV_BURST_LEVEL_MASK 0x00ff0000
# define TV_BURST_LEVEL_SHIFT 16
/** Sets the increment of the first subcarrier phase generation DDA */
/* Sets the increment of the first subcarrier phase generation DDA */
# define TV_SCDDA1_INC_MASK 0x00000fff
# define TV_SCDDA1_INC_SHIFT 0
 
#define TV_SC_CTL_2 0x68064
/** Sets the rollover for the second subcarrier phase generation DDA */
/* Sets the rollover for the second subcarrier phase generation DDA */
# define TV_SCDDA2_SIZE_MASK 0x7fff0000
# define TV_SCDDA2_SIZE_SHIFT 16
/** Sets the increment of the second subcarrier phase generation DDA */
/* Sets the increment of the second subcarrier phase generation DDA */
# define TV_SCDDA2_INC_MASK 0x00007fff
# define TV_SCDDA2_INC_SHIFT 0
 
#define TV_SC_CTL_3 0x68068
/** Sets the rollover for the third subcarrier phase generation DDA */
/* Sets the rollover for the third subcarrier phase generation DDA */
# define TV_SCDDA3_SIZE_MASK 0x7fff0000
# define TV_SCDDA3_SIZE_SHIFT 16
/** Sets the increment of the third subcarrier phase generation DDA */
/* Sets the increment of the third subcarrier phase generation DDA */
# define TV_SCDDA3_INC_MASK 0x00007fff
# define TV_SCDDA3_INC_SHIFT 0
 
#define TV_WIN_POS 0x68070
/** X coordinate of the display from the start of horizontal active */
/* X coordinate of the display from the start of horizontal active */
# define TV_XPOS_MASK 0x1fff0000
# define TV_XPOS_SHIFT 16
/** Y coordinate of the display from the start of vertical active (NBR) */
/* Y coordinate of the display from the start of vertical active (NBR) */
# define TV_YPOS_MASK 0x00000fff
# define TV_YPOS_SHIFT 0
 
#define TV_WIN_SIZE 0x68074
/** Horizontal size of the display window, measured in pixels*/
/* Horizontal size of the display window, measured in pixels*/
# define TV_XSIZE_MASK 0x1fff0000
# define TV_XSIZE_SHIFT 16
/**
/*
* Vertical size of the display window, measured in pixels.
*
* Must be even for interlaced modes.
2885,7 → 3351,7
# define TV_YSIZE_SHIFT 0
 
#define TV_FILTER_CTL_1 0x68080
/**
/*
* Enables automatic scaling calculation.
*
* If set, the rest of the registers are ignored, and the calculated values can
2892,21 → 3358,21
* be read back from the register.
*/
# define TV_AUTO_SCALE (1 << 31)
/**
/*
* Disables the vertical filter.
*
* This is required on modes more than 1024 pixels wide */
# define TV_V_FILTER_BYPASS (1 << 29)
/** Enables adaptive vertical filtering */
/* Enables adaptive vertical filtering */
# define TV_VADAPT (1 << 28)
# define TV_VADAPT_MODE_MASK (3 << 26)
/** Selects the least adaptive vertical filtering mode */
/* Selects the least adaptive vertical filtering mode */
# define TV_VADAPT_MODE_LEAST (0 << 26)
/** Selects the moderately adaptive vertical filtering mode */
/* Selects the moderately adaptive vertical filtering mode */
# define TV_VADAPT_MODE_MODERATE (1 << 26)
/** Selects the most adaptive vertical filtering mode */
/* Selects the most adaptive vertical filtering mode */
# define TV_VADAPT_MODE_MOST (3 << 26)
/**
/*
* Sets the horizontal scaling factor.
*
* This should be the fractional part of the horizontal scaling factor divided
2918,7 → 3384,7
# define TV_HSCALE_FRAC_SHIFT 0
 
#define TV_FILTER_CTL_2 0x68084
/**
/*
* Sets the integer part of the 3.15 fixed-point vertical scaling factor.
*
* TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
2925,7 → 3391,7
*/
# define TV_VSCALE_INT_MASK 0x00038000
# define TV_VSCALE_INT_SHIFT 15
/**
/*
* Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
*
* \sa TV_VSCALE_INT_MASK
2934,7 → 3400,7
# define TV_VSCALE_FRAC_SHIFT 0
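
The two comments above give the complete recipe for this register: compute
(src height - 1) / ((interlace * dest height) - 1) as a 3.15 fixed-point value and
split it across the INT and FRAC fields. A minimal sketch of that packing, not
driver code; the helper name is illustrative and the 0x7fff literal stands in for
the low 15 fractional bits:

/* Sketch only: pack the 3.15 fixed-point vertical scale for TV_FILTER_CTL_2. */
static inline unsigned int tv_vscale_pack(unsigned int src_h, unsigned int dest_h,
					  unsigned int interlace)
{
	/* TV_VSCALE = (src height - 1) / ((interlace * dest height) - 1) */
	unsigned int vscale = ((src_h - 1) << 15) / (interlace * dest_h - 1);

	return (vscale & TV_VSCALE_INT_MASK) |			/* integer part, bits 17:15 */
	       ((vscale & 0x7fff) << TV_VSCALE_FRAC_SHIFT);	/* fraction, bits 14:0 */
}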
 
#define TV_FILTER_CTL_3 0x68088
/**
/*
* Sets the integer part of the 3.15 fixed-point vertical scaling factor.
*
* TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
2943,7 → 3409,7
*/
# define TV_VSCALE_IP_INT_MASK 0x00038000
# define TV_VSCALE_IP_INT_SHIFT 15
/**
/*
* Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
*
* For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
2955,7 → 3421,7
 
#define TV_CC_CONTROL 0x68090
# define TV_CC_ENABLE (1 << 31)
/**
/*
* Specifies which field to send the CC data in.
*
* CC data is usually sent in field 0.
2962,19 → 3428,19
*/
# define TV_CC_FID_MASK (1 << 27)
# define TV_CC_FID_SHIFT 27
/** Sets the horizontal position of the CC data. Usually 135. */
/* Sets the horizontal position of the CC data. Usually 135. */
# define TV_CC_HOFF_MASK 0x03ff0000
# define TV_CC_HOFF_SHIFT 16
/** Sets the vertical position of the CC data. Usually 21 */
/* Sets the vertical position of the CC data. Usually 21 */
# define TV_CC_LINE_MASK 0x0000003f
# define TV_CC_LINE_SHIFT 0
 
#define TV_CC_DATA 0x68094
# define TV_CC_RDY (1 << 31)
/** Second word of CC data to be transmitted. */
/* Second word of CC data to be transmitted. */
# define TV_CC_DATA_2_MASK 0x007f0000
# define TV_CC_DATA_2_SHIFT 16
/** First word of CC data to be transmitted. */
/* First word of CC data to be transmitted. */
# define TV_CC_DATA_1_MASK 0x0000007f
# define TV_CC_DATA_1_SHIFT 0
 
2996,6 → 3462,8
#define DP_PORT_EN (1 << 31)
#define DP_PIPEB_SELECT (1 << 30)
#define DP_PIPE_MASK (1 << 30)
#define DP_PIPE_SELECT_CHV(pipe) ((pipe) << 16)
#define DP_PIPE_MASK_CHV (3 << 16)
 
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
3043,32 → 3511,32
#define DP_PLL_FREQ_160MHZ (1 << 16)
#define DP_PLL_FREQ_MASK (3 << 16)
 
/** locked once port is enabled */
/* locked once port is enabled */
#define DP_PORT_REVERSAL (1 << 15)
 
/* eDP */
#define DP_PLL_ENABLE (1 << 14)
 
/** sends the clock on lane 15 of the PEG for debug */
/* sends the clock on lane 15 of the PEG for debug */
#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
 
#define DP_SCRAMBLING_DISABLE (1 << 12)
#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
 
/** limit RGB values to avoid confusing TVs */
/* limit RGB values to avoid confusing TVs */
#define DP_COLOR_RANGE_16_235 (1 << 8)
 
/** Turn on the audio link */
/* Turn on the audio link */
#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
 
/** vs and hs sync polarity */
/* vs and hs sync polarity */
#define DP_SYNC_VS_HIGH (1 << 4)
#define DP_SYNC_HS_HIGH (1 << 3)
 
/** A fantasy */
/* A fantasy */
#define DP_DETECTED (1 << 2)
 
/** The aux channel provides a way to talk to the
/* The aux channel provides a way to talk to the
* signal sink for DDC etc. Max packet size supported
* is 20 bytes in each direction, hence the 5 fixed
* data registers
3178,10 → 3646,10
/* Display & cursor control */
 
/* Pipe A */
#define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000)
#define _PIPEADSL 0x70000
#define DSL_LINEMASK_GEN2 0x00000fff
#define DSL_LINEMASK_GEN3 0x00001fff
#define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008)
#define _PIPEACONF 0x70008
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
3211,6 → 3679,7
#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
#define PIPECONF_BPC_MASK (0x7 << 5)
3224,11 → 3693,12
#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
#define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024)
#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
#define PERF_COUNTER2_INTERRUPT_EN (1UL<<27)
#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
3239,36 → 3709,64
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19)
#define PERF_COUNTER_INTERRUPT_EN (1UL<<19)
#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL<<17)
#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14)
#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL<<15)
#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
#define PERF_COUNTER2_INTERRUPT_STATUS (1UL<<11)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10)
#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10)
#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
#define PIPE_DPST_EVENT_STATUS (1UL<<7)
#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
#define PIPE_A_PSR_STATUS_VLV (1UL<<6)
#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
#define PIPE_B_PSR_STATUS_VLV (1UL<<3)
#define PERF_COUNTER_INTERRUPT_STATUS (1UL<<3)
#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL<<1)
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
#define PIPE_HBLANK_INT_STATUS (1UL<<0)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
 
#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
 
#define PIPE_A_OFFSET 0x70000
#define PIPE_B_OFFSET 0x71000
#define PIPE_C_OFFSET 0x72000
#define CHV_PIPE_C_OFFSET 0x74000
/*
* There's actually no pipe EDP. Some pipe registers have
* simply shifted from the pipe to the transcoder, while
* keeping their original offset. Thus we need PIPE_EDP_OFFSET
* to access such registers in transcoder EDP.
*/
#define PIPE_EDP_OFFSET 0x7f000
 
#define _PIPE2(pipe, reg) (dev_priv->info.pipe_offsets[pipe] - \
dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
dev_priv->info.display_mmio_offset)
 
#define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF)
#define PIPEDSL(pipe) _PIPE2(pipe, _PIPEADSL)
#define PIPEFRAME(pipe) _PIPE2(pipe, _PIPEAFRAMEHIGH)
#define PIPEFRAMEPIXEL(pipe) _PIPE2(pipe, _PIPEAFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE2(pipe, _PIPEASTAT)
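
The _PIPE2() macro above rebases a pipe A register onto the requested pipe via the
per-device pipe_offsets[] table and then adds display_mmio_offset, so one set of
definitions covers layouts such as CHV's non-contiguous pipe C. A minimal worked
example of the arithmetic, assuming a two-pipe device with the PIPE_A/PIPE_B offsets
above and an assumed display MMIO base of 0x180000 (helper name and base value are
illustrative only, not driver code):

/* Sketch of the _PIPE2() arithmetic; the 0x180000 display base is an assumption. */
#include <stdint.h>

static const uint32_t pipe_offsets[] = { 0x70000, 0x71000 };	/* PIPE_A, PIPE_B */
static const uint32_t display_mmio_offset = 0x180000;		/* assumed base */

static inline uint32_t pipe2_example(int pipe, uint32_t reg)
{
	/* rebase the pipe A register onto the requested pipe, then add the MMIO base */
	return pipe_offsets[pipe] - pipe_offsets[0] + reg + display_mmio_offset;
}

/* pipe2_example(1, 0x70024) == 0x1f1024, i.e. PIPESTAT (_PIPEASTAT) for pipe B */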
 
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
#define PIPEMISC_DITHER_BPC_MASK (7<<5)
3279,23 → 3777,34
#define PIPEMISC_DITHER_ENABLE (1<<4)
#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
#define PIPEMISC_DITHER_TYPE_SP (0<<2)
#define PIPEMISC(pipe) _PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
#define PIPEMISC(pipe) _PIPE2(pipe, _PIPE_MISC_A)
 
#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
#define PIPEB_HLINE_INT_EN (1<<28)
#define PIPEB_VBLANK_INT_EN (1<<27)
#define SPRITED_FLIPDONE_INT_EN (1<<26)
#define SPRITEC_FLIPDONE_INT_EN (1<<25)
#define PLANEB_FLIPDONE_INT_EN (1<<24)
#define SPRITED_FLIP_DONE_INT_EN (1<<26)
#define SPRITEC_FLIP_DONE_INT_EN (1<<25)
#define PLANEB_FLIP_DONE_INT_EN (1<<24)
#define PIPE_PSR_INT_EN (1<<22)
#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
#define PIPEA_HLINE_INT_EN (1<<20)
#define PIPEA_VBLANK_INT_EN (1<<19)
#define SPRITEB_FLIPDONE_INT_EN (1<<18)
#define SPRITEA_FLIPDONE_INT_EN (1<<17)
#define SPRITEB_FLIP_DONE_INT_EN (1<<18)
#define SPRITEA_FLIP_DONE_INT_EN (1<<17)
#define PLANEA_FLIPDONE_INT_EN (1<<16)
#define PIPEC_LINE_COMPARE_INT_EN (1<<13)
#define PIPEC_HLINE_INT_EN (1<<12)
#define PIPEC_VBLANK_INT_EN (1<<11)
#define SPRITEF_FLIPDONE_INT_EN (1<<10)
#define SPRITEE_FLIPDONE_INT_EN (1<<9)
#define PLANEC_FLIPDONE_INT_EN (1<<8)
 
#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
#define SPRITEF_INVALID_GTT_INT_EN (1<<27)
#define SPRITEE_INVALID_GTT_INT_EN (1<<26)
#define PLANEC_INVALID_GTT_INT_EN (1<<25)
#define CURSORC_INVALID_GTT_INT_EN (1<<24)
#define CURSORB_INVALID_GTT_INT_EN (1<<23)
#define CURSORA_INVALID_GTT_INT_EN (1<<22)
#define SPRITED_INVALID_GTT_INT_EN (1<<21)
3305,6 → 3814,11
#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
#define PLANEA_INVALID_GTT_INT_EN (1<<16)
#define DPINVGTT_EN_MASK 0xff0000
#define DPINVGTT_EN_MASK_CHV 0xfff0000
#define SPRITEF_INVALID_GTT_STATUS (1<<11)
#define SPRITEE_INVALID_GTT_STATUS (1<<10)
#define PLANEC_INVALID_GTT_STATUS (1<<9)
#define CURSORC_INVALID_GTT_STATUS (1<<8)
#define CURSORB_INVALID_GTT_STATUS (1<<7)
#define CURSORA_INVALID_GTT_STATUS (1<<6)
#define SPRITED_INVALID_GTT_STATUS (1<<5)
3314,6 → 3828,7
#define SPRITEA_INVALID_GTT_STATUS (1<<1)
#define PLANEA_INVALID_GTT_STATUS (1<<0)
#define DPINVGTT_STATUS_MASK 0xff
#define DPINVGTT_STATUS_MASK_CHV 0xfff
 
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
3323,7 → 3838,7
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
 
#define DSPFW1 (dev_priv->info->display_mmio_offset + 0x70034)
#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
3331,11 → 3846,11
#define DSPFW_PLANEB_SHIFT 8
#define DSPFW_PLANEB_MASK (0x7f<<8)
#define DSPFW_PLANEA_MASK (0x7f)
#define DSPFW2 (dev_priv->info->display_mmio_offset + 0x70038)
#define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038)
#define DSPFW_CURSORA_MASK 0x00003f00
#define DSPFW_CURSORA_SHIFT 8
#define DSPFW_PLANEC_MASK (0x7f)
#define DSPFW3 (dev_priv->info->display_mmio_offset + 0x7003c)
#define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c)
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
3343,25 → 3858,54
#define DSPFW_HPLL_CURSOR_SHIFT 16
#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
#define DSPFW_HPLL_SR_MASK (0x1ff)
#define DSPFW4 (dev_priv->info->display_mmio_offset + 0x70070)
#define DSPFW7 (dev_priv->info->display_mmio_offset + 0x7007c)
#define DSPFW4 (dev_priv->info.display_mmio_offset + 0x70070)
#define DSPFW7 (dev_priv->info.display_mmio_offset + 0x7007c)
 
/* drain latency register values */
#define DRAIN_LATENCY_PRECISION_32 32
#define DRAIN_LATENCY_PRECISION_16 16
#define DRAIN_LATENCY_PRECISION_64 64
#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050)
#define DDL_CURSORA_PRECISION_32 (1<<31)
#define DDL_CURSORA_PRECISION_16 (0<<31)
#define DDL_CURSORA_PRECISION_64 (1<<31)
#define DDL_CURSORA_PRECISION_32 (0<<31)
#define DDL_CURSORA_SHIFT 24
#define DDL_PLANEA_PRECISION_32 (1<<7)
#define DDL_PLANEA_PRECISION_16 (0<<7)
#define DDL_SPRITEB_PRECISION_64 (1<<23)
#define DDL_SPRITEB_PRECISION_32 (0<<23)
#define DDL_SPRITEB_SHIFT 16
#define DDL_SPRITEA_PRECISION_64 (1<<15)
#define DDL_SPRITEA_PRECISION_32 (0<<15)
#define DDL_SPRITEA_SHIFT 8
#define DDL_PLANEA_PRECISION_64 (1<<7)
#define DDL_PLANEA_PRECISION_32 (0<<7)
#define DDL_PLANEA_SHIFT 0
 
#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
#define DDL_CURSORB_PRECISION_32 (1<<31)
#define DDL_CURSORB_PRECISION_16 (0<<31)
#define DDL_CURSORB_PRECISION_64 (1<<31)
#define DDL_CURSORB_PRECISION_32 (0<<31)
#define DDL_CURSORB_SHIFT 24
#define DDL_PLANEB_PRECISION_32 (1<<7)
#define DDL_PLANEB_PRECISION_16 (0<<7)
#define DDL_SPRITED_PRECISION_64 (1<<23)
#define DDL_SPRITED_PRECISION_32 (0<<23)
#define DDL_SPRITED_SHIFT 16
#define DDL_SPRITEC_PRECISION_64 (1<<15)
#define DDL_SPRITEC_PRECISION_32 (0<<15)
#define DDL_SPRITEC_SHIFT 8
#define DDL_PLANEB_PRECISION_64 (1<<7)
#define DDL_PLANEB_PRECISION_32 (0<<7)
#define DDL_PLANEB_SHIFT 0
 
#define VLV_DDL3 (VLV_DISPLAY_BASE + 0x70058)
#define DDL_CURSORC_PRECISION_64 (1<<31)
#define DDL_CURSORC_PRECISION_32 (0<<31)
#define DDL_CURSORC_SHIFT 24
#define DDL_SPRITEF_PRECISION_64 (1<<23)
#define DDL_SPRITEF_PRECISION_32 (0<<23)
#define DDL_SPRITEF_SHIFT 16
#define DDL_SPRITEE_PRECISION_64 (1<<15)
#define DDL_SPRITEE_PRECISION_32 (0<<15)
#define DDL_SPRITEE_SHIFT 8
#define DDL_PLANEC_PRECISION_64 (1<<7)
#define DDL_PLANEC_PRECISION_32 (0<<7)
#define DDL_PLANEC_SHIFT 0
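
Each VLV_DDL register packs one drain latency value per plane, sprite and cursor,
together with a precision-select bit (64 vs 32) at the shifts listed above. A
minimal sketch of assembling the plane A field, assuming the drain latency value
itself has already been computed by the watermark code (the helper name is
illustrative, not a driver function):

/* Sketch: build the plane A drain latency field of VLV_DDL1. */
static inline unsigned int vlv_ddl_planea(unsigned int drain_latency, int prec_64)
{
	unsigned int val = drain_latency << DDL_PLANEA_SHIFT;

	/* DDL_PLANEA_PRECISION_32 is the zero encoding, so only 64 sets a bit */
	val |= prec_64 ? DDL_PLANEA_PRECISION_64 : DDL_PLANEA_PRECISION_32;
	return val;
}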
 
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
3468,12 → 4012,13
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
/* GM45+ just has to be different */
#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70040)
#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70044)
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
#define _PIPEA_FRMCOUNT_GM45 0x70040
#define _PIPEA_FLIPCOUNT_GM45 0x70044
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_GM45)
#define PIPE_FLIPCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_GM45)
 
/* Cursor A & B regs */
#define _CURACNTR (dev_priv->info->display_mmio_offset + 0x70080)
#define _CURACNTR 0x70080
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
3489,7 → 4034,11
/* New style CUR*CNTR flags */
#define CURSOR_MODE 0x27
#define CURSOR_MODE_DISABLE 0x00
#define CURSOR_MODE_128_32B_AX 0x02
#define CURSOR_MODE_256_32B_AX 0x03
#define CURSOR_MODE_64_32B_AX 0x07
#define CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX)
#define CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX)
#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
#define MCURSOR_PIPE_SELECT (1 << 28)
#define MCURSOR_PIPE_A 0x00
3496,31 → 4045,37
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084)
#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088)
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
#define CURSOR_POS_MASK 0x007FF
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
#define CURSIZE 0x700a0
#define _CURBCNTR (dev_priv->info->display_mmio_offset + 0x700c0)
#define _CURBBASE (dev_priv->info->display_mmio_offset + 0x700c4)
#define _CURBPOS (dev_priv->info->display_mmio_offset + 0x700c8)
#define _CURBCNTR 0x700c0
#define _CURBBASE 0x700c4
#define _CURBPOS 0x700c8
 
#define _CURBCNTR_IVB 0x71080
#define _CURBBASE_IVB 0x71084
#define _CURBPOS_IVB 0x71088
 
#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)
#define _CURSOR2(pipe, reg) (dev_priv->info.cursor_offsets[(pipe)] - \
dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
dev_priv->info.display_mmio_offset)
 
#define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB)
#define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB)
#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
#define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
#define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
#define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
 
#define CURSOR_A_OFFSET 0x70080
#define CURSOR_B_OFFSET 0x700c0
#define CHV_CURSOR_C_OFFSET 0x700e0
#define IVB_CURSOR_B_OFFSET 0x71080
#define IVB_CURSOR_C_OFFSET 0x72080
 
/* Display A control */
#define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180)
#define _DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1<<31)
#define DISPLAY_PLANE_DISABLE 0
#define DISPPLANE_GAMMA_ENABLE (1<<30)
3554,25 → 4109,25
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
#define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184)
#define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188)
#define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */
#define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190)
#define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */
#define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */
#define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */
#define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC)
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
#define _DSPAPOS 0x7018C /* reserved */
#define _DSPASIZE 0x70190
#define _DSPASURF 0x7019C /* 965+ only */
#define _DSPATILEOFF 0x701A4 /* 965+ only */
#define _DSPAOFFSET 0x701A4 /* HSW */
#define _DSPASURFLIVE 0x701AC
 
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE)
#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS)
#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
#define DSPCNTR(plane) _PIPE2(plane, _DSPACNTR)
#define DSPADDR(plane) _PIPE2(plane, _DSPAADDR)
#define DSPSTRIDE(plane) _PIPE2(plane, _DSPASTRIDE)
#define DSPPOS(plane) _PIPE2(plane, _DSPAPOS)
#define DSPSIZE(plane) _PIPE2(plane, _DSPASIZE)
#define DSPSURF(plane) _PIPE2(plane, _DSPASURF)
#define DSPTILEOFF(plane) _PIPE2(plane, _DSPATILEOFF)
#define DSPLINOFF(plane) DSPADDR(plane)
#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET)
#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE)
 
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
3580,44 → 4135,44
#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
 
/* VBIOS flags */
#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410)
#define SWF01 (dev_priv->info->display_mmio_offset + 0x71414)
#define SWF02 (dev_priv->info->display_mmio_offset + 0x71418)
#define SWF03 (dev_priv->info->display_mmio_offset + 0x7141c)
#define SWF04 (dev_priv->info->display_mmio_offset + 0x71420)
#define SWF05 (dev_priv->info->display_mmio_offset + 0x71424)
#define SWF06 (dev_priv->info->display_mmio_offset + 0x71428)
#define SWF10 (dev_priv->info->display_mmio_offset + 0x70410)
#define SWF11 (dev_priv->info->display_mmio_offset + 0x70414)
#define SWF14 (dev_priv->info->display_mmio_offset + 0x71420)
#define SWF30 (dev_priv->info->display_mmio_offset + 0x72414)
#define SWF31 (dev_priv->info->display_mmio_offset + 0x72418)
#define SWF32 (dev_priv->info->display_mmio_offset + 0x7241c)
#define SWF00 (dev_priv->info.display_mmio_offset + 0x71410)
#define SWF01 (dev_priv->info.display_mmio_offset + 0x71414)
#define SWF02 (dev_priv->info.display_mmio_offset + 0x71418)
#define SWF03 (dev_priv->info.display_mmio_offset + 0x7141c)
#define SWF04 (dev_priv->info.display_mmio_offset + 0x71420)
#define SWF05 (dev_priv->info.display_mmio_offset + 0x71424)
#define SWF06 (dev_priv->info.display_mmio_offset + 0x71428)
#define SWF10 (dev_priv->info.display_mmio_offset + 0x70410)
#define SWF11 (dev_priv->info.display_mmio_offset + 0x70414)
#define SWF14 (dev_priv->info.display_mmio_offset + 0x71420)
#define SWF30 (dev_priv->info.display_mmio_offset + 0x72414)
#define SWF31 (dev_priv->info.display_mmio_offset + 0x72418)
#define SWF32 (dev_priv->info.display_mmio_offset + 0x7241c)
 
/* Pipe B */
#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000)
#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008)
#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024)
#define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000)
#define _PIPEBCONF (dev_priv->info.display_mmio_offset + 0x71008)
#define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024)
#define _PIPEBFRAMEHIGH 0x71040
#define _PIPEBFRAMEPIXEL 0x71044
#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71040)
#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71044)
#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71040)
#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71044)
 
 
/* Display B control */
#define _DSPBCNTR (dev_priv->info->display_mmio_offset + 0x71180)
#define _DSPBCNTR (dev_priv->info.display_mmio_offset + 0x71180)
#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
#define _DSPBADDR (dev_priv->info->display_mmio_offset + 0x71184)
#define _DSPBSTRIDE (dev_priv->info->display_mmio_offset + 0x71188)
#define _DSPBPOS (dev_priv->info->display_mmio_offset + 0x7118C)
#define _DSPBSIZE (dev_priv->info->display_mmio_offset + 0x71190)
#define _DSPBSURF (dev_priv->info->display_mmio_offset + 0x7119C)
#define _DSPBTILEOFF (dev_priv->info->display_mmio_offset + 0x711A4)
#define _DSPBOFFSET (dev_priv->info->display_mmio_offset + 0x711A4)
#define _DSPBSURFLIVE (dev_priv->info->display_mmio_offset + 0x711AC)
#define _DSPBADDR (dev_priv->info.display_mmio_offset + 0x71184)
#define _DSPBSTRIDE (dev_priv->info.display_mmio_offset + 0x71188)
#define _DSPBPOS (dev_priv->info.display_mmio_offset + 0x7118C)
#define _DSPBSIZE (dev_priv->info.display_mmio_offset + 0x71190)
#define _DSPBSURF (dev_priv->info.display_mmio_offset + 0x7119C)
#define _DSPBTILEOFF (dev_priv->info.display_mmio_offset + 0x711A4)
#define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4)
#define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC)
 
/* Sprite A control */
#define _DVSACNTR 0x72180
3866,49 → 4421,46
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
 
 
#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030)
#define _PIPEA_DATA_M1 0x60030
#define PIPE_DATA_M1_OFFSET 0
#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034)
#define _PIPEA_DATA_N1 0x60034
#define PIPE_DATA_N1_OFFSET 0
 
#define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038)
#define _PIPEA_DATA_M2 0x60038
#define PIPE_DATA_M2_OFFSET 0
#define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c)
#define _PIPEA_DATA_N2 0x6003c
#define PIPE_DATA_N2_OFFSET 0
 
#define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040)
#define _PIPEA_LINK_M1 0x60040
#define PIPE_LINK_M1_OFFSET 0
#define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044)
#define _PIPEA_LINK_N1 0x60044
#define PIPE_LINK_N1_OFFSET 0
 
#define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048)
#define _PIPEA_LINK_M2 0x60048
#define PIPE_LINK_M2_OFFSET 0
#define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c)
#define _PIPEA_LINK_N2 0x6004c
#define PIPE_LINK_N2_OFFSET 0
 
/* PIPEB timing regs are the same, starting from 0x61000 */
 
#define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030)
#define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034)
#define _PIPEB_DATA_M1 0x61030
#define _PIPEB_DATA_N1 0x61034
#define _PIPEB_DATA_M2 0x61038
#define _PIPEB_DATA_N2 0x6103c
#define _PIPEB_LINK_M1 0x61040
#define _PIPEB_LINK_N1 0x61044
#define _PIPEB_LINK_M2 0x61048
#define _PIPEB_LINK_N2 0x6104c
 
#define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038)
#define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c)
#define PIPE_DATA_M1(tran) _TRANSCODER2(tran, _PIPEA_DATA_M1)
#define PIPE_DATA_N1(tran) _TRANSCODER2(tran, _PIPEA_DATA_N1)
#define PIPE_DATA_M2(tran) _TRANSCODER2(tran, _PIPEA_DATA_M2)
#define PIPE_DATA_N2(tran) _TRANSCODER2(tran, _PIPEA_DATA_N2)
#define PIPE_LINK_M1(tran) _TRANSCODER2(tran, _PIPEA_LINK_M1)
#define PIPE_LINK_N1(tran) _TRANSCODER2(tran, _PIPEA_LINK_N1)
#define PIPE_LINK_M2(tran) _TRANSCODER2(tran, _PIPEA_LINK_M2)
#define PIPE_LINK_N2(tran) _TRANSCODER2(tran, _PIPEA_LINK_N2)
 
#define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040)
#define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044)
 
#define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048)
#define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c)
 
#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
 
/* CPU panel fitter */
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
#define _PFA_CTL_1 0x68080
4025,6 → 4577,7
#define GEN8_DE_PIPE_A_IRQ (1<<16)
#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe))
#define GEN8_GT_VECS_IRQ (1<<6)
#define GEN8_GT_PM_IRQ (1<<4)
#define GEN8_GT_VCS2_IRQ (1<<3)
#define GEN8_GT_VCS1_IRQ (1<<2)
#define GEN8_GT_BCS_IRQ (1<<1)
4052,7 → 4605,7
#define GEN8_PIPE_SPRITE_FAULT (1 << 9)
#define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
#define GEN8_PIPE_SPRITE_FLIP_DONE (1 << 5)
#define GEN8_PIPE_FLIP_DONE (1 << 4)
#define GEN8_PIPE_PRIMARY_FLIP_DONE (1 << 4)
#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
#define GEN8_PIPE_VSYNC (1 << 1)
#define GEN8_PIPE_VBLANK (1 << 0)
4084,12 → 4637,13
#define ILK_ELPIN_409_SELECT (1 << 25)
#define ILK_DPARB_GATE (1<<22)
#define ILK_VSDPFD_FULL (1<<21)
#define ILK_DISPLAY_CHICKEN_FUSES 0x42014
#define FUSE_STRAP 0x42014
#define ILK_INTERNAL_GRAPHICS_DISABLE (1<<31)
#define ILK_INTERNAL_DISPLAY_DISABLE (1<<30)
#define ILK_DISPLAY_DEBUG_DISABLE (1<<29)
#define ILK_HDCP_DISABLE (1<<25)
#define ILK_eDP_A_DISABLE (1<<24)
#define HSW_CDCLK_LIMIT (1 << 24)
#define ILK_DESKTOP (1<<23)
 
#define ILK_DSPCLK_GATE_D 0x42020
4109,7 → 4663,8
 
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define DPRS_MASK_VBLANK_SRD (1 << 0)
#define HSW_FBCQ_DIS (1 << 22)
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
 
#define DISP_ARB_CTL 0x45000
4120,6 → 4675,8
#define GEN7_MSG_CTL 0x45010
#define WAIT_FOR_PCH_RESET_ACK (1<<1)
#define WAIT_FOR_PCH_FLR_ACK (1<<0)
#define HSW_NDE_RSTWRN_OPT 0x46408
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
 
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
4127,9 → 4684,14
#define COMMON_SLICE_CHICKEN2 0x7014
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
 
#define GEN7_L3SQCREG1 0xB010
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
 
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1<<19)
#define GEN7_L3CNTLREG2 0xB020
#define GEN7_L3CNTLREG3 0xB024
 
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
4148,9 → 4710,6
#define HSW_SCRATCH1 0xb038
#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
 
#define HSW_FUSE_STRAP 0x42014
#define HSW_CDCLK_LIMIT (1 << 24)
 
/* PCH */
 
/* south display engine interrupt: IBX */
4379,8 → 4938,7
#define _PCH_TRANSA_LINK_M2 0xe0048
#define _PCH_TRANSA_LINK_N2 0xe004c
 
/* Per-transcoder DIP controls */
 
/* Per-transcoder DIP controls (PCH) */
#define _VIDEO_DIP_CTL_A 0xe0200
#define _VIDEO_DIP_DATA_A 0xe0208
#define _VIDEO_DIP_GCP_A 0xe0210
4393,6 → 4951,7
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
 
/* Per-transcoder DIP controls (VLV) */
#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
4401,12 → 4960,19
#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
 
#define CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0)
#define CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4)
#define CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8)
 
#define VLV_TVIDEO_DIP_CTL(pipe) \
_PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
_PIPE3((pipe), VLV_VIDEO_DIP_CTL_A, \
VLV_VIDEO_DIP_CTL_B, CHV_VIDEO_DIP_CTL_C)
#define VLV_TVIDEO_DIP_DATA(pipe) \
_PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
_PIPE3((pipe), VLV_VIDEO_DIP_DATA_A, \
VLV_VIDEO_DIP_DATA_B, CHV_VIDEO_DIP_DATA_C)
#define VLV_TVIDEO_DIP_GCP(pipe) \
_PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
_PIPE3((pipe), VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
VLV_VIDEO_DIP_GDCP_PAYLOAD_B, CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
 
/* Haswell DIP controls */
#define HSW_VIDEO_DIP_CTL_A 0x60200
4436,17 → 5002,17
#define HSW_VIDEO_DIP_GCP_B 0x61210
 
#define HSW_TVIDEO_DIP_CTL(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
_TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
_TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A)
#define HSW_TVIDEO_DIP_VS_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_VS_DATA_A, HSW_VIDEO_DIP_VS_DATA_B)
_TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A)
#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
_TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A)
#define HSW_TVIDEO_DIP_GCP(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
_TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
_TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A)
 
#define HSW_STEREO_3D_CTL_A 0x70020
#define S3D_ENABLE (1<<31)
4453,7 → 5019,7
#define HSW_STEREO_3D_CTL_B 0x71020
 
#define HSW_STEREO_3D_CTL(trans) \
_TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A)
_PIPE2(trans, HSW_STEREO_3D_CTL_A)
 
#define _PCH_TRANS_HTOTAL_B 0xe1000
#define _PCH_TRANS_HBLANK_B 0xe1004
4760,6 → 5326,8
#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
#define SDVO_PORT_TO_PIPE_CHV(val) (((val) & (3<<24)) >> 24)
#define DP_PORT_TO_PIPE_CHV(val) (((val) & (3<<16)) >> 16)
 
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
4816,6 → 5384,8
 
#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
 
#define VLV_PMWGICZ 0x1300a4
 
#define FORCEWAKE 0xA18C
#define FORCEWAKE_VLV 0x1300b0
#define FORCEWAKE_ACK_VLV 0x1300b4
4824,9 → 5394,16
#define FORCEWAKE_ACK_HSW 0x130044
#define FORCEWAKE_ACK 0x130090
#define VLV_GTLC_WAKE_CTRL 0x130090
#define VLV_GTLC_RENDER_CTX_EXISTS (1 << 25)
#define VLV_GTLC_MEDIA_CTX_EXISTS (1 << 24)
#define VLV_GTLC_ALLOWWAKEREQ (1 << 0)
 
#define VLV_GTLC_PW_STATUS 0x130094
#define VLV_GTLC_PW_RENDER_STATUS_MASK 0x80
#define VLV_GTLC_PW_MEDIA_STATUS_MASK 0x20
#define VLV_GTLC_ALLOWWAKEACK (1 << 0)
#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
#define VLV_GTLC_SURVIVABILITY_REG 0x130098
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
4833,6 → 5410,7
#define FORCEWAKE_MT_ACK 0x130040
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)
#define VLV_SPAREG2H 0xA194
 
#define GTFIFODBG 0x120000
#define GT_FIFO_SBDROPERR (1<<6)
4852,6 → 5430,7
#define HSW_EDRAM_PRESENT 0x120010
 
#define GEN6_UCGCTL1 0x9400
# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
 
4862,9 → 5441,19
# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
 
#define GEN6_UCGCTL3 0x9408
 
#define GEN7_UCGCTL4 0x940c
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
 
#define GEN6_RCGCTL1 0x9410
#define GEN6_RCGCTL2 0x9414
#define GEN6_RSTCTL 0x9420
 
#define GEN8_UCGCTL6 0x9430
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
 
#define GEN6_GFXPAUSE 0xA000
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
4917,6 → 5506,9
#define GEN6_RP_UP_EI 0xA068
#define GEN6_RP_DOWN_EI 0xA06C
#define GEN6_RP_IDLE_HYSTERSIS 0xA070
#define GEN6_RPDEUHWTC 0xA080
#define GEN6_RPDEUC 0xA084
#define GEN6_RPDEUCSW 0xA088
#define GEN6_RC_STATE 0xA094
#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098
#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C
4924,11 → 5516,15
#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8
#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC
#define GEN6_RC_SLEEP 0xA0B0
#define GEN6_RCUBMABDTMR 0xA0B0
#define GEN6_RC1e_THRESHOLD 0xA0B4
#define GEN6_RC6_THRESHOLD 0xA0B8
#define GEN6_RC6p_THRESHOLD 0xA0BC
#define VLV_RCEDATA 0xA0BC
#define GEN6_RC6pp_THRESHOLD 0xA0C0
#define GEN6_PMINTRMSK 0xA168
#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
#define VLV_PWRDWNUPCTL 0xA294
 
#define GEN6_PMISR 0x44020
#define GEN6_PMIMR 0x44024 /* rps_lock */
4945,14 → 5541,34
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
 
#define CHV_CZ_CLOCK_FREQ_MODE_200 200
#define CHV_CZ_CLOCK_FREQ_MODE_267 267
#define CHV_CZ_CLOCK_FREQ_MODE_320 320
#define CHV_CZ_CLOCK_FREQ_MODE_333 333
#define CHV_CZ_CLOCK_FREQ_MODE_400 400
 
#define GEN7_GT_SCRATCH_BASE 0x4F100
#define GEN7_GT_SCRATCH_REG_NUM 8
 
#define VLV_GTLC_SURVIVABILITY_REG 0x130098
#define VLV_GFX_CLK_STATUS_BIT (1<<3)
#define VLV_GFX_CLK_FORCE_ON_BIT (1<<2)
 
#define GEN6_GT_GFX_RC6_LOCKED 0x138104
#define VLV_COUNTER_CONTROL 0x138104
#define VLV_COUNT_RANGE_HIGH (1<<15)
#define VLV_MEDIA_RC0_COUNT_EN (1<<5)
#define VLV_RENDER_RC0_COUNT_EN (1<<4)
#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
#define VLV_RENDER_RC6_COUNT_EN (1<<0)
#define GEN6_GT_GFX_RC6 0x138108
#define VLV_GT_RENDER_RC6 0x138108
#define VLV_GT_MEDIA_RC6 0x13810C
 
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
#define VLV_RENDER_C0_COUNT_REG 0x138118
#define VLV_MEDIA_C0_COUNT_REG 0x13811C
 
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
5006,6 → 5622,10
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
 
#define GEN8_ROW_CHICKEN 0xe4f0
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
 
#define GEN7_ROW_CHICKEN2 0xe4f4
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
#define DOP_CLOCK_GATING_DISABLE (1<<0)
5017,7 → 5637,7
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
 
#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
#define INTEL_AUDIO_DEVCTG 0x80862802
5178,11 → 5798,12
#define TRANS_DDI_FUNC_CTL_B 0x61400
#define TRANS_DDI_FUNC_CTL_C 0x62400
#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
TRANS_DDI_FUNC_CTL_B)
#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER2(tran, TRANS_DDI_FUNC_CTL_A)
 
#define TRANS_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
#define TRANS_DDI_PORT_MASK (7<<28)
#define TRANS_DDI_PORT_SHIFT 28
#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
#define TRANS_DDI_PORT_NONE (0<<28)
#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
5203,6 → 5824,7
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
#define TRANS_DDI_BFI_ENABLE (1<<4)
 
/* DisplayPort Transport Control */
5212,6 → 5834,7
#define DP_TP_CTL_ENABLE (1<<31)
#define DP_TP_CTL_MODE_SST (0<<27)
#define DP_TP_CTL_MODE_MST (1<<27)
#define DP_TP_CTL_FORCE_ACT (1<<25)
#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
5227,7 → 5850,12
#define DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
#define DP_TP_STATUS_IDLE_DONE (1<<25)
#define DP_TP_STATUS_ACT_SENT (1<<24)
#define DP_TP_STATUS_MODE_STATUS_MST (1<<23)
#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2 (3 << 8)
#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1 (3 << 4)
#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0)
 
/* DDI Buffer Control */
#define DDI_BUF_CTL_A 0x64000
5234,7 → 5862,6
#define DDI_BUF_CTL_B 0x64100
#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1<<31)
/* Haswell */
#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
5244,16 → 5871,6
#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
/* Broadwell */
#define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */
#define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */
#define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */
#define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */
#define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */
#define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */
#define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_PORT_REVERSAL (1<<16)
#define DDI_BUF_IS_IDLE (1<<7)
5311,20 → 5928,31
#define SPLL_PLL_ENABLE (1<<31)
#define SPLL_PLL_SSC (1<<28)
#define SPLL_PLL_NON_SSC (2<<28)
#define SPLL_PLL_LCPLL (3<<28)
#define SPLL_PLL_REF_MASK (3<<28)
#define SPLL_PLL_FREQ_810MHz (0<<26)
#define SPLL_PLL_FREQ_1350MHz (1<<26)
#define SPLL_PLL_FREQ_2700MHz (2<<26)
#define SPLL_PLL_FREQ_MASK (3<<26)
 
/* WRPLL */
#define WRPLL_CTL1 0x46040
#define WRPLL_CTL2 0x46060
#define WRPLL_CTL(pll) (pll == 0 ? WRPLL_CTL1 : WRPLL_CTL2)
#define WRPLL_PLL_ENABLE (1<<31)
#define WRPLL_PLL_SELECT_SSC (0x01<<28)
#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
#define WRPLL_PLL_SSC (1<<28)
#define WRPLL_PLL_NON_SSC (2<<28)
#define WRPLL_PLL_LCPLL (3<<28)
#define WRPLL_PLL_REF_MASK (3<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
#define WRPLL_DIVIDER_REF_MASK (0xff)
#define WRPLL_DIVIDER_POST(x) ((x)<<8)
#define WRPLL_DIVIDER_POST_MASK (0x3f<<8)
#define WRPLL_DIVIDER_POST_SHIFT 8
#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
#define WRPLL_DIVIDER_FB_SHIFT 16
#define WRPLL_DIVIDER_FB_MASK (0xff<<16)
 
/* Port clock selection */
#define PORT_CLK_SEL_A 0x46100
5334,9 → 5962,11
#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
#define PORT_CLK_SEL_LCPLL_810 (2<<29)
#define PORT_CLK_SEL_SPLL (3<<29)
#define PORT_CLK_SEL_WRPLL(pll) (((pll)+4)<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
#define PORT_CLK_SEL_NONE (7<<29)
#define PORT_CLK_SEL_MASK (7<<29)
 
/* Transcoder clock selection */
#define TRANS_CLK_SEL_A 0x46140
5346,10 → 5976,12
#define TRANS_CLK_SEL_DISABLED (0x0<<29)
#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
 
#define _TRANSA_MSA_MISC 0x60410
#define _TRANSB_MSA_MISC 0x61410
#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
_TRANSB_MSA_MISC)
#define TRANSA_MSA_MISC 0x60410
#define TRANSB_MSA_MISC 0x61410
#define TRANSC_MSA_MISC 0x62410
#define TRANS_EDP_MSA_MISC 0x6f410
#define TRANS_MSA_MISC(tran) _TRANSCODER2(tran, TRANSA_MSA_MISC)
 
#define TRANS_MSA_SYNC_CLK (1<<0)
#define TRANS_MSA_6_BPC (0<<5)
#define TRANS_MSA_8_BPC (1<<5)
5372,7 → 6004,10
#define LCPLL_CD_SOURCE_FCLK (1<<21)
#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
 
#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
#define D_COMP_BDW 0x138144
#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
#define D_COMP_COMP_FORCE (1<<8)
#define D_COMP_COMP_DISABLE (1<<0)
5389,6 → 6024,8
 
/* SFUSE_STRAP */
#define SFUSE_STRAP 0xc2014
#define SFUSE_STRAP_FUSE_LOCK (1<<13)
#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7)
#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
#define SFUSE_STRAP_DDID_DETECTED (1<<0)
5451,7 → 6088,8
 
#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
#define MIPI_PORT_CTRL(tc) _TRANSCODER(tc, _MIPIA_PORT_CTRL, \
_MIPIB_PORT_CTRL)
#define DPI_ENABLE (1 << 31) /* A + B */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
5493,7 → 6131,8
 
#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
#define MIPI_TEARING_CTRL(tc) _TRANSCODER(tc, \
_MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
#define TEARING_EFFECT_DELAY_SHIFT 0
#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
 
5502,9 → 6141,10
 
/* MIPI DSI Controller and D-PHY registers */
 
#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000)
#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800)
#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
#define _MIPIB_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
#define MIPI_DEVICE_READY(tc) _TRANSCODER(tc, _MIPIA_DEVICE_READY, \
_MIPIB_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
#define ULPS_STATE_ENTER (2 << 1)
5512,12 → 6152,14
#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
#define DEVICE_READY (1 << 0)
 
#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004)
#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804)
#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008)
#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808)
#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
#define _MIPIB_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
#define MIPI_INTR_STAT(tc) _TRANSCODER(tc, _MIPIA_INTR_STAT, \
_MIPIB_INTR_STAT)
#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
#define _MIPIB_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
#define MIPI_INTR_EN(tc) _TRANSCODER(tc, _MIPIA_INTR_EN, \
_MIPIB_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
#define GEN_READ_DATA_AVAIL (1 << 29)
5551,9 → 6193,10
#define RXSOT_SYNC_ERROR (1 << 1)
#define RXSOT_ERROR (1 << 0)
 
#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c)
#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c)
#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
#define _MIPIB_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
#define MIPI_DSI_FUNC_PRG(tc) _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \
_MIPIB_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
5574,78 → 6217,94
#define DATA_LANES_PRG_REG_SHIFT 0
#define DATA_LANES_PRG_REG_MASK (7 << 0)
 
#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010)
#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810)
#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
#define _MIPIB_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
#define MIPI_HS_TX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \
_MIPIB_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
 
#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014)
#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814)
#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
#define _MIPIB_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
#define MIPI_LP_RX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \
_MIPIB_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
 
#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018)
#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818)
#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
#define _MIPIB_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
#define MIPI_TURN_AROUND_TIMEOUT(tc) _TRANSCODER(tc, \
_MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
 
#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c)
#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c)
#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
#define _MIPIB_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
#define MIPI_DEVICE_RESET_TIMER(tc) _TRANSCODER(tc, \
_MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
 
#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020)
#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820)
#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
#define _MIPIB_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
#define MIPI_DPI_RESOLUTION(tc) _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \
_MIPIB_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff
 
#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024)
#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824)
#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
#define _MIPIB_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
#define MIPI_DBI_FIFO_THROTTLE(tc) _TRANSCODER(tc, \
_MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
 
/* regs below are bits 15:0 */
#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028)
#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828)
#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
#define _MIPIB_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
#define MIPI_HSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
_MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
 
#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c)
#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c)
#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
#define _MIPIB_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
#define MIPI_HBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HBP_COUNT, \
_MIPIB_HBP_COUNT)
 
#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030)
#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830)
#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
#define _MIPIB_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
#define MIPI_HFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HFP_COUNT, \
_MIPIB_HFP_COUNT)
 
#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034)
#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834)
#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
#define _MIPIB_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
#define MIPI_HACTIVE_AREA_COUNT(tc) _TRANSCODER(tc, \
_MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
 
#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038)
#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838)
#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
#define _MIPIB_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
#define MIPI_VSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
_MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
 
#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c)
#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c)
#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
#define _MIPIB_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
#define MIPI_VBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VBP_COUNT, \
_MIPIB_VBP_COUNT)
 
#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040)
#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840)
#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
#define _MIPIB_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
#define MIPI_VFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VFP_COUNT, \
_MIPIB_VFP_COUNT)
 
#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044)
#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844)
#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
#define _MIPIB_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
#define MIPI_HIGH_LOW_SWITCH_COUNT(tc) _TRANSCODER(tc, \
_MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
 
/* regs above are bits 15:0 */
 
#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048)
#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848)
#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
#define _MIPIB_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
#define MIPI_DPI_CONTROL(tc) _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \
_MIPIB_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
#define BACKLIGHT_ON (1 << 4)
5654,27 → 6313,31
#define TURN_ON (1 << 1)
#define SHUTDOWN (1 << 0)
 
#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c)
#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c)
#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
#define _MIPIB_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
#define MIPI_DPI_DATA(tc) _TRANSCODER(tc, _MIPIA_DPI_DATA, \
_MIPIB_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
 
#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050)
#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850)
#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
#define _MIPIB_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
#define MIPI_INIT_COUNT(tc) _TRANSCODER(tc, _MIPIA_INIT_COUNT, \
_MIPIB_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
 
#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054)
#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854)
#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
#define _MIPIB_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
#define MIPI_MAX_RETURN_PKT_SIZE(tc) _TRANSCODER(tc, \
_MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
 
#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058)
#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858)
#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
#define _MIPIB_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
#define MIPI_VIDEO_MODE_FORMAT(tc) _TRANSCODER(tc, \
_MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
#define IP_TG_CONFIG (1 << 2)
5682,9 → 6345,10
#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
#define VIDEO_MODE_BURST (3 << 0)
 
#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c)
#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c)
#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
#define _MIPIB_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
#define MIPI_EOT_DISABLE(tc) _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \
_MIPIB_EOT_DISABLE)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
5694,28 → 6358,33
#define CLOCKSTOP (1 << 1)
#define EOT_DISABLE (1 << 0)
 
#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060)
#define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860)
#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
#define _MIPIB_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
#define MIPI_LP_BYTECLK(tc) _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \
_MIPIB_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
 
/* bits 31:0 */
#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064)
#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864)
#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
#define _MIPIB_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
#define MIPI_LP_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \
_MIPIB_LP_GEN_DATA)
 
/* bits 31:0 */
#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068)
#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868)
#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
#define _MIPIB_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
#define MIPI_HS_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \
_MIPIB_HS_GEN_DATA)
 
#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c)
#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c)
#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070)
#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870)
#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
#define _MIPIB_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
#define MIPI_LP_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \
_MIPIB_LP_GEN_CTRL)
#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
#define _MIPIB_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
#define MIPI_HS_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \
_MIPIB_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
#define SHORT_PACKET_PARAM_SHIFT 8
5726,9 → 6395,10
#define DATA_TYPE_MASK (0x3f << 0)
/* data type values, see include/video/mipi_display.h */
 
#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074)
#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874)
#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
#define _MIPIB_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
#define MIPI_GEN_FIFO_STAT(tc) _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \
_MIPIB_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
#define LP_CTRL_FIFO_EMPTY (1 << 26)
5744,16 → 6414,18
#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
#define HS_DATA_FIFO_FULL (1 << 0)
 
#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078)
#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878)
#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
#define _MIPIB_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
#define MIPI_HS_LP_DBI_ENABLE(tc) _TRANSCODER(tc, \
_MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
 
#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080)
#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880)
#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
#define _MIPIB_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
#define MIPI_DPHY_PARAM(tc) _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \
_MIPIB_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
#define TRAIL_COUNT_SHIFT 16
5764,34 → 6436,41
#define PREPARE_COUNT_MASK (0x3f << 0)
 
/* bits 31:0 */
#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084)
#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884)
#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
#define _MIPIB_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
#define MIPI_DBI_BW_CTRL(tc) _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \
_MIPIB_DBI_BW_CTRL)
 
#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088)
#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888)
#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ 0xb088)
#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ 0xb888)
#define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc) _TRANSCODER(tc, \
_MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
 
#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c)
#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c)
#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
#define _MIPIB_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
#define MIPI_STOP_STATE_STALL(tc) _TRANSCODER(tc, \
_MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
 
#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090)
#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890)
#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094)
#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894)
#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
#define _MIPIB_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
#define MIPI_INTR_STAT_REG_1(tc) _TRANSCODER(tc, \
_MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
#define _MIPIB_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
#define MIPI_INTR_EN_REG_1(tc) _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \
_MIPIB_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
 
/* XXX: only pipe A ?!? */
#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100)
#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100)
#define DBI_TYPEC_ENABLE (1 << 31)
#define DBI_TYPEC_WIP (1 << 30)
#define DBI_TYPEC_OPTION_SHIFT 28
5805,9 → 6484,10
 
/* MIPI adapter registers */
 
#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104)
#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904)
#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
#define _MIPIB_CTRL (dev_priv->mipi_mmio_base + 0xb904)
#define MIPI_CTRL(tc) _TRANSCODER(tc, _MIPIA_CTRL, \
_MIPIB_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
5819,22 → 6499,25
#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
#define RGB_FLIP_TO_BGR (1 << 2)
 
#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108)
#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908)
#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
#define _MIPIB_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
#define MIPI_DATA_ADDRESS(tc) _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \
_MIPIB_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
 
#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c)
#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c)
#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
#define _MIPIB_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
#define MIPI_DATA_LENGTH(tc) _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \
_MIPIB_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
 
#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110)
#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910)
#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
#define _MIPIB_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
#define MIPI_COMMAND_ADDRESS(tc) _TRANSCODER(tc, \
_MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define AUTO_PWG_ENABLE (1 << 2)
5841,20 → 6524,27
#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
#define COMMAND_VALID (1 << 0)
 
#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114)
#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914)
#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
#define _MIPIB_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
#define MIPI_COMMAND_LENGTH(tc) _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \
_MIPIB_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
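/* e.g. COMMAND_LENGTH_MASK(2) == (0xff << 16) == 0x00ff0000 */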
 
#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118)
#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918)
#define MIPI_READ_DATA_RETURN(pipe, n) \
(_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
#define _MIPIB_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
#define MIPI_READ_DATA_RETURN(tc, n) \
(_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \
+ 4 * (n)) /* n: 0...7 */
 
#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138)
#define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938)
#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
#define _MIPIB_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
#define MIPI_READ_DATA_VALID(tc) _TRANSCODER(tc, \
_MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
 
/* For UMS only (deprecated): */
#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
 
#endif /* _I915_REG_H_ */
/drivers/video/drm/i915/i915_trace.h
27,5 → 27,8
#define trace_i915_vma_bind(a, b)
#define trace_i915_vma_unbind(a)
#define trace_i915_gem_object_clflush(a)
#define trace_i915_gem_evict(dev, min_size, alignment, flags)
#define trace_i915_gem_evict_vm(vm)
#define trace_i915_gem_evict_everything(dev)
 
#endif
/drivers/video/drm/i915/intel_bios.c
24,8 → 24,8
* Eric Anholt <eric@anholt.net>
*
*/
#include <drm/drmP.h>
#include <drm/drm_dp_helper.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_bios.h"
48,13 → 48,19
total = bdb->bdb_size;
 
/* walk the sections looking for section_id */
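/* each section is a 1 byte id, a 2 byte size and then the payload; the
* bounds checks below guard against truncated or oversized blocks */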
while (index < total) {
while (index + 3 < total) {
current_id = *(base + index);
index++;
 
current_size = *((u16 *)(base + index));
index += 2;
 
if (index + current_size > total)
return NULL;
 
if (current_id == section_id)
return base + index;
 
index += current_size;
}
 
205,7 → 211,7
const struct lvds_dvo_timing *panel_dvo_timing;
const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
int i, downclock;
int i, downclock, drrs_mode;
 
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
217,6 → 223,28
 
panel_type = lvds_options->panel_type;
 
drrs_mode = (lvds_options->dps_panel_type_bits
>> (panel_type * 2)) & MODE_MASK;
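/* each panel type takes two bits of dps_panel_type_bits, e.g. panel_type 2
* selects bits 5:4 */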
/*
* VBT has static DRRS = 0 and seamless DRRS = 2.
* The code below adjusts vbt.drrs_type to match the
* enum drrs_support_type.
*/
switch (drrs_mode) {
case 0:
dev_priv->vbt.drrs_type = STATIC_DRRS_SUPPORT;
DRM_DEBUG_KMS("DRRS supported mode is static\n");
break;
case 2:
dev_priv->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
DRM_DEBUG_KMS("DRRS supported mode is seamless\n");
break;
default:
dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
break;
}
 
lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
if (!lvds_lfp_data)
return;
258,7 → 286,7
downclock = dvo_timing->clock;
}
 
if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) {
if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) {
dev_priv->lvds_downclock_avail = 1;
dev_priv->lvds_downclock = downclock * 10;
DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
298,13 → 326,21
 
entry = &backlight_data->data[panel_type];
 
dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
if (!dev_priv->vbt.backlight.present) {
DRM_DEBUG_KMS("PWM backlight not present in VBT (type %u)\n",
entry->type);
return;
}
 
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u\n",
dev_priv->vbt.backlight.pwm_freq_hz,
dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
entry->min_brightness,
dev_priv->vbt.backlight.min_brightness,
backlight_data->level[panel_type]);
}
 
317,7 → 353,7
struct drm_display_mode *panel_fixed_mode;
int index;
 
index = i915_vbt_sdvo_panel_type;
index = i915.vbt_sdvo_panel_type;
if (index == -2) {
DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
return;
515,6 → 551,16
 
if (driver->dual_frequency)
dev_priv->render_reclock_avail = true;
 
DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
/*
* If DRRS is not supported, drrs_type has to be set to 0.
* This is because the VBT encodes static DRRS as 0, and DRRS not being
* supported is instead signalled by
* driver->drrs_enabled=false
*/
if (!driver->drrs_enabled)
dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
}
 
static void
549,65 → 595,291
 
dev_priv->vbt.edp_pps = *edp_pps;
 
dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
DP_LINK_BW_1_62;
switch (edp_link_params->rate) {
case EDP_RATE_1_62:
dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
break;
case EDP_RATE_2_7:
dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
break;
default:
DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
edp_link_params->rate);
break;
}
 
switch (edp_link_params->lanes) {
case 0:
case EDP_LANE_1:
dev_priv->vbt.edp_lanes = 1;
break;
case 1:
case EDP_LANE_2:
dev_priv->vbt.edp_lanes = 2;
break;
case 3:
default:
case EDP_LANE_4:
dev_priv->vbt.edp_lanes = 4;
break;
default:
DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
edp_link_params->lanes);
break;
}
 
switch (edp_link_params->preemphasis) {
case 0:
case EDP_PREEMPHASIS_NONE:
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
break;
case 1:
case EDP_PREEMPHASIS_3_5dB:
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
break;
case 2:
case EDP_PREEMPHASIS_6dB:
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
break;
case 3:
case EDP_PREEMPHASIS_9_5dB:
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
break;
default:
DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
edp_link_params->preemphasis);
break;
}
 
switch (edp_link_params->vswing) {
case 0:
case EDP_VSWING_0_4V:
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
break;
case 1:
case EDP_VSWING_0_6V:
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
break;
case 2:
case EDP_VSWING_0_8V:
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
break;
case 3:
case EDP_VSWING_1_2V:
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
break;
default:
DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
edp_link_params->vswing);
break;
}
}
 
static u8 *goto_next_sequence(u8 *data, int *size)
{
u16 len;
int tmp = *size;
 
if (--tmp < 0)
return NULL;
 
/* goto first element */
data++;
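/* each element starts with a 1 byte element id; SEND_PKT elements then
* carry a command flag byte, a data type byte, a 2 byte payload length and
* the payload, DELAY elements a 4 byte delay, GPIO elements 2 more bytes;
* a 0 in the id position ends the sequence */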
while (1) {
switch (*data) {
case MIPI_SEQ_ELEM_SEND_PKT:
/*
* skip by this element payload size
* skip elem id, command flag and data type
*/
tmp -= 5;
if (tmp < 0)
return NULL;
 
data += 3;
len = *((u16 *)data);
 
tmp -= len;
if (tmp < 0)
return NULL;
 
/* skip by len */
data = data + 2 + len;
break;
case MIPI_SEQ_ELEM_DELAY:
/* skip the elem id (1 byte) and the 4 byte delay */
tmp -= 5;
if (tmp < 0)
return NULL;
 
data += 5;
break;
case MIPI_SEQ_ELEM_GPIO:
tmp -= 3;
if (tmp < 0)
return NULL;
 
data += 3;
break;
default:
DRM_ERROR("Unknown element\n");
return NULL;
}
 
/* end of sequence ? */
if (*data == 0)
break;
}
 
/* goto next sequence or end of block byte */
if (--tmp < 0)
return NULL;
 
data++;
 
/* update amount of data left for the sequence block to be parsed */
*size = tmp;
return data;
}
 
static void
parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
struct bdb_mipi *mipi;
struct bdb_mipi_config *start;
struct bdb_mipi_sequence *sequence;
struct mipi_config *config;
struct mipi_pps_data *pps;
u8 *data, *seq_data;
int i, panel_id, seq_size;
u16 block_size;
 
mipi = find_section(bdb, BDB_MIPI);
if (!mipi) {
DRM_DEBUG_KMS("No MIPI BDB found");
/* parse MIPI blocks only if LFP type is MIPI */
if (!dev_priv->vbt.has_mipi)
return;
 
/* Initialize this to undefined indicating no generic MIPI support */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
 
/* Block #40 is already parsed and panel_fixed_mode is
* stored in dev_priv->lfp_lvds_vbt_mode
* reuse this when needed
*/
 
/* Parse block #52 for the panel index, using the panel_type already
* parsed above
*/
start = find_section(bdb, BDB_MIPI_CONFIG);
if (!start) {
DRM_DEBUG_KMS("No MIPI config BDB found");
return;
}
 
/* XXX: add more info */
dev_priv->vbt.dsi.panel_id = mipi->panel_id;
DRM_DEBUG_DRIVER("Found MIPI Config block, panel index = %d\n",
panel_type);
 
/*
* get hold of the correct configuration block and pps data, using
* panel_type as the index
*/
config = &start->config[panel_type];
pps = &start->pps[panel_type];
 
/* store the full data for now; trim later if not all of it is needed */
dev_priv->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
if (!dev_priv->vbt.dsi.config)
return;
 
dev_priv->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
if (!dev_priv->vbt.dsi.pps) {
kfree(dev_priv->vbt.dsi.config);
return;
}
 
/* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
 
/* Check if we have sequence block as well */
sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
if (!sequence) {
DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
return;
}
 
DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
 
block_size = get_blocksize(sequence);
 
/*
* parse the sequence block for individual sequences
*/
dev_priv->vbt.dsi.seq_version = sequence->version;
 
seq_data = &sequence->data[0];
 
/*
* the sequence block is variable length, so parse it to find the
* sequence data for the specific panel id
*/
for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) {
panel_id = *seq_data;
seq_size = *((u16 *) (seq_data + 1));
if (panel_id == panel_type)
break;
 
/* skip the sequence including seq header of 3 bytes */
seq_data = seq_data + 3 + seq_size;
if ((seq_data - &sequence->data[0]) > block_size) {
DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n");
return;
}
}
 
if (i == MAX_MIPI_CONFIGURATIONS) {
DRM_ERROR("Sequence block detected but no valid configuration\n");
return;
}
 
/* check that the found sequence is completely within the sequence
* block, just being paranoid */
if (seq_size > block_size) {
DRM_ERROR("Corrupted sequence/size, bailing out\n");
return;
}
 
/* skip the panel id (1 byte) and seq size (2 bytes) */
dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL);
if (!dev_priv->vbt.dsi.data)
return;
 
/*
* loop over the sequence data and split it into the individual sequences
* There are only 5 types of sequences as of now
*/
data = dev_priv->vbt.dsi.data;
dev_priv->vbt.dsi.size = seq_size;
 
/* two consecutive 0x00 indicate end of all sequences */
while (1) {
int seq_id = *data;
if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) {
dev_priv->vbt.dsi.sequence[seq_id] = data;
DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id);
} else {
DRM_ERROR("undefined sequence\n");
goto err;
}
 
/* partial parsing to skip elements */
data = goto_next_sequence(data, &seq_size);
 
if (data == NULL) {
DRM_ERROR("Sequence elements going beyond block itself. Sequence block parsing failed\n");
goto err;
}
 
if (*data == 0)
break; /* end of sequence reached */
}
 
DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n");
return;
err:
kfree(dev_priv->vbt.dsi.data);
dev_priv->vbt.dsi.data = NULL;
 
/* error during parsing, so set all pointers to NULL since the data
* was only partially parsed */
memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
}
 
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
struct bdb_header *bdb)
{
788,6 → 1060,15
/* skip the device block if device type is invalid */
continue;
}
 
if (p_child->common.dvo_port >= DVO_PORT_MIPIA
&& p_child->common.dvo_port <= DVO_PORT_MIPID
&& p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT) {
DRM_DEBUG_KMS("Found MIPI as LFP\n");
dev_priv->vbt.has_mipi = 1;
dev_priv->vbt.dsi.port = p_child->common.dvo_port;
}
 
child_dev_ptr = dev_priv->vbt.child_dev + count;
count++;
memcpy((void *)child_dev_ptr, (void *)p_child,
804,6 → 1085,9
 
dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
 
/* Default to having backlight */
dev_priv->vbt.backlight.present = true;
 
/* LFP panel data */
dev_priv->vbt.lvds_dither = 1;
dev_priv->vbt.lvds_vbt = 0;
838,7 → 1122,46
}
}
 
static struct bdb_header *validate_vbt(char *base, size_t size,
struct vbt_header *vbt,
const char *source)
{
size_t offset;
struct bdb_header *bdb;
 
if (vbt == NULL) {
DRM_DEBUG_DRIVER("VBT signature missing\n");
return NULL;
}
 
offset = (char *)vbt - base;
if (offset + sizeof(struct vbt_header) > size) {
DRM_DEBUG_DRIVER("VBT header incomplete\n");
return NULL;
}
 
if (memcmp(vbt->signature, "$VBT", 4)) {
DRM_DEBUG_DRIVER("VBT invalid signature\n");
return NULL;
}
 
offset += vbt->bdb_offset;
if (offset + sizeof(struct bdb_header) > size) {
DRM_DEBUG_DRIVER("BDB header incomplete\n");
return NULL;
}
 
bdb = (struct bdb_header *)(base + offset);
if (offset + bdb->bdb_size > size) {
DRM_DEBUG_DRIVER("BDB incomplete\n");
return NULL;
}
 
DRM_DEBUG_KMS("Using VBT from %s: %20s\n",
source, vbt->signature);
return bdb;
}
 
/**
* intel_parse_bios - find VBT and initialize settings from the BIOS
* @dev: DRM device
862,20 → 1185,13
init_vbt_defaults(dev_priv);
 
/* XXX Should this validation be moved to intel_opregion.c? */
if (dev_priv->opregion.vbt) {
struct vbt_header *vbt = dev_priv->opregion.vbt;
if (memcmp(vbt->signature, "$VBT", 4) == 0) {
DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
vbt->signature);
bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
} else
dev_priv->opregion.vbt = NULL;
}
if (dev_priv->opregion.vbt)
bdb = validate_vbt((char *)dev_priv->opregion.header, OPREGION_SIZE,
(struct vbt_header *)dev_priv->opregion.vbt,
"OpRegion");
 
if (bdb == NULL) {
struct vbt_header *vbt = NULL;
size_t size;
int i;
size_t i, size;
 
bios = pci_map_rom(pdev, &size);
if (!bios)
883,19 → 1199,18
 
/* Scour memory looking for the VBT signature */
for (i = 0; i + 4 < size; i++) {
if (!memcmp(bios + i, "$VBT", 4)) {
vbt = (struct vbt_header *)(bios + i);
if (memcmp(bios + i, "$VBT", 4) == 0) {
bdb = validate_vbt(bios, size,
(struct vbt_header *)(bios + i),
"PCI ROM");
break;
}
}
 
if (!vbt) {
DRM_DEBUG_DRIVER("VBT signature missing\n");
if (!bdb) {
pci_unmap_rom(pdev, bios);
return -1;
}
 
bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
}
 
/* Grab useful general definitions */
/drivers/video/drm/i915/intel_bios.h
104,7 → 104,8
#define BDB_LVDS_LFP_DATA 42
#define BDB_LVDS_BACKLIGHT 43
#define BDB_LVDS_POWER 44
#define BDB_MIPI 50
#define BDB_MIPI_CONFIG 52
#define BDB_MIPI_SEQUENCE 53
#define BDB_SKIP 254 /* VBIOS private block, ignore */
 
struct bdb_general_features {
281,6 → 282,9
union child_device_config devices[0];
} __packed;
 
/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
#define MODE_MASK 0x3
 
struct bdb_lvds_options {
u8 panel_type;
u8 rsvd1;
293,6 → 297,18
u8 lvds_edid:1;
u8 rsvd2:1;
u8 rsvd4;
/* LVDS Panel channel bits stored here */
u32 lvds_panel_channel_bits;
/* LVDS SSC (Spread Spectrum Clock) bits stored here. */
u16 ssc_bits;
u16 ssc_freq;
u16 ssc_ddt;
/* Panel color depth defined here */
u16 panel_color_depth;
/* LVDS panel type bits stored here */
u32 dps_panel_type_bits;
/* LVDS backlight control type bits stored here */
u32 blt_control_type_bits;
} __packed;
 
/* LFP pointer table contains entries to the struct below */
373,6 → 389,9
struct bdb_lvds_lfp_data_entry data[16];
} __packed;
 
#define BDB_BACKLIGHT_TYPE_NONE 0
#define BDB_BACKLIGHT_TYPE_PWM 2
 
struct bdb_lfp_backlight_data_entry {
u8 type:2;
u8 active_low_pwm:1;
478,6 → 497,20
 
u8 hdmi_termination;
u8 custom_vbt_version;
/* Driver features data block */
u16 rmpm_enabled:1;
u16 s2ddt_enabled:1;
u16 dpst_enabled:1;
u16 bltclt_enabled:1;
u16 adb_enabled:1;
u16 drrs_enabled:1;
u16 grs_enabled:1;
u16 gpmt_enabled:1;
u16 tbt_enabled:1;
u16 psr_enabled:1;
u16 ips_enabled:1;
u16 reserved3:4;
u16 pc_feature_valid:1;
} __packed;
 
#define EDP_18BPP 0
710,45 → 743,195
#define DVO_PORT_DPC 8
#define DVO_PORT_DPD 9
#define DVO_PORT_DPA 10
#define DVO_PORT_MIPIA 21
#define DVO_PORT_MIPIB 22
#define DVO_PORT_MIPIC 23
#define DVO_PORT_MIPID 24
 
/* MIPI DSI panel info */
struct bdb_mipi {
/* Block 52 contains MIPI Panel info
* 6 such entries will be there. Index into the correct
* entry is based on the panel_index in #40 LFP
*/
#define MAX_MIPI_CONFIGURATIONS 6
 
#define MIPI_DSI_UNDEFINED_PANEL_ID 0
#define MIPI_DSI_GENERIC_PANEL_ID 1
 
struct mipi_config {
u16 panel_id;
u16 bridge_revision;
 
/* General params */
u32 dithering:1;
u32 bpp_pixel_format:1;
/* General Params */
u32 enable_dithering:1;
u32 rsvd1:1;
u32 dphy_valid:1;
u32 resvd2:28;
u32 is_bridge:1;
 
u16 port_info;
u16 rsvd3:2;
u16 num_lanes:2;
u16 rsvd4:12;
u32 panel_arch_type:2;
u32 is_cmd_mode:1;
 
/* DSI config */
u16 virt_ch_num:2;
u16 vtm:2;
u16 rsvd5:12;
#define NON_BURST_SYNC_PULSE 0x1
#define NON_BURST_SYNC_EVENTS 0x2
#define BURST_MODE 0x3
u32 video_transfer_mode:2;
 
u32 dsi_clock;
u32 cabc_supported:1;
u32 pwm_blc:1;
 
/* Bit 13:10 */
#define PIXEL_FORMAT_RGB565 0x1
#define PIXEL_FORMAT_RGB666 0x2
#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3
#define PIXEL_FORMAT_RGB888 0x4
u32 videomode_color_format:4;
 
/* Bit 15:14 */
#define ENABLE_ROTATION_0 0x0
#define ENABLE_ROTATION_90 0x1
#define ENABLE_ROTATION_180 0x2
#define ENABLE_ROTATION_270 0x3
u32 rotation:2;
u32 bta_enabled:1;
u32 rsvd2:15;
 
/* 2 byte Port Description */
#define DUAL_LINK_NOT_SUPPORTED 0
#define DUAL_LINK_FRONT_BACK 1
#define DUAL_LINK_PIXEL_ALT 2
u16 dual_link:2;
u16 lane_cnt:2;
u16 rsvd3:12;
 
u16 rsvd4;
 
u8 rsvd5[5];
u32 dsi_ddr_clk;
u32 bridge_ref_clk;
u16 rsvd_pwr;
 
/* Dphy Params */
u32 prepare_cnt:5;
u32 rsvd6:3;
#define BYTE_CLK_SEL_20MHZ 0
#define BYTE_CLK_SEL_10MHZ 1
#define BYTE_CLK_SEL_5MHZ 2
u8 byte_clk_sel:2;
 
u8 rsvd6:6;
 
/* DPHY Flags */
u16 dphy_param_valid:1;
u16 eot_pkt_disabled:1;
u16 enable_clk_stop:1;
u16 rsvd7:13;
 
u32 hs_tx_timeout;
u32 lp_rx_timeout;
u32 turn_around_timeout;
u32 device_reset_timer;
u32 master_init_timer;
u32 dbi_bw_timer;
u32 lp_byte_clk_val;
 
/* 4 byte Dphy Params */
u32 prepare_cnt:6;
u32 rsvd8:2;
u32 clk_zero_cnt:8;
u32 trail_cnt:5;
u32 rsvd7:3;
u32 rsvd9:3;
u32 exit_zero_cnt:6;
u32 rsvd8:2;
u32 rsvd10:2;
 
u32 clk_lane_switch_cnt;
u32 hl_switch_cnt;
u32 lp_byte_clk;
u32 clk_lane_switch_cnt;
 
u32 rsvd11[6];
 
/* timings based on dphy spec */
u8 tclk_miss;
u8 tclk_post;
u8 rsvd12;
u8 tclk_pre;
u8 tclk_prepare;
u8 tclk_settle;
u8 tclk_term_enable;
u8 tclk_trail;
u16 tclk_prepare_clkzero;
u8 rsvd13;
u8 td_term_enable;
u8 teot;
u8 ths_exit;
u8 ths_prepare;
u16 ths_prepare_hszero;
u8 rsvd14;
u8 ths_settle;
u8 ths_skip;
u8 ths_trail;
u8 tinit;
u8 tlpx;
u8 rsvd15[3];
 
/* GPIOs */
u8 panel_enable;
u8 bl_enable;
u8 pwm_enable;
u8 reset_r_n;
u8 pwr_down_r;
u8 stdby_r_n;
 
} __packed;
 
/* Block 52 contains MIPI configuration block
* 6 mipi_config entries, followed by 6 pps data
* blocks below
*
* all delays have a unit of 100us
*/
struct mipi_pps_data {
u16 panel_on_delay;
u16 bl_enable_delay;
u16 bl_disable_delay;
u16 panel_off_delay;
u16 panel_power_cycle_delay;
};
 
struct bdb_mipi_config {
struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
};
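/* A minimal indexing sketch, mirroring parse_mipi() in intel_bios.c: the
* panel_type parsed from block #40 selects one entry of each array, e.g.
*
*	struct bdb_mipi_config *start = find_section(bdb, BDB_MIPI_CONFIG);
*	struct mipi_config *config = &start->config[panel_type];
*	struct mipi_pps_data *pps = &start->pps[panel_type];
*/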
 
/* Block 53 contains MIPI sequences as needed by the panel
* for enabling it. This block can be variable in size and
* can contain a maximum of 6 blocks
*/
struct bdb_mipi_sequence {
u8 version;
u8 data[0];
};
 
/* MIPI Sequence Block definitions */
enum mipi_seq {
MIPI_SEQ_UNDEFINED = 0,
MIPI_SEQ_ASSERT_RESET,
MIPI_SEQ_INIT_OTP,
MIPI_SEQ_DISPLAY_ON,
MIPI_SEQ_DISPLAY_OFF,
MIPI_SEQ_DEASSERT_RESET,
MIPI_SEQ_MAX
};
 
enum mipi_seq_element {
MIPI_SEQ_ELEM_UNDEFINED = 0,
MIPI_SEQ_ELEM_SEND_PKT,
MIPI_SEQ_ELEM_DELAY,
MIPI_SEQ_ELEM_GPIO,
MIPI_SEQ_ELEM_STATUS,
MIPI_SEQ_ELEM_MAX
};
 
enum mipi_gpio_pin_index {
MIPI_GPIO_UNDEFINED = 0,
MIPI_GPIO_PANEL_ENABLE,
MIPI_GPIO_BL_ENABLE,
MIPI_GPIO_PWM_ENABLE,
MIPI_GPIO_RESET_N,
MIPI_GPIO_PWR_DOWN_R,
MIPI_GPIO_STDBY_RST_N,
MIPI_GPIO_MAX
};
 
#endif /* _I830_BIOS_H_ */
/drivers/video/drm/i915/intel_crt.c
67,8 → 67,13
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;
 
tmp = I915_READ(crt->adpa_reg);
 
if (!(tmp & ADPA_DAC_ENABLE))
131,6 → 136,18
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
}
 
static void hsw_crt_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
I915_WRITE(SPLL_CTL,
SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
POSTING_READ(SPLL_CTL);
udelay(20);
}
 
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
138,28 → 155,49
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 temp;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 adpa;
 
temp = I915_READ(crt->adpa_reg);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
if (INTEL_INFO(dev)->gen >= 5)
adpa = ADPA_HOTPLUG_BITS;
else
adpa = 0;
 
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
adpa |= ADPA_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
 
/* For CPT allow 3 pipe config, for others just use A or B */
if (HAS_PCH_LPT(dev))
; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
else if (crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
 
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(crtc->pipe), 0);
 
switch (mode) {
case DRM_MODE_DPMS_ON:
temp |= ADPA_DAC_ENABLE;
adpa |= ADPA_DAC_ENABLE;
break;
case DRM_MODE_DPMS_STANDBY:
temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
adpa |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
break;
case DRM_MODE_DPMS_SUSPEND:
temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
adpa |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
break;
case DRM_MODE_DPMS_OFF:
temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
adpa |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
break;
}
 
I915_WRITE(crt->adpa_reg, temp);
I915_WRITE(crt->adpa_reg, adpa);
}
 
static void intel_disable_crt(struct intel_encoder *encoder)
167,6 → 205,20
intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
}
 
 
static void hsw_crt_post_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val;
 
DRM_DEBUG_KMS("Disabling SPLL\n");
val = I915_READ(SPLL_CTL);
WARN_ON(!(val & SPLL_PLL_ENABLE));
I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
POSTING_READ(SPLL_CTL);
}
 
static void intel_enable_crt(struct intel_encoder *encoder)
{
struct intel_crt *crt = intel_encoder_to_crt(encoder);
261,43 → 313,13
if (HAS_PCH_LPT(dev))
pipe_config->pipe_bpp = 24;
 
return true;
/* FDI must always be 2.7 GHz */
if (HAS_DDI(dev)) {
pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
pipe_config->port_clock = 135000 * 2;
}
 
static void intel_crt_mode_set(struct intel_encoder *encoder)
{
 
struct drm_device *dev = encoder->base.dev;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 adpa;
 
if (INTEL_INFO(dev)->gen >= 5)
adpa = ADPA_HOTPLUG_BITS;
else
adpa = 0;
 
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
adpa |= ADPA_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
 
/* For CPT allow 3 pipe config, for others just use A or B */
if (HAS_PCH_LPT(dev))
; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
else if (crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
 
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(crtc->pipe), 0);
 
I915_WRITE(crt->adpa_reg, adpa);
return true;
}
 
static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
629,14 → 651,21
intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
enum drm_connector_status status;
struct intel_load_detect_pipe tmp;
struct drm_modeset_acquire_ctx ctx;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, drm_get_connector_name(connector),
connector->base.id, connector->name,
force);
 
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
if (I915_HAS_HOTPLUG(dev)) {
/* We can not rely on the HPD pin always being correctly wired
* up, for example many KVM do not pass it through, and so
644,26 → 673,35
*/
if (intel_crt_detect_hotplug(connector)) {
DRM_DEBUG_KMS("CRT detected via hotplug\n");
return connector_status_connected;
status = connector_status_connected;
goto out;
} else
DRM_DEBUG_KMS("CRT not detected via hotplug\n");
}
 
if (intel_crt_detect_ddc(connector))
return connector_status_connected;
if (intel_crt_detect_ddc(connector)) {
status = connector_status_connected;
goto out;
}
 
/* Load detection is broken on HPD capable machines. Whoever wants a
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
if (I915_HAS_HOTPLUG(dev))
return connector_status_disconnected;
if (I915_HAS_HOTPLUG(dev)) {
status = connector_status_disconnected;
goto out;
}
 
if (!force)
return connector->status;
if (!force) {
status = connector->status;
goto out;
}
 
drm_modeset_acquire_init(&ctx, 0);
 
/* for pre-945g platforms use load detect */
if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else
672,6 → 710,11
} else
status = connector_status_unknown;
 
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
 
out:
intel_display_power_put(dev_priv, power_domain);
return status;
}
 
685,17 → 728,28
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
int ret;
struct i2c_adapter *i2c;
 
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
return ret;
goto out;
 
/* Try to probe digital port for output in DVI-I -> VGA mode. */
i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
return intel_crt_ddc_get_modes(connector, i2c);
ret = intel_crt_ddc_get_modes(connector, i2c);
 
out:
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
 
static int intel_crt_set_property(struct drm_connector *connector,
777,7 → 831,7
intel_connector_attach_encoder(intel_connector, &crt->base);
 
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = true;
crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
if (IS_I830(dev))
crt->base.crtc_mask = (1 << 0);
else
797,7 → 851,6
crt->adpa_reg = ADPA;
 
crt->base.compute_config = intel_crt_compute_config;
crt->base.mode_set = intel_crt_mode_set;
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
if (I915_HAS_HOTPLUG(dev))
805,15 → 858,18
if (HAS_DDI(dev)) {
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
crt->base.pre_enable = hsw_crt_pre_enable;
crt->base.post_disable = hsw_crt_post_disable;
} else {
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
}
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;
 
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
drm_sysfs_connector_add(connector);
drm_connector_register(connector);
 
if (!I915_HAS_HOTPLUG(dev))
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
834,4 → 890,6
 
dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
}
 
intel_crt_reset(connector);
}
/drivers/video/drm/i915/intel_ddi.c
76,12 → 76,12
0x00FFFFFF, 0x00000012, /* eDP parameters */
0x00EBAFFF, 0x00020011,
0x00C71FFF, 0x0006000F,
0x00AAAFFF, 0x000E000A,
0x00FFFFFF, 0x00020011,
0x00DB6FFF, 0x0005000F,
0x00BEEFFF, 0x000A000C,
0x00FFFFFF, 0x0005000F,
0x00DB6FFF, 0x000A000C,
0x00FFFFFF, 0x000A000C,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
};
 
89,12 → 89,12
0x00FFFFFF, 0x0007000E, /* DP parameters */
0x00D75FFF, 0x000E000A,
0x00BEFFFF, 0x00140006,
0x80B2CFFF, 0x001B0002,
0x00FFFFFF, 0x000E000A,
0x00D75FFF, 0x00180004,
0x80CB2FFF, 0x001B0002,
0x00F7DFFF, 0x00180004,
0x80D75FFF, 0x001B0002,
0x80FFFFFF, 0x001B0002,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
};
 
116,7 → 116,10
struct drm_encoder *encoder = &intel_encoder->base;
int type = intel_encoder->type;
 
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
if (type == INTEL_OUTPUT_DP_MST) {
struct intel_digital_port *intel_dig_port = enc_to_mst(encoder)->primary;
return intel_dig_port->port;
} else if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
277,7 → 280,8
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
 
/* Configure Port Clock Select */
I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config.ddi_pll_sel);
WARN_ON(intel_crtc->config.ddi_pll_sel != PORT_CLK_SEL_SPLL);
 
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
364,19 → 368,8
DRM_ERROR("FDI link training failed!\n");
}
 
static void intel_ddi_mode_set(struct intel_encoder *encoder)
void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
{
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
int port = intel_ddi_get_encoder_port(encoder);
int pipe = crtc->pipe;
int type = encoder->type;
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
 
DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
 
crtc->eld_vld = false;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(&encoder->base);
385,34 → 378,8
DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
 
if (intel_dp->has_audio) {
DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
pipe_name(crtc->pipe));
 
/* write eld */
DRM_DEBUG_DRIVER("DP audio: write eld information\n");
intel_write_eld(&encoder->base, adjusted_mode);
}
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 
if (intel_hdmi->has_audio) {
/* Proper support for digital audio needs a new logic
* and a new set of registers, so we leave it for future
* patch bombing.
*/
DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
pipe_name(crtc->pipe));
 
/* write eld */
DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
intel_write_eld(&encoder->base, adjusted_mode);
}
 
intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
}
}
 
static struct intel_encoder *
intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
{
434,53 → 401,6
return ret;
}
 
void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t val;
 
switch (intel_crtc->ddi_pll_sel) {
case PORT_CLK_SEL_SPLL:
plls->spll_refcount--;
if (plls->spll_refcount == 0) {
DRM_DEBUG_KMS("Disabling SPLL\n");
val = I915_READ(SPLL_CTL);
WARN_ON(!(val & SPLL_PLL_ENABLE));
I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
POSTING_READ(SPLL_CTL);
}
break;
case PORT_CLK_SEL_WRPLL1:
plls->wrpll1_refcount--;
if (plls->wrpll1_refcount == 0) {
DRM_DEBUG_KMS("Disabling WRPLL 1\n");
val = I915_READ(WRPLL_CTL1);
WARN_ON(!(val & WRPLL_PLL_ENABLE));
I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
POSTING_READ(WRPLL_CTL1);
}
break;
case PORT_CLK_SEL_WRPLL2:
plls->wrpll2_refcount--;
if (plls->wrpll2_refcount == 0) {
DRM_DEBUG_KMS("Disabling WRPLL 2\n");
val = I915_READ(WRPLL_CTL2);
WARN_ON(!(val & WRPLL_PLL_ENABLE));
I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
POSTING_READ(WRPLL_CTL2);
}
break;
}
 
WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
 
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
}
 
#define LC_FREQ 2700
#define LC_FREQ_2K (LC_FREQ * 2000)
 
633,6 → 553,96
/* Otherwise a < c && b >= d, do nothing */
}
 
static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
int reg)
{
int refclk = LC_FREQ;
int n, p, r;
u32 wrpll;
 
wrpll = I915_READ(reg);
switch (wrpll & WRPLL_PLL_REF_MASK) {
case WRPLL_PLL_SSC:
case WRPLL_PLL_NON_SSC:
/*
* We could calculate spread here, but our checking
* code only cares about 5% accuracy, and spread is a max of
* 0.5% downspread.
*/
refclk = 135;
break;
case WRPLL_PLL_LCPLL:
refclk = LC_FREQ;
break;
default:
WARN(1, "bad wrpll refclk\n");
return 0;
}
 
r = wrpll & WRPLL_DIVIDER_REF_MASK;
p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
 
/* Convert to KHz, p & r have a fixed point portion */
return (refclk * n * 100) / (p * r);
}
 
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int link_clock = 0;
u32 val, pll;
 
val = pipe_config->ddi_pll_sel;
switch (val & PORT_CLK_SEL_MASK) {
case PORT_CLK_SEL_LCPLL_810:
link_clock = 81000;
break;
case PORT_CLK_SEL_LCPLL_1350:
link_clock = 135000;
break;
case PORT_CLK_SEL_LCPLL_2700:
link_clock = 270000;
break;
case PORT_CLK_SEL_WRPLL1:
link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
break;
case PORT_CLK_SEL_WRPLL2:
link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
break;
case PORT_CLK_SEL_SPLL:
pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
if (pll == SPLL_PLL_FREQ_810MHz)
link_clock = 81000;
else if (pll == SPLL_PLL_FREQ_1350MHz)
link_clock = 135000;
else if (pll == SPLL_PLL_FREQ_2700MHz)
link_clock = 270000;
else {
WARN(1, "bad spll freq\n");
return;
}
break;
default:
WARN(1, "bad port clock sel\n");
return;
}
 
pipe_config->port_clock = link_clock * 2;
 
if (pipe_config->has_pch_encoder)
pipe_config->adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->fdi_m_n);
else if (pipe_config->has_dp_encoder)
pipe_config->adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
else
pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
}
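
/*
 * port_clock ends up as twice the decoded link_clock: e.g. the
 * PORT_CLK_SEL_LCPLL_1350 case gives link_clock = 135000 kHz and hence
 * port_clock = 270000 kHz, the DP HBR (2.7 Gbps) symbol rate.
 */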
 
static void
intel_ddi_calculate_wrpll(int clock /* in Hz */,
unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
708,173 → 718,37
{
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
int type = intel_encoder->type;
enum pipe pipe = intel_crtc->pipe;
int clock = intel_crtc->config.port_clock;
 
intel_ddi_put_crtc_pll(crtc);
intel_put_shared_dpll(intel_crtc);
 
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
switch (intel_dp->link_bw) {
case DP_LINK_BW_1_62:
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
break;
case DP_LINK_BW_2_7:
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
break;
case DP_LINK_BW_5_4:
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
break;
default:
DRM_ERROR("Link bandwidth %d unsupported\n",
intel_dp->link_bw);
return false;
}
 
} else if (type == INTEL_OUTPUT_HDMI) {
uint32_t reg, val;
if (type == INTEL_OUTPUT_HDMI) {
struct intel_shared_dpll *pll;
uint32_t val;
unsigned p, n2, r2;
 
intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
 
val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
 
if (val == I915_READ(WRPLL_CTL1)) {
DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
pipe_name(pipe));
reg = WRPLL_CTL1;
} else if (val == I915_READ(WRPLL_CTL2)) {
DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
pipe_name(pipe));
reg = WRPLL_CTL2;
} else if (plls->wrpll1_refcount == 0) {
DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
pipe_name(pipe));
reg = WRPLL_CTL1;
} else if (plls->wrpll2_refcount == 0) {
DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
pipe_name(pipe));
reg = WRPLL_CTL2;
} else {
DRM_ERROR("No WRPLLs available!\n");
return false;
}
intel_crtc->config.dpll_hw_state.wrpll = val;
 
DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
clock, p, n2, r2);
 
if (reg == WRPLL_CTL1) {
plls->wrpll1_refcount++;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
} else {
plls->wrpll2_refcount++;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
}
 
} else if (type == INTEL_OUTPUT_ANALOG) {
if (plls->spll_refcount == 0) {
DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
pipe_name(pipe));
plls->spll_refcount++;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
} else {
DRM_ERROR("SPLL already in use\n");
pll = intel_get_shared_dpll(intel_crtc);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
return false;
}
 
} else {
WARN(1, "Invalid DDI encoder type %d\n", type);
return false;
intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
}
 
return true;
}
 
/*
* To be called after intel_ddi_pll_select(). That one selects the PLL to be
* used, this one actually enables the PLL.
*/
void intel_ddi_pll_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
int clock = crtc->config.port_clock;
uint32_t reg, cur_val, new_val;
int refcount;
const char *pll_name;
uint32_t enable_bit = (1 << 31);
unsigned int p, n2, r2;
 
BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);
 
switch (crtc->ddi_pll_sel) {
case PORT_CLK_SEL_LCPLL_2700:
case PORT_CLK_SEL_LCPLL_1350:
case PORT_CLK_SEL_LCPLL_810:
/*
* LCPLL should always be enabled at this point of the mode set
* sequence, so nothing to do.
*/
return;
 
case PORT_CLK_SEL_SPLL:
pll_name = "SPLL";
reg = SPLL_CTL;
refcount = plls->spll_refcount;
new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
SPLL_PLL_SSC;
break;
 
case PORT_CLK_SEL_WRPLL1:
case PORT_CLK_SEL_WRPLL2:
if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
pll_name = "WRPLL1";
reg = WRPLL_CTL1;
refcount = plls->wrpll1_refcount;
} else {
pll_name = "WRPLL2";
reg = WRPLL_CTL2;
refcount = plls->wrpll2_refcount;
}
 
intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
 
new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
WRPLL_DIVIDER_REFERENCE(r2) |
WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);
 
break;
 
case PORT_CLK_SEL_NONE:
WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
return;
default:
WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
return;
}
 
cur_val = I915_READ(reg);
 
WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
if (refcount == 1) {
WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
I915_WRITE(reg, new_val);
POSTING_READ(reg);
udelay(20);
} else {
WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
}
}
 
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
884,8 → 758,7
int type = intel_encoder->type;
uint32_t temp;
 
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
temp = TRANS_MSA_SYNC_CLK;
switch (intel_crtc->config.pipe_bpp) {
case 18:
907,6 → 780,21
}
}
 
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
uint32_t temp;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (state == true)
temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
else
temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
 
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
953,7 → 841,9
* eDP when not using the panel fitter, and when not
* using motion blur mitigation (which we don't
* support). */
if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled)
if (IS_HASWELL(dev) &&
(intel_crtc->config.pch_pfit.enabled ||
intel_crtc->config.pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
971,9 → 861,7
}
 
if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 
if (intel_hdmi->has_hdmi_sink)
if (intel_crtc->config.has_hdmi_sink)
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
temp |= TRANS_DDI_MODE_SELECT_DVI;
986,9 → 874,21
type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
if (intel_dp->is_mst) {
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
} else
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
 
temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
} else if (type == INTEL_OUTPUT_DP_MST) {
struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;
 
if (intel_dp->is_mst) {
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
} else
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
 
temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
} else {
WARN(1, "Invalid encoder type %d for pipe %c\n",
intel_encoder->type, pipe_name(pipe));
1003,7 → 903,7
uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
uint32_t val = I915_READ(reg);
 
val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
val |= TRANS_DDI_PORT_NONE;
I915_WRITE(reg, val);
}
1017,8 → 917,13
enum port port = intel_ddi_get_encoder_port(intel_encoder);
enum pipe pipe = 0;
enum transcoder cpu_transcoder;
enum intel_display_power_domain power_domain;
uint32_t tmp;
 
power_domain = intel_display_port_power_domain(intel_encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;
 
if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
return false;
 
1037,8 → 942,11
case TRANS_DDI_MODE_SELECT_DP_SST:
if (type == DRM_MODE_CONNECTOR_eDP)
return true;
return (type == DRM_MODE_CONNECTOR_DisplayPort);
case TRANS_DDI_MODE_SELECT_DP_MST:
return (type == DRM_MODE_CONNECTOR_DisplayPort);
/* if the transcoder is in MST state then the
* connector isn't connected */
return false;
 
case TRANS_DDI_MODE_SELECT_FDI:
return (type == DRM_MODE_CONNECTOR_VGA);
1054,9 → 962,14
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
int i;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;
 
tmp = I915_READ(DDI_BUF_CTL(port));
 
if (!(tmp & DDI_BUF_CTL_ENABLE))
1085,6 → 998,9
 
if ((tmp & TRANS_DDI_PORT_MASK)
== TRANS_DDI_SELECT_PORT(port)) {
if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
return false;
 
*pipe = i;
return true;
}
1096,76 → 1012,6
return false;
}
 
static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
uint32_t temp, ret;
enum port port = I915_MAX_PORTS;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
int i;
 
if (cpu_transcoder == TRANSCODER_EDP) {
port = PORT_A;
} else {
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
temp &= TRANS_DDI_PORT_MASK;
 
for (i = PORT_B; i <= PORT_E; i++)
if (temp == TRANS_DDI_SELECT_PORT(i))
port = i;
}
 
if (port == I915_MAX_PORTS) {
WARN(1, "Pipe %c enabled on an unknown port\n",
pipe_name(pipe));
ret = PORT_CLK_SEL_NONE;
} else {
ret = I915_READ(PORT_CLK_SEL(port));
DRM_DEBUG_KMS("Pipe %c connected to port %c using clock "
"0x%08x\n", pipe_name(pipe), port_name(port),
ret);
}
 
return ret;
}
 
void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
struct intel_crtc *intel_crtc;
 
dev_priv->ddi_plls.spll_refcount = 0;
dev_priv->ddi_plls.wrpll1_refcount = 0;
dev_priv->ddi_plls.wrpll2_refcount = 0;
 
for_each_pipe(pipe) {
intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
if (!intel_crtc->active) {
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
continue;
}
 
intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
pipe);
 
switch (intel_crtc->ddi_pll_sel) {
case PORT_CLK_SEL_SPLL:
dev_priv->ddi_plls.spll_refcount++;
break;
case PORT_CLK_SEL_WRPLL1:
dev_priv->ddi_plls.wrpll1_refcount++;
break;
case PORT_CLK_SEL_WRPLL2:
dev_priv->ddi_plls.wrpll2_refcount++;
break;
}
}
}
 
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
1192,28 → 1038,44
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
 
if (crtc->config.has_audio) {
DRM_DEBUG_DRIVER("Audio on pipe %c on DDI\n",
pipe_name(crtc->pipe));
 
/* write eld */
DRM_DEBUG_DRIVER("DDI audio: write eld information\n");
intel_write_eld(encoder, &crtc->config.adjusted_mode);
}
 
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_panel_on(intel_dp);
intel_edp_panel_on(intel_dp);
}
 
WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
 
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
intel_ddi_init_dp_buf_reg(intel_encoder);
 
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
if (port != PORT_A)
intel_dp_stop_link_train(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 
intel_hdmi->set_infoframes(encoder,
crtc->config.has_hdmi_sink,
&crtc->config.adjusted_mode);
}
}
 
1244,7 → 1106,8
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
}
 
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
1279,11 → 1142,12
if (port == PORT_A)
intel_dp_stop_link_train(intel_dp);
 
ironlake_edp_backlight_on(intel_dp);
intel_edp_backlight_on(intel_dp);
intel_edp_psr_enable(intel_dp);
}
 
if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
if (intel_crtc->config.has_audio) {
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
1301,11 → 1165,14
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
/* We can't touch HSW_AUD_PIN_ELD_CP_VLD unconditionally because this
* register is part of the power well on Haswell. */
if (intel_crtc->config.has_audio) {
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
(pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
 
if (type == INTEL_OUTPUT_EDP) {
1312,7 → 1179,7
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
intel_edp_psr_disable(intel_dp);
ironlake_edp_backlight_off(intel_dp);
intel_edp_backlight_off(intel_dp);
}
}
 
1324,7 → 1191,7
 
if (lcpll & LCPLL_CD_SOURCE_FCLK) {
return 800000;
} else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) {
} else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) {
return 450000;
} else if (freq == LCPLL_CLK_FREQ_450) {
return 450000;
1343,11 → 1210,61
}
}
 
static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll);
POSTING_READ(WRPLL_CTL(pll->id));
udelay(20);
}
 
static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
uint32_t val;
 
val = I915_READ(WRPLL_CTL(pll->id));
I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
POSTING_READ(WRPLL_CTL(pll->id));
}
 
static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
uint32_t val;
 
if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
 
val = I915_READ(WRPLL_CTL(pll->id));
hw_state->wrpll = val;
 
return val & WRPLL_PLL_ENABLE;
}
 
static const char * const hsw_ddi_pll_names[] = {
"WRPLL 1",
"WRPLL 2",
};
 
void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
int i;
 
dev_priv->num_shared_dpll = 2;
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
dev_priv->shared_dplls[i].id = i;
dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
dev_priv->shared_dplls[i].get_hw_state =
hsw_ddi_pll_get_hw_state;
}
 
/* The LCPLL register should be turned on by the BIOS. For now let's
* just check its state and print errors in case something is wrong.
* Don't even try to turn it on.
1390,10 → 1307,15
intel_wait_ddi_buf_idle(dev_priv, port);
}
 
val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
val = DP_TP_CTL_ENABLE |
DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
if (intel_dp->is_mst)
val |= DP_TP_CTL_MODE_MST;
else {
val |= DP_TP_CTL_MODE_SST;
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
I915_WRITE(DP_TP_CTL(port), val);
POSTING_READ(DP_TP_CTL(port));
 
1432,13 → 1354,18
 
static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
int type = intel_encoder->type;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base);
int type = intel_dig_port->base.type;
 
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
intel_dp_check_link_status(intel_dp);
if (type != INTEL_OUTPUT_DISPLAYPORT &&
type != INTEL_OUTPUT_EDP &&
type != INTEL_OUTPUT_UNKNOWN) {
return;
}
 
intel_dp_hot_plug(intel_encoder);
}
 
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
1478,6 → 1405,7
 
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
pipe_config->has_hdmi_sink = true;
case TRANS_DDI_MODE_SELECT_DVI:
case TRANS_DDI_MODE_SELECT_FDI:
break;
1490,6 → 1418,12
break;
}
 
if (intel_display_power_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4)))
pipe_config->has_audio = true;
}
 
if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
/*
1509,6 → 1443,8
pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
}
 
intel_ddi_clock_get(encoder, pipe_config);
}
 
static void intel_ddi_destroy(struct drm_encoder *encoder)
1579,8 → 1515,6
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *hdmi_connector = NULL;
struct intel_connector *dp_connector = NULL;
bool init_hdmi, init_dp;
 
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
1587,7 → 1521,7
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
if (!init_dp && !init_hdmi) {
DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, assuming it is\n",
port_name(port));
init_hdmi = true;
init_dp = true;
1604,7 → 1538,6
DRM_MODE_ENCODER_TMDS);
 
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->mode_set = intel_ddi_mode_set;
intel_encoder->enable = intel_enable_ddi;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
1619,19 → 1552,27
 
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = false;
intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_ddi_hot_plug;
 
if (init_dp)
dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
if (init_dp) {
if (!intel_ddi_init_dp_connector(intel_dig_port))
goto err;
 
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hpd_irq_port[port] = intel_dig_port;
}
 
/* In theory we don't need the encoder->type check, but leave it just in
* case we have some really bad VBTs... */
if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)
hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port);
if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
if (!intel_ddi_init_hdmi_connector(intel_dig_port))
goto err;
}
 
if (!dp_connector && !hdmi_connector) {
return;
 
err:
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
}
}
/drivers/video/drm/i915/intel_display.c
39,8 → 39,48
#include "i915_trace.h"
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
//#include <linux/dma_remapping.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
 
static inline void ndelay(unsigned long x)
{
udelay(DIV_ROUND_UP(x, 1000));
}
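
/*
 * Note: this shim rounds nanosecond delays up to whole microseconds,
 * so e.g. ndelay(100) becomes udelay(1); the delay is never shorter
 * than requested, only coarser.
 */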
 
/* Primary plane formats supported by all gen */
#define COMMON_PRIMARY_FORMATS \
DRM_FORMAT_C8, \
DRM_FORMAT_RGB565, \
DRM_FORMAT_XRGB8888, \
DRM_FORMAT_ARGB8888
 
/* Primary plane formats for gen <= 3 */
static const uint32_t intel_primary_formats_gen2[] = {
COMMON_PRIMARY_FORMATS,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_ARGB1555,
};
 
/* Primary plane formats for gen >= 4 */
static const uint32_t intel_primary_formats_gen4[] = {
COMMON_PRIMARY_FORMATS, \
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_ARGB2101010,
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_ABGR2101010,
};
 
/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
 
#define DIV_ROUND_CLOSEST_ULL(ll, d) \
({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
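/* Rounds to the nearest integer using a 64-bit divide, e.g.
 * DIV_ROUND_CLOSEST_ULL(7, 2) -> (7 + 1) / 2 = 4 and
 * DIV_ROUND_CLOSEST_ULL(5, 2) -> (5 + 1) / 2 = 3 (halves round up). */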
 
#define MAX_ERRNO 4095
phys_addr_t get_bus_addr(void);
 
55,8 → 95,9
return v;
}
 
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
static void intel_increase_pllclock(struct drm_device *dev,
enum pipe pipe);
void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
65,7 → 106,27
 
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
static void intel_dp_set_m_n(struct intel_crtc *crtc);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc);
 
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
{
if (!connector->mst_port)
return connector->encoder;
else
return &connector->mst_port->mst_encoders[pipe]->base;
}
 
typedef struct {
int min, max;
339,6 → 400,22
.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
 
static const intel_limit_t intel_limits_chv = {
/*
* These are the data rate limits (measured in fast clocks)
* since those are the strictest limits we have. The fast
* clock and actual rate limits are more relaxed, so checking
* them would make no difference.
*/
.dot = { .min = 25000 * 5, .max = 540000 * 5},
.vco = { .min = 4860000, .max = 6700000 },
.n = { .min = 1, .max = 1 },
.m1 = { .min = 2, .max = 2 },
.m2 = { .min = 24 << 22, .max = 175 << 22 },
.p1 = { .min = 2, .max = 4 },
.p2 = { .p2_slow = 1, .p2_fast = 14 },
};
 
static void vlv_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m1 * clock->m2;
423,6 → 500,8
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
} else if (IS_CHERRYVIEW(dev)) {
limit = &intel_limits_chv;
} else if (IS_VALLEYVIEW(dev)) {
limit = &intel_limits_vlv;
} else if (!IS_GEN2(dev)) {
467,6 → 546,17
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
 
static void chv_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n == 0 || clock->p == 0))
return;
clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
clock->n << 22);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}
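
/*
 * Note: on CHV m2 carries 22 fractional bits (the .m2 limits above are
 * expressed as "value << 22"), so the vco calculation divides by
 * (clock->n << 22) to cancel that fixed-point scale.
 */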
 
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
* Returns whether the given set of divisors are valid for a given refclk with
742,6 → 832,58
return found;
}
 
static bool
chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
uint64_t m2;
int found = false;
 
memset(best_clock, 0, sizeof(*best_clock));
 
/*
* Based on the hardware doc, n is always set to 1 and m1 is always
* set to 2. If we ever need to support a 200MHz refclk, we will have to
* revisit this because n may no longer be 1.
*/
clock.n = 1, clock.m1 = 2;
target *= 5; /* fast clock */
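/* the *5 matches intel_limits_chv above, whose .dot range is also scaled
 * by 5 -- the search runs in "fast clock" units rather than the final
 * port/pixel clock */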
 
for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
for (clock.p2 = limit->p2.p2_fast;
clock.p2 >= limit->p2.p2_slow;
clock.p2 -= clock.p2 > 10 ? 2 : 1) {
 
clock.p = clock.p1 * clock.p2;
 
m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
clock.n) << 22, refclk * clock.m1);
 
if (m2 > INT_MAX/clock.m1)
continue;
 
clock.m2 = m2;
 
chv_clock(refclk, &clock);
 
if (!intel_PLL_is_valid(dev, limit, &clock))
continue;
 
/* based on hardware requirements, prefer a bigger p
*/
if (clock.p > best_clock->p) {
*best_clock = clock;
found = true;
}
}
}
 
return found;
}
 
bool intel_crtc_active(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
752,10 → 894,10
* We can ditch the adjusted_mode.crtc_clock check as soon
* as Haswell has gained clock readout/fastboot support.
*
* We can ditch the crtc->fb check as soon as we can
* We can ditch the crtc->primary->fb check as soon as we can
* properly reconstruct framebuffers.
*/
return intel_crtc->active && crtc->fb &&
return intel_crtc->active && crtc->primary->fb &&
intel_crtc->config.adjusted_mode.crtc_clock;
}
 
776,7 → 918,7
frame = I915_READ(frame_reg);
 
if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
DRM_DEBUG_KMS("vblank wait timed out\n");
WARN(1, "vblank wait timed out\n");
}
 
/**
979,11 → 1121,6
bool cur_state;
struct intel_dpll_hw_state hw_state;
 
if (HAS_PCH_LPT(dev_priv->dev)) {
DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
return;
}
 
if (WARN (!pll,
"asserting DPLL %s with no DPLL\n", state_string(state)))
return;
1044,7 → 1181,7
u32 val;
 
/* ILK FDI PLL is always enabled */
if (dev_priv->info->gen == 5)
if (INTEL_INFO(dev_priv->dev)->gen == 5)
return;
 
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
1106,9 → 1243,7
struct drm_device *dev = dev_priv->dev;
bool cur_state;
 
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
else if (IS_845G(dev) || IS_I865G(dev))
if (IS_845G(dev) || IS_I865G(dev))
cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
else
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1133,7 → 1268,7
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
 
if (!intel_display_power_enabled(dev_priv->dev,
if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
cur_state = false;
} else {
1177,7 → 1312,7
if (INTEL_INFO(dev)->gen >= 4) {
reg = DSPCNTR(pipe);
val = I915_READ(reg);
WARN((val & DISPLAY_PLANE_ENABLE),
WARN(val & DISPLAY_PLANE_ENABLE,
"plane %c assertion failure, should be disabled but not\n",
plane_name(pipe));
return;
1199,27 → 1334,27
enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
int reg, i;
int reg, sprite;
u32 val;
 
if (IS_VALLEYVIEW(dev)) {
for (i = 0; i < dev_priv->num_plane; i++) {
reg = SPCNTR(pipe, i);
for_each_sprite(pipe, sprite) {
reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
WARN((val & SP_ENABLE),
WARN(val & SP_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
sprite_name(pipe, i), pipe_name(pipe));
sprite_name(pipe, sprite), pipe_name(pipe));
}
} else if (INTEL_INFO(dev)->gen >= 7) {
reg = SPRCTL(pipe);
val = I915_READ(reg);
WARN((val & SPRITE_ENABLE),
WARN(val & SPRITE_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
} else if (INTEL_INFO(dev)->gen >= 5) {
reg = DVSCNTR(pipe);
val = I915_READ(reg);
WARN((val & DVS_ENABLE),
WARN(val & DVS_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
}
1264,6 → 1399,9
u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
return false;
} else if (IS_CHERRYVIEW(dev_priv->dev)) {
if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
return false;
} else {
if ((val & DP_PIPE_MASK) != (pipe << 30))
return false;
1280,6 → 1418,9
if (HAS_PCH_CPT(dev_priv->dev)) {
if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
return false;
} else if (IS_CHERRYVIEW(dev_priv->dev)) {
if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
return false;
} else {
if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
return false;
1378,36 → 1519,46
if (!IS_VALLEYVIEW(dev))
return;
 
/*
* IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
* CHV x1 PHY (DP/HDMI D)
* IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
*/
if (IS_CHERRYVIEW(dev)) {
DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
} else {
DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
}
}
 
static void intel_reset_dpio(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!IS_VALLEYVIEW(dev))
return;
if (IS_CHERRYVIEW(dev)) {
enum dpio_phy phy;
u32 val;
 
/*
* Enable the CRI clock source so we can get at the display and the
* reference clock for VGA hotplug / manual detection.
*/
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
DPLL_REFA_CLK_ENABLE_VLV |
DPLL_INTEGRATED_CRI_CLK_VLV);
for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) {
/* Poll for phypwrgood signal */
if (wait_for(I915_READ(DISPLAY_PHY_STATUS) &
PHY_POWERGOOD(phy), 1))
DRM_ERROR("Display PHY %d is not power up\n", phy);
 
/*
* From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
* 6. De-assert cmn_reset/side_reset. Same as VLV X0.
* a. GUnit 0x2110 bit[0] set to 1 (def 0)
* b. The other bits such as sfr settings / modesel may all be set
* to 0.
* Deassert common lane reset for PHY.
*
* This should only be done on init and resume from S3 with both
* PLLs disabled, or we risk losing DPIO and PLL synchronization.
* This should only be done on init and resume from S3
* with both PLLs disabled, or we risk losing DPIO and
* PLL synchronization.
*/
I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
val = I915_READ(DISPLAY_PHY_CONTROL);
I915_WRITE(DISPLAY_PHY_CONTROL,
PHY_COM_LANE_RESET_DEASSERT(phy, val));
}
}
}
 
static void vlv_enable_pll(struct intel_crtc *crtc)
{
1447,6 → 1598,44
udelay(150); /* wait for warmup */
}
 
static void chv_enable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 tmp;
 
assert_pipe_disabled(dev_priv, crtc->pipe);
 
BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
 
mutex_lock(&dev_priv->dpio_lock);
 
/* Enable back the 10bit clock to display controller */
tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
tmp |= DPIO_DCLKP_EN;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
 
/*
* Need to wait > 100ns between dclkp clock enable bit and PLL enable.
*/
udelay(1);
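/* udelay(1) waits a full microsecond, well beyond the 100ns minimum above */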
 
/* Enable PLL */
I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
 
/* Check PLL is locked */
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("PLL %d failed to lock\n", pipe);
 
/* not sure when this should be written */
I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
POSTING_READ(DPLL_MD(pipe));
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
1457,7 → 1646,7
assert_pipe_disabled(dev_priv, crtc->pipe);
 
/* No really, not for ILK+ */
BUG_ON(dev_priv->info->gen >= 5);
BUG_ON(INTEL_INFO(dev)->gen >= 5);
 
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev) && !IS_I830(dev))
1530,31 → 1719,94
val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
 
}
 
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 val;
 
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
 
/* Set PLL en = 0 */
val = DPLL_SSC_REF_CLOCK_CHV;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
 
mutex_lock(&dev_priv->dpio_lock);
 
/* Disable 10bit clock to display controller */
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
val &= ~DPIO_DCLKP_EN;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
 
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
} else {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport)
{
u32 port_mask;
int dpll_reg;
 
switch (dport->port) {
case PORT_B:
port_mask = DPLL_PORTB_READY_MASK;
dpll_reg = DPLL(0);
break;
case PORT_C:
port_mask = DPLL_PORTC_READY_MASK;
dpll_reg = DPLL(0);
break;
case PORT_D:
port_mask = DPLL_PORTD_READY_MASK;
dpll_reg = DPIO_PHY_STATUS;
break;
default:
BUG();
}
 
if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
port_name(dport->port), I915_READ(DPLL(0)));
port_name(dport->port), I915_READ(dpll_reg));
}
 
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
if (WARN_ON(pll == NULL))
return;
 
WARN_ON(!pll->refcount);
if (pll->active == 0) {
DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
WARN_ON(pll->on);
assert_shared_dpll_disabled(dev_priv, pll);
 
pll->mode_set(dev_priv, pll);
}
}
 
/**
* ironlake_enable_shared_dpll - enable PCH PLL
* intel_enable_shared_dpll - enable PCH PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to enable
*
1561,13 → 1813,12
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
static void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
/* PCH PLLs only available on ILK, SNB and IVB */
BUG_ON(dev_priv->info->gen < 5);
if (WARN_ON(pll == NULL))
return;
 
1585,18 → 1836,21
}
WARN_ON(pll->on);
 
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
 
DRM_DEBUG_KMS("enabling %s\n", pll->name);
pll->enable(dev_priv, pll);
pll->on = true;
}
 
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
BUG_ON(INTEL_INFO(dev)->gen < 5);
if (WARN_ON(pll == NULL))
return;
 
1620,6 → 1874,8
DRM_DEBUG_KMS("disabling %s\n", pll->name);
pll->disable(dev_priv, pll);
pll->on = false;
 
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
 
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1631,7 → 1887,7
uint32_t reg, val, pipeconf_val;
 
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
BUG_ON(INTEL_INFO(dev)->gen < 5);
 
/* Make sure PCH DPLL is enabled */
assert_shared_dpll_enabled(dev_priv,
1684,7 → 1940,7
u32 val, pipeconf_val;
 
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5);
 
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1758,21 → 2014,16
 
/**
* intel_enable_pipe - enable a pipe, asserting requirements
* @dev_priv: i915 private structure
* @pipe: pipe to enable
* @pch_port: on ILK+, is this pipe driving a PCH port or not
* @crtc: crtc responsible for the pipe
*
* Enable @pipe, making sure that various hardware specific requirements
* Enable @crtc's pipe, making sure that various hardware specific requirements
* are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
*
* @pipe should be %PIPE_A or %PIPE_B.
*
* Will wait until the pipe is actually running (i.e. first vblank) before
* returning.
*/
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool pch_port, bool dsi)
static void intel_enable_pipe(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
enum pipe pch_transcoder;
1794,12 → 2045,12
* need the check.
*/
if (!HAS_PCH_SPLIT(dev_priv->dev))
if (dsi)
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
else {
if (pch_port) {
if (crtc->config.has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
assert_fdi_tx_pll_enabled(dev_priv,
1810,11 → 2061,14
 
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if (val & PIPECONF_ENABLE)
if (val & PIPECONF_ENABLE) {
WARN_ON(!(pipe == PIPE_A &&
dev_priv->quirks & QUIRK_PIPEA_FORCE));
return;
}
 
I915_WRITE(reg, val | PIPECONF_ENABLE);
intel_wait_for_vblank(dev_priv->dev, pipe);
POSTING_READ(reg);
}
 
/**
1865,7 → 2119,8
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
enum plane plane)
{
u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
struct drm_device *dev = dev_priv->dev;
u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
 
I915_WRITE(reg, I915_READ(reg));
POSTING_READ(reg);
1872,7 → 2127,7
}
 
/**
* intel_enable_primary_plane - enable the primary plane on a given pipe
* intel_enable_primary_hw_plane - enable the primary plane on a given pipe
* @dev_priv: i915 private structure
* @plane: plane to enable
* @pipe: pipe being fed
1879,9 → 2134,10
*
* Enable @plane on @pipe, making sure that @pipe is running first.
*/
static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
enum plane plane, enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
int reg;
1890,22 → 2146,21
/* If the pipe isn't enabled, we can't pump pixels and may hang */
assert_pipe_enabled(dev_priv, pipe);
 
WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n");
if (intel_crtc->primary_enabled)
return;
 
intel_crtc->primary_enabled = true;
 
reg = DSPCNTR(plane);
val = I915_READ(reg);
if (val & DISPLAY_PLANE_ENABLE)
return;
WARN_ON(val & DISPLAY_PLANE_ENABLE);
 
I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
intel_flush_primary_plane(dev_priv, plane);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
 
/**
* intel_disable_primary_plane - disable the primary plane
* intel_disable_primary_hw_plane - disable the primary hardware plane
* @dev_priv: i915 private structure
* @plane: plane to disable
* @pipe: pipe consuming the data
1912,7 → 2167,7
*
* Disable @plane; should be an independent operation.
*/
static void intel_disable_primary_plane(struct drm_i915_private *dev_priv,
static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
enum plane plane, enum pipe pipe)
{
struct intel_crtc *intel_crtc =
1920,18 → 2175,17
int reg;
u32 val;
 
WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n");
if (!intel_crtc->primary_enabled)
return;
 
intel_crtc->primary_enabled = false;
 
reg = DSPCNTR(plane);
val = I915_READ(reg);
if ((val & DISPLAY_PLANE_ENABLE) == 0)
return;
WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);
 
I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
intel_flush_primary_plane(dev_priv, plane);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
 
static bool need_vtd_wa(struct drm_device *dev)
1943,15 → 2197,25
return false;
}
 
static int intel_align_height(struct drm_device *dev, int height, bool tiled)
{
int tile_height;
 
tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
return ALIGN(height, tile_height);
}
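
/*
 * Tiled (X-tiled) surfaces use a tile height of 8 rows (16 on gen2), so
 * e.g. a 766-line tiled framebuffer is padded to 768 lines; linear
 * surfaces keep their requested height.
 */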
 
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined)
struct intel_engine_cs *pipelined)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 alignment;
int ret;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
switch (obj->tiling_mode) {
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2008,8 → 2272,10
 
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
// i915_gem_object_unpin_fence(obj);
// i915_gem_object_unpin(obj);
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
 
i915_gem_object_unpin_fence(obj);
// i915_gem_object_unpin_from_display_plane(obj);
}
 
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2039,31 → 2305,131
}
}
 
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int intel_format_to_fourcc(int format)
{
switch (format) {
case DISPPLANE_8BPP:
return DRM_FORMAT_C8;
case DISPPLANE_BGRX555:
return DRM_FORMAT_XRGB1555;
case DISPPLANE_BGRX565:
return DRM_FORMAT_RGB565;
default:
case DISPPLANE_BGRX888:
return DRM_FORMAT_XRGB8888;
case DISPPLANE_RGBX888:
return DRM_FORMAT_XBGR8888;
case DISPPLANE_BGRX101010:
return DRM_FORMAT_XRGB2101010;
case DISPPLANE_RGBX101010:
return DRM_FORMAT_XBGR2101010;
}
}
 
static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
struct intel_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_gem_object *obj = NULL;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
u32 base = plane_config->base;
 
if (plane_config->size == 0)
return false;
 
obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
plane_config->size);
if (!obj)
return false;
 
main_fb_obj = obj;
 
if (plane_config->tiled) {
obj->tiling_mode = I915_TILING_X;
obj->stride = crtc->base.primary->fb->pitches[0];
}
 
mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
mode_cmd.width = crtc->base.primary->fb->width;
mode_cmd.height = crtc->base.primary->fb->height;
mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
 
mutex_lock(&dev->struct_mutex);
 
if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
&mode_cmd, obj)) {
DRM_DEBUG_KMS("intel fb init failed\n");
goto out_unref_obj;
}
 
obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
mutex_unlock(&dev->struct_mutex);
 
DRM_DEBUG_KMS("plane fb obj %p\n", obj);
return true;
 
out_unref_obj:
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
return false;
}
 
static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
struct intel_plane_config *plane_config)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_crtc *c;
struct intel_crtc *i;
struct drm_i915_gem_object *obj;
 
if (!intel_crtc->base.primary->fb)
return;
 
if (intel_alloc_plane_obj(intel_crtc, plane_config))
return;
 
kfree(intel_crtc->base.primary->fb);
intel_crtc->base.primary->fb = NULL;
 
/*
* Failed to alloc the obj, check to see if we should share
* an fb with another CRTC instead
*/
for_each_crtc(dev, c) {
i = to_intel_crtc(c);
 
if (c == &intel_crtc->base)
continue;
 
if (!i->active)
continue;
 
obj = intel_fb_obj(c->primary->fb);
if (obj == NULL)
continue;
 
if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
drm_framebuffer_reference(c->primary->fb);
intel_crtc->base.primary->fb = c->primary->fb;
obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
break;
}
}
}
 
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg;
 
switch (plane) {
case 0:
case 1:
break;
default:
DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
return -EINVAL;
}
 
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
 
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
2135,36 → 2501,21
} else
I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
POSTING_READ(reg);
 
return 0;
}
 
static int ironlake_update_plane(struct drm_crtc *crtc,
struct drm_framebuffer *fb, int x, int y)
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg;
 
switch (plane) {
case 0:
case 1:
case 2:
break;
default:
DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
return -EINVAL;
}
 
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
 
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
2228,8 → 2579,6
I915_WRITE(DSPLINOFF(plane), linear_offset);
}
POSTING_READ(reg);
 
return 0;
}
 
/* Assume fb object is pinned & idle & fenced and just update base pointers */
2242,9 → 2591,11
 
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
intel_increase_pllclock(crtc);
intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
 
return dev_priv->display.update_plane(crtc, fb, x, y);
dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
return 0;
}
 
#if 0
2267,7 → 2618,7
* pending_flip_queue really got woken up.
*/
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
for_each_crtc(dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum plane plane = intel_crtc->plane;
 
2275,19 → 2626,21
intel_finish_page_flip_plane(dev, plane);
}
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
for_each_crtc(dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
mutex_lock(&crtc->mutex);
drm_modeset_lock(&crtc->mutex, NULL);
/*
* FIXME: Once we have proper support for primary planes (and
* disabling them without disabling the entire crtc) allow again
* a NULL crtc->fb.
* a NULL crtc->primary->fb.
*/
if (intel_crtc->active && crtc->fb)
dev_priv->display.update_plane(crtc, crtc->fb,
crtc->x, crtc->y);
mutex_unlock(&crtc->mutex);
if (intel_crtc->active && crtc->primary->fb)
dev_priv->display.update_primary_plane(crtc,
crtc->primary->fb,
crtc->x,
crtc->y);
drm_modeset_unlock(&crtc->mutex);
}
}
 
2294,7 → 2647,7
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
bool was_interruptible = dev_priv->mm.interruptible;
int ret;
2314,32 → 2667,24
return ret;
}
 
static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_master_private *master_priv;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool pending;
 
if (!dev->primary->master)
return;
if (i915_reset_in_progress(&dev_priv->gpu_error) ||
intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
return false;
 
master_priv = dev->primary->master->driver_priv;
if (!master_priv->sarea_priv)
return;
spin_lock_irqsave(&dev->event_lock, flags);
pending = to_intel_crtc(crtc)->unpin_work != NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
 
switch (intel_crtc->pipe) {
case 0:
master_priv->sarea_priv->pipeA_x = x;
master_priv->sarea_priv->pipeA_y = y;
break;
case 1:
master_priv->sarea_priv->pipeB_x = x;
master_priv->sarea_priv->pipeB_y = y;
break;
default:
break;
return pending;
}
}
#endif
 
static int
2349,9 → 2694,13
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_framebuffer *old_fb;
enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
int ret;
 
 
/* no fb bound */
if (!fb) {
DRM_ERROR("No FB bound\n");
2366,11 → 2715,12
}
 
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
to_intel_framebuffer(fb)->obj,
NULL);
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret == 0)
i915_gem_track_fb(old_obj, obj,
INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
DRM_ERROR("pin & fence failed\n");
return ret;
}
2388,7 → 2738,7
* whether the platform allows pfit disable with pipe active, and only
* then update the pipesrc and pfit state, even on the flip path.
*/
if (i915_fastboot) {
if (i915.fastboot) {
const struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
 
2406,16 → 2756,12
intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
}
 
ret = dev_priv->display.update_plane(crtc, fb, x, y);
if (ret) {
intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
mutex_unlock(&dev->struct_mutex);
DRM_ERROR("failed to update base address\n");
return ret;
}
dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
old_fb = crtc->fb;
crtc->fb = fb;
if (intel_crtc->active)
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
 
crtc->primary->fb = fb;
crtc->x = x;
crtc->y = y;
 
2422,11 → 2768,13
if (old_fb) {
if (intel_crtc->active && old_fb != fb)
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(old_obj);
mutex_unlock(&dev->struct_mutex);
}
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
 
return 0;
2512,12 → 2860,10
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp, tries;
 
/* FDI needs bits from pipe & plane first */
/* FDI needs bits from pipe first */
assert_pipe_enabled(dev_priv, pipe);
assert_plane_enabled(dev_priv, plane);
 
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
2948,9 → 3294,8
udelay(100);
 
/* Ironlake workaround, disable clock pointer after downing FDI */
if (HAS_PCH_IBX(dev)) {
if (HAS_PCH_IBX(dev))
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
}
 
/* still set train pattern 1 */
reg = FDI_TX_CTL(pipe);
2977,41 → 3322,47
udelay(100);
}
 
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool pending;
struct intel_crtc *crtc;
 
if (i915_reset_in_progress(&dev_priv->gpu_error) ||
intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
return false;
/* Note that we don't need to be called with mode_config.lock here
* as our list of CRTC objects is static for the lifetime of the
* device and so cannot disappear as we iterate. Similarly, we can
* happily treat the predicates as racy, atomic checks as userspace
* cannot claim and pin a new fb without at least acquiring the
* struct_mutex and so serialising with us.
*/
for_each_intel_crtc(dev, crtc) {
if (atomic_read(&crtc->unpin_work_count) == 0)
continue;
 
spin_lock_irqsave(&dev->event_lock, flags);
pending = to_intel_crtc(crtc)->unpin_work != NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
if (crtc->unpin_work)
intel_wait_for_vblank(dev, crtc->pipe);
 
return pending;
return true;
}
 
return false;
}
 
#if 0
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (crtc->fb == NULL)
if (crtc->primary->fb == NULL)
return;
 
WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
 
wait_event(dev_priv->pending_flip_queue,
!intel_crtc_has_pending_flip(crtc));
WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
!intel_crtc_has_pending_flip(crtc),
60*HZ) == 0);
 
mutex_lock(&dev->struct_mutex);
intel_finish_fb(crtc->fb);
intel_finish_fb(crtc->primary->fb);
mutex_unlock(&dev->struct_mutex);
}
#endif
3222,7 → 3573,7
* Note that enable_shared_dpll tries to do the right thing, but
* get_shared_dpll unconditionally resets the pll - we need that to have
* the right LVDS enable sequence. */
ironlake_enable_shared_dpll(intel_crtc);
intel_enable_shared_dpll(intel_crtc);
 
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
3286,7 → 3637,7
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
 
static void intel_put_shared_dpll(struct intel_crtc *crtc)
void intel_put_shared_dpll(struct intel_crtc *crtc)
{
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
3306,7 → 3657,7
crtc->config.shared_dpll = DPLL_ID_PRIVATE;
}
 
static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3326,6 → 3677,8
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
crtc->base.base.id, pll->name);
 
WARN_ON(pll->refcount);
 
goto found;
}
 
3359,20 → 3712,13
return NULL;
 
found:
if (pll->refcount == 0)
pll->hw_state = crtc->config.dpll_hw_state;
 
crtc->config.shared_dpll = i;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
 
if (pll->active == 0) {
memcpy(&pll->hw_state, &crtc->config.dpll_hw_state,
sizeof(pll->hw_state));
 
DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
WARN_ON(pll->on);
assert_shared_dpll_disabled(dev_priv, pll);
 
pll->mode_set(dev_priv, pll);
}
pll->refcount++;
 
return pll;
3417,37 → 3763,43
{
struct drm_device *dev = crtc->dev;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_plane *plane;
struct intel_plane *intel_plane;
 
list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
intel_plane = to_intel_plane(plane);
if (intel_plane->pipe == pipe)
intel_plane_restore(&intel_plane->base);
}
}
 
static void intel_disable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_plane *plane;
struct intel_plane *intel_plane;
 
list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
intel_plane = to_intel_plane(plane);
if (intel_plane->pipe == pipe)
intel_plane_disable(&intel_plane->base);
}
}
 
void hsw_enable_ips(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!crtc->config.ips_enabled)
return;
 
/* We can only enable IPS after we enable a plane and wait for a vblank.
* We guarantee that the plane is enabled by calling intel_enable_ips
* only after intel_enable_plane. And intel_enable_plane already waits
* for a vblank, so all we need to do here is to enable the IPS bit. */
/* We can only enable IPS after we enable a plane and wait for a vblank */
intel_wait_for_vblank(dev, crtc->pipe);
 
assert_plane_enabled(dev_priv, crtc->plane);
if (IS_BROADWELL(crtc->base.dev)) {
if (IS_BROADWELL(dev)) {
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
mutex_unlock(&dev_priv->rps.hw_lock);
3477,10 → 3829,13
return;
 
assert_plane_enabled(dev_priv, crtc->plane);
if (IS_BROADWELL(crtc->base.dev)) {
if (IS_BROADWELL(dev)) {
mutex_lock(&dev_priv->rps.hw_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
mutex_unlock(&dev_priv->rps.hw_lock);
/* wait for pcode to finish disabling IPS, which may take up to 42ms */
if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
DRM_ERROR("Timed out waiting for IPS disable\n");
} else {
I915_WRITE(IPS_CTL, 0);
POSTING_READ(IPS_CTL);
3513,7 → 3868,7
}
 
/* use legacy palette for Ironlake */
if (HAS_PCH_SPLIT(dev))
if (!HAS_GMCH_DISPLAY(dev))
palreg = LGC_PALETTE(pipe);
 
/* Workaround : Do not read or write the pipe palette/gamma data while
3537,6 → 3892,66
hsw_enable_ips(intel_crtc);
}
 
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
if (!enable && intel_crtc->overlay) {
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
mutex_lock(&dev->struct_mutex);
dev_priv->mm.interruptible = false;
dev_priv->mm.interruptible = true;
mutex_unlock(&dev->struct_mutex);
}
 
/* Let userspace switch the overlay on again. In most cases userspace
* has to recompute where to put it anyway.
*/
}
 
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
 
drm_vblank_on(dev, pipe);
 
intel_enable_primary_hw_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
intel_crtc_dpms_overlay(intel_crtc, true);
 
hsw_enable_ips(intel_crtc);
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
}
 
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
 
 
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
 
hsw_disable_ips(intel_crtc);
 
intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_primary_hw_plane(dev_priv, plane, pipe);
drm_vblank_off(dev, pipe);
}
 
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3544,7 → 3959,7
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
enum plane plane = intel_crtc->plane;
 
WARN_ON(!crtc->enabled);
 
3551,6 → 3966,28
if (intel_crtc->active)
return;
 
if (intel_crtc->config.has_pch_encoder)
intel_prepare_shared_dpll(intel_crtc);
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
 
intel_set_pipe_timings(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
&intel_crtc->config.fdi_m_n);
}
 
ironlake_set_pipeconf(crtc);
 
/* Set up the display plane register */
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
POSTING_READ(DSPCNTR(plane));
 
dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
crtc->x, crtc->y);
 
intel_crtc->active = true;
 
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3579,19 → 4016,11
intel_crtc_load_lut(crtc);
 
intel_update_watermarks(crtc);
intel_enable_pipe(dev_priv, pipe,
intel_crtc->config.has_pch_encoder, false);
intel_enable_primary_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
intel_enable_pipe(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder)
ironlake_pch_enable(crtc);
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
3598,15 → 4027,7
if (HAS_PCH_CPT(dev))
cpt_verify_modeset(dev, intel_crtc->pipe);
 
/*
* There seems to be a race in PCH platform hw (at least on some
* outputs) where an enabled pipe still completes any pageflip right
* away (as if the pipe is off) instead of waiting for vblank. As soon
* as the first vblank happened, everything works as expected. Hence just
* wait for one vblank before returning to avoid strange things
* happening.
*/
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_crtc_enable_planes(crtc);
}
 
/* IPS only exists on ULT machines and is tied to pipe A. */
3615,47 → 4036,6
return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}
 
static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
 
intel_enable_primary_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
 
hsw_enable_ips(intel_crtc);
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
}
 
static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
 
// intel_crtc_wait_for_pending_flips(crtc);
// drm_vblank_off(dev, pipe);
 
/* FBC must be disabled before disabling the plane on HSW. */
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
 
hsw_disable_ips(intel_crtc);
 
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_primary_plane(dev_priv, plane, pipe);
}
 
/*
* This implements the workaround described in the "notes" section of the mode
* set sequence documentation. When going from no pipes or single pipe to
3669,7 → 4049,7
 
/* We want to get the other_active_crtc only if there's only 1 other
* active crtc. */
list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
for_each_intel_crtc(dev, crtc_it) {
if (!crtc_it->active || crtc_it == crtc)
continue;
 
3692,6 → 4072,7
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
enum plane plane = intel_crtc->plane;
 
WARN_ON(!crtc->enabled);
 
3698,19 → 4079,42
if (intel_crtc->active)
return;
 
if (intel_crtc_to_shared_dpll(intel_crtc))
intel_enable_shared_dpll(intel_crtc);
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
 
intel_set_pipe_timings(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
&intel_crtc->config.fdi_m_n);
}
 
haswell_set_pipeconf(crtc);
 
intel_set_pipe_csc(crtc);
 
/* Set up the display plane register */
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
POSTING_READ(DSPCNTR(plane));
 
dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
crtc->x, crtc->y);
 
intel_crtc->active = true;
 
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
 
if (intel_crtc->config.has_pch_encoder)
dev_priv->display.fdi_link_train(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
 
if (intel_crtc->config.has_pch_encoder) {
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
dev_priv->display.fdi_link_train(crtc);
}
 
intel_ddi_enable_pipe_clock(intel_crtc);
 
ironlake_pfit_enable(intel_crtc);
3725,12 → 4129,14
intel_ddi_enable_transcoder_func(crtc);
 
intel_update_watermarks(crtc);
intel_enable_pipe(dev_priv, pipe,
intel_crtc->config.has_pch_encoder, false);
intel_enable_pipe(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder)
lpt_pch_enable(crtc);
 
if (intel_crtc->config.dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
encoder->enable(encoder);
intel_opregion_notify_encoder(encoder, true);
3739,17 → 4145,7
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
haswell_crtc_enable_planes(crtc);
 
/*
* There seems to be a race in PCH platform hw (at least on some
* outputs) where an enabled pipe still completes any pageflip right
* away (as if the pipe is off) instead of waiting for vblank. As soon
* as the first vblank happened, everything works as expected. Hence just
* wait for one vblank before returning to avoid strange things
* happening.
*/
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_crtc_enable_planes(crtc);
}
 
static void ironlake_pfit_disable(struct intel_crtc *crtc)
3774,31 → 4170,24
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp;
 
 
if (!intel_crtc->active)
return;
 
intel_crtc_disable_planes(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
 
// intel_crtc_wait_for_pending_flips(crtc);
// drm_vblank_off(dev, pipe);
 
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
 
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_primary_plane(dev_priv, plane, pipe);
 
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
 
intel_disable_pipe(dev_priv, pipe);
 
if (intel_crtc->config.dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, false);
 
ironlake_pfit_disable(intel_crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
3852,7 → 4241,7
if (!intel_crtc->active)
return;
 
haswell_crtc_disable_planes(crtc);
intel_crtc_disable_planes(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
3869,10 → 4258,6
 
intel_ddi_disable_pipe_clock(intel_crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
 
if (intel_crtc->config.has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
3879,6 → 4264,10
intel_ddi_fdi_disable(crtc);
}
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
 
intel_crtc->active = false;
intel_update_watermarks(crtc);
 
3885,6 → 4274,9
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
if (intel_crtc_to_shared_dpll(intel_crtc))
intel_disable_shared_dpll(intel_crtc);
}
 
static void ironlake_crtc_off(struct drm_crtc *crtc)
3893,53 → 4285,7
intel_put_shared_dpll(intel_crtc);
}
 
static void haswell_crtc_off(struct drm_crtc *crtc)
{
intel_ddi_put_crtc_pll(crtc);
}
 
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
if (!enable && intel_crtc->overlay) {
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
mutex_lock(&dev->struct_mutex);
dev_priv->mm.interruptible = false;
// (void) intel_overlay_switch_off(intel_crtc->overlay);
dev_priv->mm.interruptible = true;
mutex_unlock(&dev->struct_mutex);
}
 
/* Let userspace switch the overlay on again. In most cases userspace
* has to recompute where to put it anyway.
*/
}
 
/**
* g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
* cursor plane briefly if not already running after enabling the display
* plane.
* This workaround avoids occasional blank screens when self refresh is
* enabled.
*/
static void
g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
{
u32 cntl = I915_READ(CURCNTR(pipe));
 
if ((cntl & CURSOR_MODE) == 0) {
u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
 
I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
intel_wait_for_vblank(dev_priv->dev, pipe);
I915_WRITE(CURCNTR(pipe), cntl);
I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
I915_WRITE(FW_BLC_SELF, fw_bcl_self);
}
}
 
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
3964,8 → 4310,128
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
 
int valleyview_get_vco(struct drm_i915_private *dev_priv)
static enum intel_display_power_domain port_to_power_domain(enum port port)
{
switch (port) {
case PORT_A:
return POWER_DOMAIN_PORT_DDI_A_4_LANES;
case PORT_B:
return POWER_DOMAIN_PORT_DDI_B_4_LANES;
case PORT_C:
return POWER_DOMAIN_PORT_DDI_C_4_LANES;
case PORT_D:
return POWER_DOMAIN_PORT_DDI_D_4_LANES;
default:
WARN_ON_ONCE(1);
return POWER_DOMAIN_PORT_OTHER;
}
}
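 
/* Iterate over every power domain whose bit is set in @mask. */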
 
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))
 
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct intel_digital_port *intel_dig_port;
 
switch (intel_encoder->type) {
case INTEL_OUTPUT_UNKNOWN:
/* Only DDI platforms should ever use this output type */
WARN_ON_ONCE(!HAS_DDI(dev));
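/* fall through */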
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
return port_to_power_domain(intel_dig_port->port);
case INTEL_OUTPUT_DP_MST:
intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
return port_to_power_domain(intel_dig_port->port);
case INTEL_OUTPUT_ANALOG:
return POWER_DOMAIN_PORT_CRT;
case INTEL_OUTPUT_DSI:
return POWER_DOMAIN_PORT_DSI;
default:
return POWER_DOMAIN_PORT_OTHER;
}
}
 
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *intel_encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
unsigned long mask;
enum transcoder transcoder;
 
transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
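 
/* Each active pipe needs power for the pipe itself, its transcoder,
 * an enabled panel fitter, and every encoder driving it. */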
 
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
if (intel_crtc->config.pch_pfit.enabled ||
intel_crtc->config.pch_pfit.force_thru)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
 
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
mask |= BIT(intel_display_port_power_domain(intel_encoder));
 
return mask;
}
 
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
bool enable)
{
if (dev_priv->power_domains.init_power_on == enable)
return;
 
if (enable)
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
else
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 
dev_priv->power_domains.init_power_on = enable;
}
 
static void modeset_update_crtc_power_domains(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
struct intel_crtc *crtc;
 
/*
* First get all needed power domains, then put all unneeded, to avoid
* any unnecessary toggling of the power wells.
*/
for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
 
if (!crtc->base.enabled)
continue;
 
pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
 
for_each_power_domain(domain, pipe_domains[crtc->pipe])
intel_display_power_get(dev_priv, domain);
}
 
for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
 
for_each_power_domain(domain, crtc->enabled_power_domains)
intel_display_power_put(dev_priv, domain);
 
crtc->enabled_power_domains = pipe_domains[crtc->pipe];
}
 
intel_display_set_init_power(dev_priv, false);
}
 
/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
 
/* Obtain SKU information */
3974,9 → 4440,25
CCK_FUSE_HPLL_FREQ_MASK;
mutex_unlock(&dev_priv->dpio_lock);
 
return vco_freq[hpll_freq];
return vco_freq[hpll_freq] * 1000;
}
 
static void vlv_update_cdclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
dev_priv->vlv_cdclk_freq);
 
/*
* Program the gmbus_freq based on the cdclk frequency.
* BSpec erroneously claims we should aim for 4MHz, but
* in fact 1MHz is the correct frequency.
*/
I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
}
 
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
3983,9 → 4465,11
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, cmd;
 
if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
 
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
cmd = 2;
else if (cdclk == 266)
else if (cdclk == 266667)
cmd = 1;
else
cmd = 0;
4002,18 → 4486,23
}
mutex_unlock(&dev_priv->rps.hw_lock);
 
if (cdclk == 400) {
if (cdclk == 400000) {
u32 divider, vco;
 
vco = valleyview_get_vco(dev_priv);
divider = ((vco << 1) / cdclk) - 1;
divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
 
mutex_lock(&dev_priv->dpio_lock);
/* adjust cdclk divider */
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
val &= ~0xf;
val &= ~DISPLAY_FREQUENCY_VALUES;
val |= divider;
vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
 
if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
50))
DRM_ERROR("timed out waiting for CDclk change\n");
mutex_unlock(&dev_priv->dpio_lock);
}
 
4026,7 → 4515,7
* For high bandwidth configs, we set a higher latency in the bunit
* so that the core display fetch happens in time to avoid underruns.
*/
if (cdclk == 400)
if (cdclk == 400000)
val |= 4500 / 250; /* 4.5 usec */
else
val |= 3000 / 250; /* 3.0 usec */
4033,69 → 4522,49
vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
mutex_unlock(&dev_priv->dpio_lock);
 
/* Since we changed the CDclk, we need to update the GMBUSFREQ too */
intel_i2c_reset(dev);
vlv_update_cdclk(dev);
}
 
static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
{
int cur_cdclk, vco;
int divider;
 
vco = valleyview_get_vco(dev_priv);
 
mutex_lock(&dev_priv->dpio_lock);
divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
mutex_unlock(&dev_priv->dpio_lock);
 
divider &= 0xf;
 
cur_cdclk = (vco << 1) / (divider + 1);
 
return cur_cdclk;
}
 
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
int cur_cdclk;
int vco = valleyview_get_vco(dev_priv);
int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
 
cur_cdclk = valleyview_cur_cdclk(dev_priv);
 
/*
* Really only a few cases to deal with, as only 4 CDclks are supported:
* 200MHz
* 267MHz
* 320MHz
* 320/333MHz (depends on HPLL freq)
* 400MHz
* So we check to see whether we're above 90% of the lower bin and
* adjust if needed.
*
* We seem to get an unstable or solid color picture at 200MHz.
* Not sure what's wrong. For now use 200MHz only when all pipes
* are off.
*/
if (max_pixclk > 288000) {
return 400;
} else if (max_pixclk > 240000) {
return 320;
} else
return 266;
/* Looks like the 200MHz CDclk freq doesn't work on some configs */
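/* Bump to the next bin once the pixel clock exceeds 90% of the lower
 * one: e.g. with freq_320 == 320000 a 250000 kHz pixel clock picks
 * 320000, while anything above 288000 kHz goes straight to 400000. */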
if (max_pixclk > freq_320*9/10)
return 400000;
else if (max_pixclk > 266667*9/10)
return freq_320;
else if (max_pixclk > 0)
return 266667;
else
return 200000;
}
 
static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
unsigned modeset_pipes,
struct intel_crtc_config *pipe_config)
/* compute the max pixel clock for new configuration */
static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *intel_crtc;
int max_pixclk = 0;
 
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head) {
if (modeset_pipes & (1 << intel_crtc->pipe))
for_each_intel_crtc(dev, intel_crtc) {
if (intel_crtc->new_enabled)
max_pixclk = max(max_pixclk,
pipe_config->adjusted_mode.crtc_clock);
else if (intel_crtc->base.enabled)
max_pixclk = max(max_pixclk,
intel_crtc->config.adjusted_mode.crtc_clock);
intel_crtc->new_config->adjusted_mode.crtc_clock);
}
 
return max_pixclk;
4102,21 → 4571,18
}
 
static void valleyview_modeset_global_pipes(struct drm_device *dev,
unsigned *prepare_pipes,
unsigned modeset_pipes,
struct intel_crtc_config *pipe_config)
unsigned *prepare_pipes)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
pipe_config);
int cur_cdclk = valleyview_cur_cdclk(dev_priv);
int max_pixclk = intel_mode_max_pixclk(dev_priv);
 
if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
dev_priv->vlv_cdclk_freq)
return;
 
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head)
/* disable/enable all currently active pipes while we change cdclk */
for_each_intel_crtc(dev, intel_crtc)
if (intel_crtc->base.enabled)
*prepare_pipes |= (1 << intel_crtc->pipe);
}
4124,12 → 4590,12
static void valleyview_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
int cur_cdclk = valleyview_cur_cdclk(dev_priv);
int max_pixclk = intel_mode_max_pixclk(dev_priv);
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
 
if (req_cdclk != cur_cdclk)
if (req_cdclk != dev_priv->vlv_cdclk_freq)
valleyview_set_cdclk(dev, req_cdclk);
modeset_update_crtc_power_domains(dev);
}
 
static void valleyview_crtc_enable(struct drm_crtc *crtc)
4141,6 → 4607,7
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
bool is_dsi;
u32 dspcntr;
 
WARN_ON(!crtc->enabled);
 
4147,16 → 4614,49
if (intel_crtc->active)
return;
 
is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
 
if (!is_dsi && !IS_CHERRYVIEW(dev))
vlv_prepare_pll(intel_crtc);
 
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
 
intel_set_pipe_timings(intel_crtc);
 
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
((intel_crtc->config.pipe_src_h - 1) << 16) |
(intel_crtc->config.pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
 
i9xx_set_pipeconf(intel_crtc);
 
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
 
dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
crtc->x, crtc->y);
 
intel_crtc->active = true;
 
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
 
is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
 
if (!is_dsi)
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
chv_enable_pll(intel_crtc);
else
vlv_enable_pll(intel_crtc);
}
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
4167,17 → 4667,26
intel_crtc_load_lut(crtc);
 
intel_update_watermarks(crtc);
intel_enable_pipe(dev_priv, pipe, false, is_dsi);
intel_enable_primary_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
intel_enable_pipe(intel_crtc);
 
intel_update_fbc(dev);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
intel_crtc_enable_planes(crtc);
 
/* Underruns don't raise interrupts, so check manually. */
i9xx_check_fifo_underruns(dev);
}
 
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
}
 
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
4186,6 → 4695,7
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 dspcntr;
 
WARN_ON(!crtc->enabled);
 
4192,8 → 4702,42
if (intel_crtc->active)
return;
 
i9xx_set_pll_dividers(intel_crtc);
 
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
 
if (pipe == 0)
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
dspcntr |= DISPPLANE_SEL_PIPE_B;
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
 
intel_set_pipe_timings(intel_crtc);
 
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
((intel_crtc->config.pipe_src_h - 1) << 16) |
(intel_crtc->config.pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
 
i9xx_set_pipeconf(intel_crtc);
 
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
 
dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
crtc->x, crtc->y);
 
intel_crtc->active = true;
 
if (!IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
4205,21 → 4749,25
intel_crtc_load_lut(crtc);
 
intel_update_watermarks(crtc);
intel_enable_pipe(dev_priv, pipe, false, false);
intel_enable_primary_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
/* The fixup needs to happen before cursor is enabled */
if (IS_G4X(dev))
g4x_fixup_plane(dev_priv, pipe);
intel_crtc_update_cursor(crtc, true);
intel_enable_pipe(intel_crtc);
 
/* Give the overlay scaler a chance to enable if it's on this pipe */
intel_crtc_dpms_overlay(intel_crtc, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
intel_update_fbc(dev);
intel_crtc_enable_planes(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
* So don't enable underrun reporting before at least some planes
* are enabled.
* FIXME: Need to fix the logic to work when we turn off all planes
* but leave the pipe running.
*/
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
 
/* Underruns don't raise interrupts, so check manually. */
i9xx_check_fifo_underruns(dev);
}
 
static void i9xx_pfit_disable(struct intel_crtc *crtc)
4244,26 → 4792,42
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
 
if (!intel_crtc->active)
return;
 
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
* So disable underrun reporting before all the planes get disabled.
* FIXME: Need to fix the logic to work when we turn off all planes
* but leave the pipe running.
*/
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 
/*
* Vblank time updates from the shadow to live plane control register
* are blocked if the memory self-refresh mode is active at that
* moment. So to make sure the plane gets truly disabled, first
* disable self-refresh mode. The self-refresh enable bit in turn
* will be checked/applied by the HW only at the next frame start
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
intel_set_memory_cxsr(dev_priv, false);
intel_crtc_disable_planes(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
 
/* Give the overlay scaler a chance to disable if it's on this pipe */
// intel_crtc_wait_for_pending_flips(crtc);
// drm_vblank_off(dev, pipe);
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
* We also need to wait on all gmch platforms because of the
* self-refresh mode constraint explained above.
*/
intel_wait_for_vblank(dev, pipe);
 
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
 
intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_primary_plane(dev_priv, plane, pipe);
 
intel_disable_pipe(dev_priv, pipe);
 
i9xx_pfit_disable(intel_crtc);
4272,15 → 4836,24
if (encoder->post_disable)
encoder->post_disable(encoder);
 
if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
if (IS_CHERRYVIEW(dev))
chv_disable_pll(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev))
vlv_disable_pll(dev_priv, pipe);
else if (!IS_VALLEYVIEW(dev))
else
i9xx_disable_pll(dev_priv, pipe);
}
 
if (!IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 
intel_crtc->active = false;
intel_update_watermarks(crtc);
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
}
 
static void i9xx_crtc_off(struct drm_crtc *crtc)
4318,9 → 4891,38
break;
}
#endif
}
 
/* Master function to enable/disable CRTC and corresponding power wells */
void intel_crtc_control(struct drm_crtc *crtc, bool enable)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum intel_display_power_domain domain;
unsigned long domains;
 
if (enable) {
if (!intel_crtc->active) {
domains = get_crtc_power_domains(crtc);
for_each_power_domain(domain, domains)
intel_display_power_get(dev_priv, domain);
intel_crtc->enabled_power_domains = domains;
 
dev_priv->display.crtc_enable(crtc);
}
} else {
if (intel_crtc->active) {
dev_priv->display.crtc_disable(crtc);
 
domains = intel_crtc->enabled_power_domains;
for_each_power_domain(domain, domains)
intel_display_power_put(dev_priv, domain);
intel_crtc->enabled_power_domains = 0;
}
}
}
 
/**
* Sets the power management mode of the pipe and plane.
*/
4327,7 → 4929,6
void intel_crtc_update_dpms(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
bool enable = false;
 
4334,10 → 4935,7
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
enable |= intel_encoder->connectors_active;
 
if (enable)
dev_priv->display.crtc_enable(crtc);
else
dev_priv->display.crtc_disable(crtc);
intel_crtc_control(crtc, enable);
 
intel_crtc_update_sarea(crtc, enable);
}
4347,25 → 4945,23
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
 
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
 
dev_priv->display.crtc_disable(crtc);
intel_crtc->eld_vld = false;
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
 
assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
 
if (crtc->fb) {
if (crtc->primary->fb) {
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
intel_unpin_fb_obj(old_obj);
i915_gem_track_fb(old_obj, NULL,
INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
crtc->fb = NULL;
crtc->primary->fb = NULL;
}
 
/* Update computed state. */
4417,12 → 5013,18
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.base.id,
drm_get_connector_name(&connector->base));
connector->base.name);
 
/* there is no real hw state for MST connectors */
if (connector->mst_port)
return;
 
WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
"wrong connector dpms state\n");
WARN(connector->base.encoder != &encoder->base,
"active connector not linked to encoder\n");
 
if (encoder) {
WARN(!encoder->connectors_active,
"encoder->connectors_active not set\n");
 
4439,6 → 5041,7
"encoder active on the wrong pipe\n");
}
}
}
 
/* Even simpler default implementation, if there's really no special case to
* consider. */
4579,7 → 5182,7
static void hsw_compute_ips_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
pipe_config->ips_enabled = i915_enable_ips &&
pipe_config->ips_enabled = i915.enable_ips &&
hsw_crtc_supports_ips(crtc) &&
pipe_config->pipe_bpp <= 24;
}
4641,9 → 5244,11
if (HAS_IPS(dev))
hsw_compute_ips_config(crtc, pipe_config);
 
/* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
* clock survives for now. */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
/*
* XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
* old clock survives for now.
*/
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
pipe_config->shared_dpll = crtc->config.shared_dpll;
 
if (pipe_config->has_pch_encoder)
4654,7 → 5259,22
 
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
return 400000; /* FIXME */
struct drm_i915_private *dev_priv = dev->dev_private;
int vco = valleyview_get_vco(dev_priv);
u32 val;
int divider;
 
mutex_lock(&dev_priv->dpio_lock);
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
mutex_unlock(&dev_priv->dpio_lock);
 
divider = val & DISPLAY_FREQUENCY_VALUES;
 
WARN((val & DISPLAY_FREQUENCY_STATUS) !=
(divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
"cdclk change in progress\n");
 
return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
}
 
static int i945_get_display_clock_speed(struct drm_device *dev)
4780,8 → 5400,8
 
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_panel_use_ssc >= 0)
return i915_panel_use_ssc != 0;
if (i915.panel_use_ssc >= 0)
return i915.panel_use_ssc != 0;
return dev_priv->vbt.lvds_use_ssc
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
4821,8 → 5441,6
intel_clock_t *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
u32 fp, fp2 = 0;
 
if (IS_PINEVIEW(dev)) {
4835,17 → 5453,14
fp2 = i9xx_dpll_compute_fp(reduced_clock);
}
 
I915_WRITE(FP0(pipe), fp);
crtc->config.dpll_hw_state.fp0 = fp;
 
crtc->lowfreq_avail = false;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915_powersave) {
I915_WRITE(FP1(pipe), fp2);
reduced_clock && i915.powersave) {
crtc->config.dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
I915_WRITE(FP1(pipe), fp);
crtc->config.dpll_hw_state.fp1 = fp;
}
}
4923,12 → 5538,34
 
static void vlv_update_pll(struct intel_crtc *crtc)
{
u32 dpll, dpll_md;
 
/*
* Enable DPIO clock input. We should never disable the reference
* clock for pipe B, since VGA hotplug / manual detection depends
* on it.
*/
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
/* We should never disable this, set it here for state tracking */
if (crtc->pipe == PIPE_B)
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
dpll |= DPLL_VCO_ENABLE;
crtc->config.dpll_hw_state.dpll = dpll;
 
dpll_md = (crtc->config.pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
crtc->config.dpll_hw_state.dpll_md = dpll_md;
}
 
static void vlv_prepare_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
u32 dpll, mdiv;
u32 mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
u32 coreclk, reg_val, dpll_md;
u32 coreclk, reg_val;
 
mutex_lock(&dev_priv->dpio_lock);
 
4941,7 → 5578,7
/* See eDP HDMI DPIO driver vbios notes doc */
 
/* PLL B needs special handling */
if (pipe)
if (pipe == PIPE_B)
vlv_pllb_recal_opamp(dev_priv, pipe);
 
/* Set up Tx target for periodic Rcomp update */
4985,7 → 5622,7
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
/* Use SSC source */
if (!pipe)
if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df40000);
else
4993,7 → 5630,7
0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (!pipe)
if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
0x0df70000);
else
5009,27 → 5646,85
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
 
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void chv_update_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
int dpll_reg = DPLL(crtc->pipe);
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 loopfilter, intcoeff;
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
int refclk;
 
crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
DPLL_VCO_ENABLE;
if (pipe != PIPE_A)
crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
crtc->config.dpll_hw_state.dpll_md =
(crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
 
bestn = crtc->config.dpll.n;
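/* On CHV dpll.m2 packs the integer part above bit 22 and a 22-bit
 * fraction in the low bits. */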
bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
bestm1 = crtc->config.dpll.m1;
bestm2 = crtc->config.dpll.m2 >> 22;
bestp1 = crtc->config.dpll.p1;
bestp2 = crtc->config.dpll.p2;
 
/*
* Enable DPIO clock input. We should never disable the reference
* clock for pipe B, since VGA hotplug / manual detection depends
* on it.
* Enable Refclk and SSC
*/
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
/* We should never disable this, set it here for state tracking */
if (pipe == PIPE_B)
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
dpll |= DPLL_VCO_ENABLE;
crtc->config.dpll_hw_state.dpll = dpll;
I915_WRITE(dpll_reg,
crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
 
dpll_md = (crtc->config.pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
crtc->config.dpll_hw_state.dpll_md = dpll_md;
mutex_lock(&dev_priv->dpio_lock);
 
if (crtc->config.has_dp_encoder)
intel_dp_set_m_n(crtc);
/* p1 and p2 divider */
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
5 << DPIO_CHV_S1_DIV_SHIFT |
bestp1 << DPIO_CHV_P1_DIV_SHIFT |
bestp2 << DPIO_CHV_P2_DIV_SHIFT |
1 << DPIO_CHV_K_DIV_SHIFT);
 
/* Feedback post-divider - m2 */
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
 
/* Feedback refclk divider - n and m1 */
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
DPIO_CHV_M1_DIV_BY_2 |
1 << DPIO_CHV_N_DIV_SHIFT);
 
/* M2 fraction division */
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
 
/* M2 fraction division enable */
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
DPIO_CHV_FRAC_DIV_EN |
(2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
 
/* Loop filter */
refclk = i9xx_get_refclk(&crtc->base, 0);
loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
2 << DPIO_CHV_GAIN_CTRL_SHIFT;
if (refclk == 100000)
intcoeff = 11;
else if (refclk == 38400)
intcoeff = 10;
else
intcoeff = 9;
loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
 
/* AFC Recal */
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
DPIO_AFC_RECAL);
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
5107,9 → 5802,6
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
crtc->config.dpll_hw_state.dpll_md = dpll_md;
}
 
if (crtc->config.has_dp_encoder)
intel_dp_set_m_n(crtc);
}
 
static void i8xx_update_pll(struct intel_crtc *crtc,
5157,7 → 5849,8
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
uint32_t crtc_vtotal, crtc_vblank_end;
int vsyncshift = 0;
 
/* We need to be careful not to change the adjusted mode, for otherwise
* the hw state checker will get angry at the mismatch. */
5164,14 → 5857,18
crtc_vtotal = adjusted_mode->crtc_vtotal;
crtc_vblank_end = adjusted_mode->crtc_vblank_end;
 
if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
/* the chip adds 2 halflines automatically */
crtc_vtotal -= 1;
crtc_vblank_end -= 1;
vsyncshift = adjusted_mode->crtc_hsync_start
- adjusted_mode->crtc_htotal / 2;
} else {
vsyncshift = 0;
 
if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
else
vsyncshift = adjusted_mode->crtc_hsync_start -
adjusted_mode->crtc_htotal / 2;
if (vsyncshift < 0)
vsyncshift += adjusted_mode->crtc_htotal;
}
 
if (INTEL_INFO(dev)->gen > 3)
5255,25 → 5952,23
pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
}
 
static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config)
{
struct drm_crtc *crtc = &intel_crtc->base;
mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
 
crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
 
crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
mode->flags = pipe_config->adjusted_mode.flags;
 
crtc->mode.flags = pipe_config->adjusted_mode.flags;
 
crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
crtc->mode.flags |= pipe_config->adjusted_mode.flags;
mode->clock = pipe_config->adjusted_mode.crtc_clock;
mode->flags |= pipe_config->adjusted_mode.flags;
}
 
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
5323,10 → 6018,13
}
}
 
if (!IS_GEN2(dev) &&
intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_INFO(dev)->gen < 4 ||
intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
} else
pipeconf |= PIPECONF_PROGRESSIVE;
 
if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
5343,16 → 6041,12
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dspcntr;
bool ok, has_reduced_clock = false;
bool is_lvds = false, is_dsi = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
5368,7 → 6062,7
}
 
if (is_dsi)
goto skip_dpll;
return 0;
 
if (!intel_crtc->config.clock_set) {
refclk = i9xx_get_refclk(crtc, num_connectors);
5413,6 → 6107,8
i8xx_update_pll(intel_crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
} else if (IS_CHERRYVIEW(dev)) {
chv_update_pll(intel_crtc);
} else if (IS_VALLEYVIEW(dev)) {
vlv_update_pll(intel_crtc);
} else {
5421,37 → 6117,9
num_connectors);
}
 
skip_dpll:
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
 
if (!IS_VALLEYVIEW(dev)) {
if (pipe == 0)
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
dspcntr |= DISPPLANE_SEL_PIPE_B;
return 0;
}
 
intel_set_pipe_timings(intel_crtc);
 
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
((intel_crtc->config.pipe_src_h - 1) << 16) |
(intel_crtc->config.pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
 
i9xx_set_pipeconf(intel_crtc);
 
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
 
ret = intel_pipe_set_base(crtc, x, y, fb);
 
return ret;
}
 
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
5492,6 → 6160,10
u32 mdiv;
int refclk = 100000;
 
/* In case of MIPI DPLL will not even be used */
if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
return;
 
mutex_lock(&dev_priv->dpio_lock);
mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
mutex_unlock(&dev_priv->dpio_lock);
5508,6 → 6180,97
pipe_config->port_clock = clock.dot / 5;
}
 
static void i9xx_get_plane_config(struct intel_crtc *crtc,
struct intel_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, base, offset;
int pipe = crtc->pipe, plane = crtc->plane;
int fourcc, pixel_format;
int aligned_height;
 
crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
if (!crtc->base.primary->fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
return;
}
 
val = I915_READ(DSPCNTR(plane));
 
if (INTEL_INFO(dev)->gen >= 4)
if (val & DISPPLANE_TILED)
plane_config->tiled = true;
 
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = intel_format_to_fourcc(pixel_format);
crtc->base.primary->fb->pixel_format = fourcc;
crtc->base.primary->fb->bits_per_pixel =
drm_format_plane_cpp(fourcc, 0) * 8;
 
if (INTEL_INFO(dev)->gen >= 4) {
if (plane_config->tiled)
offset = I915_READ(DSPTILEOFF(plane));
else
offset = I915_READ(DSPLINOFF(plane));
base = I915_READ(DSPSURF(plane)) & 0xfffff000;
} else {
base = I915_READ(DSPADDR(plane));
}
plane_config->base = base;
 
val = I915_READ(PIPESRC(pipe));
crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
 
val = I915_READ(DSPSTRIDE(pipe));
crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
 
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
 
plane_config->size = 16*1024*1024;
 
 
DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe, plane, crtc->base.primary->fb->width,
crtc->base.primary->fb->height,
crtc->base.primary->fb->bits_per_pixel, base,
crtc->base.primary->fb->pitches[0],
plane_config->size);
 
}
 
static void chv_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = pipe_config->cpu_transcoder;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
intel_clock_t clock;
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
int refclk = 100000;
 
mutex_lock(&dev_priv->dpio_lock);
cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
mutex_unlock(&dev_priv->dpio_lock);
 
clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
 
chv_clock(refclk, &clock);
 
/* clock.dot is the fast clock */
pipe_config->port_clock = clock.dot / 5;
}
 
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
5515,6 → 6278,10
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
 
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
5538,6 → 6305,9
}
}
 
if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
 
if (INTEL_INFO(dev)->gen < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
 
5573,7 → 6343,9
DPLL_PORTB_READY_MASK);
}
 
if (IS_VALLEYVIEW(dev))
if (IS_CHERRYVIEW(dev))
chv_crtc_clock_get(crtc, pipe_config);
else if (IS_VALLEYVIEW(dev))
vlv_crtc_clock_get(crtc, pipe_config);
else
i9xx_crtc_clock_get(crtc, pipe_config);
5694,8 → 6466,7
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
DRM_DEBUG_KMS("Using SSC on eDP\n");
val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
}
else
} else
val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else
val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6176,7 → 6947,7
* is 2.5%; use 5% for safety's sake.
*/
u32 bps = target_clock * bpp * 21 / 20;
return bps / (link_bw * 8) + 1;
return DIV_ROUND_UP(bps, link_bw * 8);
}
 
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
6274,10 → 7045,7
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll = 0, fp = 0, fp2 = 0;
6285,7 → 7053,6
bool is_lvds = false;
struct intel_encoder *encoder;
struct intel_shared_dpll *pll;
int ret;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
6335,38 → 7102,20
pll = intel_get_shared_dpll(intel_crtc);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(pipe));
pipe_name(intel_crtc->pipe));
return -EINVAL;
}
} else
intel_put_shared_dpll(intel_crtc);
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
 
if (is_lvds && has_reduced_clock && i915_powersave)
if (is_lvds && has_reduced_clock && i915.powersave)
intel_crtc->lowfreq_avail = true;
else
intel_crtc->lowfreq_avail = false;
 
intel_set_pipe_timings(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
&intel_crtc->config.fdi_m_n);
return 0;
}
 
ironlake_set_pipeconf(crtc);
 
/* Set up the display plane register */
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
POSTING_READ(DSPCNTR(plane));
 
ret = intel_pipe_set_base(crtc, x, y, fb);
 
return ret;
}
 
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
6451,6 → 7200,65
}
}
 
static void ironlake_get_plane_config(struct intel_crtc *crtc,
struct intel_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, base, offset;
int pipe = crtc->pipe, plane = crtc->plane;
int fourcc, pixel_format;
int aligned_height;
 
crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
if (!crtc->base.primary->fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
return;
}
 
val = I915_READ(DSPCNTR(plane));
 
if (INTEL_INFO(dev)->gen >= 4)
if (val & DISPPLANE_TILED)
plane_config->tiled = true;
 
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = intel_format_to_fourcc(pixel_format);
crtc->base.primary->fb->pixel_format = fourcc;
crtc->base.primary->fb->bits_per_pixel =
drm_format_plane_cpp(fourcc, 0) * 8;
 
base = I915_READ(DSPSURF(plane)) & 0xfffff000;
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
offset = I915_READ(DSPOFFSET(plane));
} else {
if (plane_config->tiled)
offset = I915_READ(DSPTILEOFF(plane));
else
offset = I915_READ(DSPLINOFF(plane));
}
plane_config->base = base;
 
val = I915_READ(PIPESRC(pipe));
crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
 
val = I915_READ(DSPSTRIDE(pipe));
crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
 
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
 
plane_config->size = 16*1024*1024;
 
DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe, plane, crtc->base.primary->fb->width,
crtc->base.primary->fb->height,
crtc->base.primary->fb->bits_per_pixel, base,
crtc->base.primary->fb->pitches[0],
plane_config->size);
}
 
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
6458,6 → 7266,10
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
 
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
6482,6 → 7294,9
break;
}
 
if (tmp & PIPECONF_COLOR_RANGE_SELECT)
pipe_config->limited_color_range = true;
 
if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
 
6529,22 → 7344,20
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
struct intel_crtc *crtc;
unsigned long irqflags;
uint32_t val;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
for_each_intel_crtc(dev, crtc)
WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
 
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
WARN(plls->spll_refcount, "SPLL enabled\n");
WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
if (IS_HASWELL(dev))
WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
6553,16 → 7366,41
"Utility pin enabled\n");
WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
val = I915_READ(DEIMR);
WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff,
"Unexpected DEIMR bits enabled: 0x%x\n", val);
val = I915_READ(SDEIMR);
WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
"Unexpected SDEIMR bits enabled: 0x%x\n", val);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
/*
* In theory we can still leave IRQs enabled, as long as only the HPD
* interrupts remain enabled. We used to check for that, but since it's
* gen-specific and since we only disable LCPLL after we fully disable
* the interrupts, the check below should be enough.
*/
WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
 
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
if (IS_HASWELL(dev))
return I915_READ(D_COMP_HSW);
else
return I915_READ(D_COMP_BDW);
}
 
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
struct drm_device *dev = dev_priv->dev;
 
if (IS_HASWELL(dev)) {
mutex_lock(&dev_priv->rps.hw_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
val))
DRM_ERROR("Failed to write to D_COMP\n");
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
I915_WRITE(D_COMP_BDW, val);
POSTING_READ(D_COMP_BDW);
}
}
 
/*
* This function implements pieces of two sequences from BSpec:
* - Sequence for display software to disable LCPLL
6598,16 → 7436,13
if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
DRM_ERROR("LCPLL still locked\n");
 
val = I915_READ(D_COMP);
val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_DISABLE;
mutex_lock(&dev_priv->rps.hw_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
DRM_ERROR("Failed to disable D_COMP\n");
mutex_unlock(&dev_priv->rps.hw_lock);
POSTING_READ(D_COMP);
delay(1);
hsw_write_dcomp(dev_priv, val);
ndelay(100);
 
if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
1))
DRM_ERROR("D_COMP RCOMP still in progress\n");
 
if (allow_power_down) {
6625,6 → 7460,7
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
uint32_t val;
unsigned long irqflags;
 
val = I915_READ(LCPLL_CTL);
 
6632,9 → 7468,22
LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
return;
 
/* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine! */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
/*
* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine. To prevent PC8 state, just enable force_wake.
*
* The other problem is that hsw_restore_lcpll() is called as part of
* the runtime PM resume sequence, so we can't just call
* gen6_gt_force_wake_get() because that function calls
* intel_runtime_pm_get(), and we can't change the runtime PM refcount
* while we are on the resume sequence. So to solve this problem we have
* to call special forcewake code that doesn't touch runtime PM and
* doesn't enable the forcewake delayed work.
*/
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
6642,14 → 7491,10
POSTING_READ(LCPLL_CTL);
}
 
val = I915_READ(D_COMP);
val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
mutex_lock(&dev_priv->rps.hw_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
DRM_ERROR("Failed to enable D_COMP\n");
mutex_unlock(&dev_priv->rps.hw_lock);
POSTING_READ(D_COMP);
hsw_write_dcomp(dev_priv, val);
 
val = I915_READ(LCPLL_CTL);
val &= ~LCPLL_PLL_DISABLE;
6668,26 → 7513,43
DRM_ERROR("Switching back to LCPLL failed\n");
}
 
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
/* See the big comment above. */
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
 
void hsw_enable_pc8_work(struct work_struct *__work)
/*
* Package states C8 and deeper are really deep PC states that can only be
* reached when all the devices on the system allow it, so even if the graphics
* device allows PC8+, it doesn't mean the system will actually get to these
* states. Our driver only allows PC8+ when going into runtime PM.
*
* The requirements for PC8+ are that all the outputs are disabled, the power
* well is disabled and most interrupts are disabled, and these are also
* requirements for runtime PM. When these conditions are met, we manually take
* care of the rest: we disable the interrupts and clocks and switch the LCPLL
* refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
* hang the machine.
*
* When we really reach PC8 or deeper states (not just when we allow it) we lose
* the state of some registers, so when we come back from PC8+ we need to
* restore this state. We don't get into PC8+ if we're not in RC6, so we don't
* need to take care of the registers kept by RC6. Notice that this happens even
* if we don't put the device in PCI D3 state (which is what currently happens
* because of the runtime PM support).
*
* For more, read "Display Sequences for Package C8" in the hardware
* documentation.
*/
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv =
container_of(to_delayed_work(__work), struct drm_i915_private,
pc8.enable_work);
struct drm_device *dev = dev_priv->dev;
uint32_t val;
 
WARN_ON(!HAS_PC8(dev));
 
if (dev_priv->pc8.enabled)
return;
 
DRM_DEBUG_KMS("Enabling package C8+\n");
 
dev_priv->pc8.enabled = true;
 
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
6695,51 → 7557,17
}
 
lpt_disable_clkout_dp(dev);
hsw_pc8_disable_interrupts(dev);
hsw_disable_lcpll(dev_priv, true, true);
 
intel_runtime_pm_put(dev_priv);
}
 
static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
WARN(dev_priv->pc8.disable_count < 1,
"pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
 
dev_priv->pc8.disable_count--;
if (dev_priv->pc8.disable_count != 0)
return;
 
schedule_delayed_work(&dev_priv->pc8.enable_work,
msecs_to_jiffies(i915_pc8_timeout));
}
 
static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
uint32_t val;
 
WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
WARN(dev_priv->pc8.disable_count < 0,
"pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
 
dev_priv->pc8.disable_count++;
if (dev_priv->pc8.disable_count != 1)
return;
 
WARN_ON(!HAS_PC8(dev));
 
cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
if (!dev_priv->pc8.enabled)
return;
 
DRM_DEBUG_KMS("Disabling package C8+\n");
 
intel_runtime_pm_get(dev_priv);
 
hsw_restore_lcpll(dev_priv);
hsw_pc8_restore_interrupts(dev);
lpt_init_pch_refclk(dev);
 
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
6749,230 → 7577,79
}
 
intel_prepare_ddi(dev);
i915_gem_init_swizzling(dev);
mutex_lock(&dev_priv->rps.hw_lock);
gen6_update_ring_freq(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
dev_priv->pc8.enabled = false;
}
 
void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
static void snb_modeset_global_resources(struct drm_device *dev)
{
if (!HAS_PC8(dev_priv->dev))
return;
 
mutex_lock(&dev_priv->pc8.lock);
__hsw_enable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
modeset_update_crtc_power_domains(dev);
}
 
void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
static void haswell_modeset_global_resources(struct drm_device *dev)
{
if (!HAS_PC8(dev_priv->dev))
return;
 
mutex_lock(&dev_priv->pc8.lock);
__hsw_disable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
modeset_update_crtc_power_domains(dev);
}
 
static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *fb)
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *crtc;
uint32_t val;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
if (crtc->base.enabled)
return false;
if (!intel_ddi_pll_select(intel_crtc))
return -EINVAL;
 
/* This case is still possible since we have the i915.disable_power_well
* parameter and also the KVMr or something else might be requesting the
* power well. */
val = I915_READ(HSW_PWR_WELL_DRIVER);
if (val != 0) {
DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
return false;
}
intel_crtc->lowfreq_avail = false;
 
return true;
return 0;
}
 
/* Since we're called from modeset_global_resources there's no way to
* symmetrically increase and decrease the refcount, so we use
* dev_priv->pc8.requirements_met to track whether we already have the refcount
* or not.
*/
static void hsw_update_package_c8(struct drm_device *dev)
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool allow;
struct intel_shared_dpll *pll;
enum port port;
uint32_t tmp;
 
if (!HAS_PC8(dev_priv->dev))
return;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
 
if (!i915_enable_pc8)
return;
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
 
mutex_lock(&dev_priv->pc8.lock);
pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
 
allow = hsw_can_enable_package_c8(dev_priv);
 
if (allow == dev_priv->pc8.requirements_met)
goto done;
 
dev_priv->pc8.requirements_met = allow;
 
if (allow)
__hsw_enable_package_c8(dev_priv);
else
__hsw_disable_package_c8(dev_priv);
 
done:
mutex_unlock(&dev_priv->pc8.lock);
switch (pipe_config->ddi_pll_sel) {
case PORT_CLK_SEL_WRPLL1:
pipe_config->shared_dpll = DPLL_ID_WRPLL1;
break;
case PORT_CLK_SEL_WRPLL2:
pipe_config->shared_dpll = DPLL_ID_WRPLL2;
break;
}
 
static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
{
if (!HAS_PC8(dev_priv->dev))
return;
if (pipe_config->shared_dpll >= 0) {
pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
 
mutex_lock(&dev_priv->pc8.lock);
if (!dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = true;
__hsw_enable_package_c8(dev_priv);
WARN_ON(!pll->get_hw_state(dev_priv, pll,
&pipe_config->dpll_hw_state));
}
mutex_unlock(&dev_priv->pc8.lock);
}
 
static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
{
if (!HAS_PC8(dev_priv->dev))
return;
 
mutex_lock(&dev_priv->pc8.lock);
if (dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = false;
__hsw_disable_package_c8(dev_priv);
}
mutex_unlock(&dev_priv->pc8.lock);
}
 
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))
 
static unsigned long get_pipe_power_domains(struct drm_device *dev,
enum pipe pipe, bool pfit_enabled)
{
unsigned long mask;
enum transcoder transcoder;
 
transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
 
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
if (pfit_enabled)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
 
return mask;
}
 
void intel_display_set_init_power(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->power_domains.init_power_on == enable)
return;
 
if (enable)
intel_display_power_get(dev, POWER_DOMAIN_INIT);
else
intel_display_power_put(dev, POWER_DOMAIN_INIT);
 
dev_priv->power_domains.init_power_on = enable;
}
 
static void modeset_update_power_wells(struct drm_device *dev)
{
unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
struct intel_crtc *crtc;
 
/*
* First get all needed power domains, then put all unneeded, to avoid
* any unnecessary toggling of the power wells.
* Haswell has only one FDI/PCH transcoder (A), and it is connected to
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
enum intel_display_power_domain domain;
if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
 
if (!crtc->base.enabled)
continue;
tmp = I915_READ(FDI_RX_CTL(PIPE_A));
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
 
pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
crtc->pipe,
crtc->config.pch_pfit.enabled);
 
for_each_power_domain(domain, pipe_domains[crtc->pipe])
intel_display_power_get(dev, domain);
ironlake_get_fdi_m_n_config(crtc, pipe_config);
}
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
enum intel_display_power_domain domain;
 
for_each_power_domain(domain, crtc->enabled_power_domains)
intel_display_power_put(dev, domain);
 
crtc->enabled_power_domains = pipe_domains[crtc->pipe];
}
 
intel_display_set_init_power(dev, false);
}
 
static void haswell_modeset_global_resources(struct drm_device *dev)
{
modeset_update_power_wells(dev);
hsw_update_package_c8(dev);
}
 
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane;
int ret;
 
if (!intel_ddi_pll_select(intel_crtc))
return -EINVAL;
intel_ddi_pll_enable(intel_crtc);
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
 
intel_crtc->lowfreq_avail = false;
 
intel_set_pipe_timings(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
&intel_crtc->config.fdi_m_n);
}
 
haswell_set_pipeconf(crtc);
 
intel_set_pipe_csc(crtc);
 
/* Set up the display plane register */
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
POSTING_READ(DSPCNTR(plane));
 
ret = intel_pipe_set_base(crtc, x, y, fb);
 
return ret;
}
 
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
6981,6 → 7658,10
enum intel_display_power_domain pfit_domain;
uint32_t tmp;
 
if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
 
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
7006,7 → 7687,7
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
 
if (!intel_display_power_enabled(dev,
if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
 
7014,27 → 7695,12
if (!(tmp & PIPECONF_ENABLE))
return false;
 
/*
* Haswell has only FDI/PCH transcoder A. It is which is connected to
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
*/
tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
haswell_get_ddi_port_state(crtc, pipe_config);
 
tmp = I915_READ(FDI_RX_CTL(PIPE_A));
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
 
ironlake_get_fdi_m_n_config(crtc, pipe_config);
}
 
intel_get_pipe_timings(crtc, pipe_config);
 
pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_enabled(dev, pfit_domain))
if (intel_display_power_enabled(dev_priv, pfit_domain))
ironlake_get_pfit_config(crtc, pipe_config);
 
if (IS_HASWELL(dev))
7046,38 → 7712,6
return true;
}
 
static int intel_crtc_mode_set(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int ret;
 
drm_vblank_pre_modeset(dev, pipe);
 
ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);
 
drm_vblank_post_modeset(dev, pipe);
 
if (ret != 0)
return ret;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
encoder->base.base.id,
drm_get_encoder_name(&encoder->base),
mode->base.id, mode->name);
encoder->mode_set(encoder);
}
 
return 0;
}
 
static struct {
int clock;
u32 config;
7192,8 → 7826,6
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t eldv;
uint32_t i;
int len;
7205,17 → 7837,14
int aud_config = HSW_AUD_CFG(pipe);
int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
 
 
DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
 
/* Audio output enable */
DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
tmp = I915_READ(aud_cntrl_st2);
tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
I915_WRITE(aud_cntrl_st2, tmp);
POSTING_READ(aud_cntrl_st2);
 
/* Wait for 1 vertical blank */
intel_wait_for_vblank(dev, pipe);
assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
 
/* Set ELD valid state */
tmp = I915_READ(aud_cntrl_st2);
7235,7 → 7864,6
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
 
eldv = AUDIO_ELD_VALID_A << (pipe * 4);
intel_crtc->eld_vld = true;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
7382,9 → 8010,9
 
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id,
drm_get_connector_name(connector),
connector->name,
connector->encoder->base.id,
drm_get_encoder_name(connector->encoder));
connector->encoder->name);
 
connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
 
7397,30 → 8025,34
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool visible = base != 0;
u32 cntl;
uint32_t cntl;
 
if (intel_crtc->cursor_visible == visible)
return;
 
cntl = I915_READ(_CURACNTR);
if (visible) {
if (base != intel_crtc->cursor_base) {
/* On these chipsets we can only modify the base whilst
* the cursor is disabled.
*/
if (intel_crtc->cursor_cntl) {
I915_WRITE(_CURACNTR, 0);
POSTING_READ(_CURACNTR);
intel_crtc->cursor_cntl = 0;
}
 
I915_WRITE(_CURABASE, base);
POSTING_READ(_CURABASE);
}
 
cntl &= ~(CURSOR_FORMAT_MASK);
/* XXX width must be 64, stride 256 => 0x00 << 28 */
cntl |= CURSOR_ENABLE |
cntl = 0;
if (base)
cntl = (CURSOR_ENABLE |
CURSOR_GAMMA_ENABLE |
CURSOR_FORMAT_ARGB;
} else
cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
CURSOR_FORMAT_ARGB);
if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE(_CURACNTR, cntl);
 
intel_crtc->cursor_visible = visible;
POSTING_READ(_CURACNTR);
intel_crtc->cursor_cntl = cntl;
}
}
 
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
7428,24 → 8060,34
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
bool visible = base != 0;
uint32_t cntl;
 
if (intel_crtc->cursor_visible != visible) {
uint32_t cntl = I915_READ(CURCNTR(pipe));
cntl = 0;
if (base) {
cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
cntl = MCURSOR_GAMMA_ENABLE;
switch (intel_crtc->cursor_width) {
case 64:
cntl |= CURSOR_MODE_64_ARGB_AX;
break;
case 128:
cntl |= CURSOR_MODE_128_ARGB_AX;
break;
case 256:
cntl |= CURSOR_MODE_256_ARGB_AX;
break;
default:
WARN_ON(1);
return;
}
cntl |= pipe << 28; /* Connect to correct pipe */
} else {
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE(CURCNTR(pipe), cntl);
POSTING_READ(CURCNTR(pipe));
intel_crtc->cursor_cntl = cntl;
}
 
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
POSTING_READ(CURCNTR(pipe));
I915_WRITE(CURBASE(pipe), base);
POSTING_READ(CURBASE(pipe));
}
7456,33 → 8098,42
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
bool visible = base != 0;
uint32_t cntl;
 
if (intel_crtc->cursor_visible != visible) {
uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
cntl = 0;
if (base) {
cntl &= ~CURSOR_MODE;
cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
} else {
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
cntl = MCURSOR_GAMMA_ENABLE;
switch (intel_crtc->cursor_width) {
case 64:
cntl |= CURSOR_MODE_64_ARGB_AX;
break;
case 128:
cntl |= CURSOR_MODE_128_ARGB_AX;
break;
case 256:
cntl |= CURSOR_MODE_256_ARGB_AX;
break;
default:
WARN_ON(1);
return;
}
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
}
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
cntl |= CURSOR_PIPE_CSC_ENABLE;
cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
 
if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE(CURCNTR(pipe), cntl);
POSTING_READ(CURCNTR(pipe));
intel_crtc->cursor_cntl = cntl;
}
I915_WRITE(CURCNTR_IVB(pipe), cntl);
 
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
POSTING_READ(CURCNTR_IVB(pipe));
I915_WRITE(CURBASE_IVB(pipe), base);
POSTING_READ(CURBASE_IVB(pipe));
I915_WRITE(CURBASE(pipe), base);
POSTING_READ(CURBASE(pipe));
}
 
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
void intel_crtc_update_cursor(struct drm_crtc *crtc,
bool on)
{
struct drm_device *dev = crtc->dev;
7489,10 → 8140,9
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int x = intel_crtc->cursor_x;
int y = intel_crtc->cursor_y;
int x = crtc->cursor_x;
int y = crtc->cursor_y;
u32 base = 0, pos = 0;
bool visible;
 
if (on)
base = intel_crtc->cursor_addr;
7521,34 → 8171,41
}
pos |= y << CURSOR_Y_SHIFT;
 
visible = base != 0;
if (!visible && !intel_crtc->cursor_visible)
if (base == 0 && intel_crtc->cursor_base == 0)
return;
 
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) {
I915_WRITE(CURPOS_IVB(pipe), pos);
I915_WRITE(CURPOS(pipe), pos);
 
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
ivb_update_cursor(crtc, base);
} else {
I915_WRITE(CURPOS(pipe), pos);
else if (IS_845G(dev) || IS_I865G(dev))
i845_update_cursor(crtc, base);
else
i9xx_update_cursor(crtc, base);
intel_crtc->cursor_base = base;
}
}
 
#if 0
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file,
uint32_t handle,
/*
* intel_crtc_cursor_set_obj - Set cursor to specified GEM object
*
* Note that the object's reference will be consumed if the update fails. If
* the update succeeds, the reference of the old object (if any) will be
* consumed.
*/
static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
struct drm_i915_gem_object *obj,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj;
enum pipe pipe = intel_crtc->pipe;
unsigned old_width;
uint32_t addr;
int ret;
 
/* if we want to turn off the cursor ignore width and height */
if (!handle) {
if (!obj) {
DRM_DEBUG_KMS("cursor off\n");
addr = 0;
obj = NULL;
7556,18 → 8213,16
goto finish;
}
 
/* Currently we only support 64x64 cursors */
if (width != 64 || height != 64) {
DRM_ERROR("we currently only support 64x64 cursors\n");
/* Check for which cursor types we support */
if (!((width == 64 && height == 64) ||
(width == 128 && height == 128 && !IS_GEN2(dev)) ||
(width == 256 && height == 256 && !IS_GEN2(dev)))) {
DRM_DEBUG("Cursor dimension not supported\n");
return -EINVAL;
}
 
obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
if (&obj->base == NULL)
return -ENOENT;
 
if (obj->base.size < width * height * 4) {
DRM_ERROR("buffer is to small\n");
DRM_DEBUG_KMS("buffer is too small\n");
ret = -ENOMEM;
goto fail;
}
7574,11 → 8229,11
 
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!dev_priv->info->cursor_needs_physical) {
if (!INTEL_INFO(dev)->cursor_needs_physical) {
unsigned alignment;
 
if (obj->tiling_mode) {
DRM_ERROR("cursor cannot be tiled\n");
DRM_DEBUG_KMS("cursor cannot be tiled\n");
ret = -EINVAL;
goto fail_locked;
}
7594,13 → 8249,13
 
ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
if (ret) {
DRM_ERROR("failed to move cursor bo into the GTT\n");
DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
goto fail_locked;
}
 
ret = i915_gem_object_put_fence(obj);
if (ret) {
DRM_ERROR("failed to release fence for cursor");
DRM_DEBUG_KMS("failed to release fence for cursor");
goto fail_unpin;
}
 
7607,15 → 8262,13
addr = i915_gem_obj_ggtt_offset(obj);
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, obj,
(intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
align);
if (ret) {
DRM_ERROR("failed to attach phys object\n");
goto fail_locked;
// ret = i915_gem_object_attach_phys(obj, align);
// if (ret) {
// DRM_DEBUG_KMS("failed to attach phys object\n");
// goto fail_locked;
// }
// addr = obj->phys_handle->busaddr;
}
addr = obj->phys_obj->handle->busaddr;
}
 
if (IS_GEN2(dev))
I915_WRITE(CURSIZE, (height << 12) | width);
7622,23 → 8275,26
 
finish:
if (intel_crtc->cursor_bo) {
if (dev_priv->info->cursor_needs_physical) {
if (intel_crtc->cursor_bo != obj)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
if (!INTEL_INFO(dev)->cursor_needs_physical)
i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
 
i915_gem_track_fb(intel_crtc->cursor_bo, obj,
INTEL_FRONTBUFFER_CURSOR(pipe));
mutex_unlock(&dev->struct_mutex);
 
old_width = intel_crtc->cursor_width;
 
intel_crtc->cursor_addr = addr;
intel_crtc->cursor_bo = obj;
intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height;
 
if (intel_crtc->active)
if (intel_crtc->active) {
if (old_width != width)
intel_update_watermarks(crtc);
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
}
 
return 0;
fail_unpin:
7649,21 → 8305,7
drm_gem_object_unreference_unlocked(&obj->base);
return ret;
}
#endif
 
static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
 
if (intel_crtc->active)
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
return 0;
}
 
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t start, uint32_t size)
{
7686,7 → 8328,7
};
 
struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
7699,12 → 8341,7
return ERR_PTR(-ENOMEM);
}
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto err;
 
ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
mutex_unlock(&dev->struct_mutex);
if (ret)
goto err;
 
7716,6 → 8353,23
return ERR_PTR(ret);
}
 
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
struct drm_framebuffer *fb;
int ret;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ERR_PTR(ret);
fb = __intel_framebuffer_create(dev, mode_cmd, obj);
mutex_unlock(&dev->struct_mutex);
 
return fb;
}
 
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
7727,7 → 8381,7
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
return PAGE_ALIGN(pitch * mode->vdisplay);
}
 
static struct drm_framebuffer *
7738,7 → 8392,18
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 
return NULL;
obj = i915_gem_alloc_object(dev,
intel_framebuffer_size_for_mode(mode, bpp));
if (obj == NULL)
return ERR_PTR(-ENOMEM);
 
mode_cmd.width = mode->hdisplay;
mode_cmd.height = mode->vdisplay;
mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
bpp);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
 
return intel_framebuffer_create(dev, &mode_cmd, obj);
}
 
static struct drm_framebuffer *
7750,14 → 8415,16
struct drm_i915_gem_object *obj;
struct drm_framebuffer *fb;
 
if (dev_priv->fbdev == NULL)
if (!dev_priv->fbdev)
return NULL;
 
obj = dev_priv->fbdev->ifb.obj;
if (obj == NULL)
if (!dev_priv->fbdev->fb)
return NULL;
 
fb = &dev_priv->fbdev->ifb.base;
obj = dev_priv->fbdev->fb->obj;
BUG_ON(!obj);
 
fb = &dev_priv->fbdev->fb->base;
if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
fb->bits_per_pixel))
return NULL;
7773,7 → 8440,8
 
bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old)
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_crtc *intel_crtc;
struct intel_encoder *intel_encoder =
7783,12 → 8451,18
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *fb;
int i = -1;
struct drm_mode_config *config = &dev->mode_config;
int ret, i = -1;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector),
encoder->base.id, drm_get_encoder_name(encoder));
connector->base.id, connector->name,
encoder->base.id, encoder->name);
 
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
goto fail_unlock;
 
/*
* Algorithm gets a little messy:
*
7803,7 → 8477,9
if (encoder->crtc) {
crtc = encoder->crtc;
 
mutex_lock(&crtc->mutex);
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail_unlock;
 
old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
7816,15 → 8492,19
}
 
/* Find an unused one (if possible) */
list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
for_each_crtc(dev, possible_crtc) {
i++;
if (!(encoder->possible_crtcs & (1 << i)))
continue;
if (!possible_crtc->enabled) {
if (possible_crtc->enabled)
continue;
/* This can occur when applying the pipe A quirk on resume. */
if (to_intel_crtc(possible_crtc)->new_enabled)
continue;
 
crtc = possible_crtc;
break;
}
}
 
/*
* If we didn't find an unused CRTC, don't use any.
7831,14 → 8511,18
*/
if (!crtc) {
DRM_DEBUG_KMS("no pipe available for load-detect\n");
return false;
goto fail_unlock;
}
 
mutex_lock(&crtc->mutex);
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail_unlock;
intel_encoder->new_crtc = to_intel_crtc(crtc);
to_intel_connector(connector)->new_encoder = intel_encoder;
 
intel_crtc = to_intel_crtc(crtc);
intel_crtc->new_enabled = true;
intel_crtc->new_config = &intel_crtc->config;
old->dpms_mode = connector->dpms;
old->load_detect_temp = true;
old->release_fb = NULL;
7862,8 → 8546,7
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
mutex_unlock(&crtc->mutex);
return false;
goto fail;
}
 
if (intel_set_mode(crtc, mode, 0, 0, fb)) {
7870,15 → 8553,28
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
mutex_unlock(&crtc->mutex);
return false;
goto fail;
}
 
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
return true;
 
fail:
intel_crtc->new_enabled = crtc->enabled;
if (intel_crtc->new_enabled)
intel_crtc->new_config = &intel_crtc->config;
else
intel_crtc->new_config = NULL;
fail_unlock:
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
}
 
return false;
}
 
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old)
{
7886,14 → 8582,17
intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector),
encoder->base.id, drm_get_encoder_name(encoder));
connector->base.id, connector->name,
encoder->base.id, encoder->name);
 
if (old->load_detect_temp) {
to_intel_connector(connector)->new_encoder = NULL;
intel_encoder->new_crtc = NULL;
intel_crtc->new_enabled = false;
intel_crtc->new_config = NULL;
intel_set_mode(crtc, NULL, 0, 0, NULL);
 
if (old->release_fb) {
7901,7 → 8600,6
drm_framebuffer_unreference(old->release_fb);
}
 
mutex_unlock(&crtc->mutex);
return;
}
 
7908,8 → 8606,6
/* Switch crtc and encoder back off if necessary */
if (old->dpms_mode != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, old->dpms_mode);
 
mutex_unlock(&crtc->mutex);
}
 
static int i9xx_pll_refclk(struct drm_device *dev,
8103,16 → 8799,14
return mode;
}
 
static void intel_increase_pllclock(struct drm_crtc *crtc)
static void intel_increase_pllclock(struct drm_device *dev,
enum pipe pipe)
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_reg = DPLL(pipe);
int dpll;
 
if (HAS_PCH_SPLIT(dev))
if (!HAS_GMCH_DISPLAY(dev))
return;
 
if (!dev_priv->lvds_downclock_avail)
8137,10 → 8831,10
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
if (HAS_PCH_SPLIT(dev))
if (!HAS_GMCH_DISPLAY(dev))
return;
 
if (!dev_priv->lvds_downclock_avail)
8174,8 → 8868,12
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
hsw_package_c8_gpu_busy(dev_priv);
if (dev_priv->mm.busy)
return;
 
intel_runtime_pm_get(dev_priv);
i915_update_gfx_val(dev_priv);
dev_priv->mm.busy = true;
}
 
void intel_mark_idle(struct drm_device *dev)
8183,44 → 8881,118
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
 
hsw_package_c8_gpu_idle(dev_priv);
 
if (!i915_powersave)
if (!dev_priv->mm.busy)
return;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
dev_priv->mm.busy = false;
 
if (!i915.powersave)
goto out;
 
for_each_crtc(dev, crtc) {
if (!crtc->primary->fb)
continue;
 
intel_decrease_pllclock(crtc);
}
 
if (dev_priv->info->gen >= 6)
if (INTEL_INFO(dev)->gen >= 6)
gen6_rps_idle(dev->dev_private);
 
out:
intel_runtime_pm_put(dev_priv);
}
 
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
 
/**
* intel_mark_fb_busy - mark given planes as busy
* @dev: DRM device
* @frontbuffer_bits: bits for the affected planes
* @ring: optional ring for asynchronous commands
*
* This function gets called every time the screen contents change. It can be
* used to keep e.g. the update rate at the nominal refresh rate with DRRS.
*/
static void intel_mark_fb_busy(struct drm_device *dev,
unsigned frontbuffer_bits,
struct intel_engine_cs *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
enum pipe pipe;
 
if (!i915_powersave)
if (!i915.powersave)
return;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
for_each_pipe(pipe) {
if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
continue;
 
if (to_intel_framebuffer(crtc->fb)->obj != obj)
continue;
 
intel_increase_pllclock(crtc);
intel_increase_pllclock(dev, pipe);
if (ring && intel_fbc_enabled(dev))
ring->fbc_dirty = true;
}
}
 
/**
* intel_fb_obj_invalidate - invalidate frontbuffer object
* @obj: GEM object to invalidate
* @ring: set for asynchronous rendering
*
* This function gets called every time rendering on the given object starts and
* frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
* be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
* until the rendering completes or a flip on this frontbuffer plane is
* scheduled.
*/
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
if (!obj->frontbuffer_bits)
return;
 
if (ring) {
mutex_lock(&dev_priv->fb_tracking.lock);
dev_priv->fb_tracking.busy_bits
|= obj->frontbuffer_bits;
dev_priv->fb_tracking.flip_bits
&= ~obj->frontbuffer_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
}
 
intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
 
intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
}
 
/**
* intel_frontbuffer_flush - flush frontbuffer
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called every time rendering on the given planes has
* completed and frontbuffer caching can be started again. Flushes will get
* delayed if they're blocked by some outstanding asynchronous rendering.
*
* Can be called without any locks held.
*/
void intel_frontbuffer_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Delay flushing when rings are still busy. */
mutex_lock(&dev_priv->fb_tracking.lock);
frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
 
intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
 
intel_edp_psr_flush(dev, frontbuffer_bits);
}
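/*
 * Editorial note, not part of either revision: a simplified sketch of how the
 * frontbuffer tracking helpers above are intended to bracket rendering
 * (flip tracking and the PSR/FBC details are omitted):
 *
 * intel_fb_obj_invalidate(obj, ring);  // rendering to a scanout object starts
 * ...submit and retire the rendering...
 * intel_frontbuffer_flush(dev, obj->frontbuffer_bits);  // caching may resume
 */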
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8249,6 → 9021,7
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
struct drm_device *dev = work->crtc->dev;
enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
 
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb_obj);
8267,7 → 9040,7
static void do_intel_finish_page_flip(struct drm_device *dev,
struct drm_crtc *crtc)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags;
8295,7 → 9068,7
if (work->event)
drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
 
drm_vblank_put(dev, intel_crtc->pipe);
drm_crtc_vblank_put(crtc);
 
spin_unlock_irqrestore(&dev->event_lock, flags);
 
8308,7 → 9081,7
 
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 
do_intel_finish_page_flip(dev, crtc);
8316,15 → 9089,57
 
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
 
do_intel_finish_page_flip(dev, crtc);
}
 
/* Is 'a' after or equal to 'b'? */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
return !((a - b) & 0x80000000);
}
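/*
 * Editorial note, not part of either revision: the unsigned subtraction above
 * keeps the comparison correct across 32-bit flip-counter wrap-around. For
 * example, with a = 0x00000001 and b = 0xfffffffe:
 *
 * a - b = 0x00000003, top bit clear, so 'a' is treated as after 'b',
 *
 * whereas a plain "a >= b" would wrongly report 'a' as older. The result is
 * only meaningful while the two counts are within 2^31 flips of each other.
 */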
 
static bool page_flip_finished(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
/*
* The relevant registers don't exist on pre-ctg.
* As the flip done interrupt doesn't trigger for mmio
* flips on gmch platforms, a flip count check isn't
* really needed there. But since ctg has the registers,
* include it in the check anyway.
*/
if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
return true;
 
/*
* A DSPSURFLIVE check isn't enough in case the mmio and CS flips
* used the same base address. In that case the mmio flip might
* have completed, but the CS hasn't even executed the flip yet.
*
* A flip count check isn't enough as the CS might have updated
* the base address just after start of vblank, but before we
* managed to process the interrupt. This means we'd complete the
* CS flip too soon.
*
* Combining both checks should get us a good enough result. It may
* still happen that the CS flip has been executed, but has not
* yet actually completed. But in case the base address is the same
* anyway, we don't really care.
*/
return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
crtc->unpin_work->gtt_offset &&
g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
crtc->unpin_work->flip_count);
}
 
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
unsigned long flags;
8334,12 → 9149,12
* is also accompanied by a spurious intel_prepare_page_flip().
*/
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work)
if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
 
inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
{
/* Ensure that the work item is consistent when activating it ... */
smp_wmb();
8352,21 → 9167,16
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
 
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
 
ret = intel_ring_begin(ring, 6);
if (ret)
goto err_unpin;
return ret;
 
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
8380,17 → 9190,12
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
 
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
return 0;
 
err_unpin:
intel_unpin_fb_obj(obj);
err:
return ret;
}
 
static int intel_gen3_queue_flip(struct drm_device *dev,
8397,21 → 9202,16
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
 
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
 
ret = intel_ring_begin(ring, 6);
if (ret)
goto err_unpin;
return ret;
 
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
8422,17 → 9222,12
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, MI_NOOP);
 
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
return 0;
 
err_unpin:
intel_unpin_fb_obj(obj);
err:
return ret;
}
 
static int intel_gen4_queue_flip(struct drm_device *dev,
8439,21 → 9234,17
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
 
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
 
ret = intel_ring_begin(ring, 4);
if (ret)
goto err_unpin;
return ret;
 
/* i965+ uses the linear or tiled offsets from the
* Display Registers (which do not change across a page-flip)
8462,8 → 9253,7
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring,
(i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
obj->tiling_mode);
 
/* XXX Enabling the panel-fitter across page-flip is so far
8477,11 → 9267,6
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
return 0;
 
err_unpin:
intel_unpin_fb_obj(obj);
err:
return ret;
}
 
static int intel_gen6_queue_flip(struct drm_device *dev,
8488,26 → 9273,22
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
uint32_t pf, pipesrc;
int ret;
 
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
 
ret = intel_ring_begin(ring, 4);
if (ret)
goto err_unpin;
return ret;
 
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
 
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
8522,11 → 9303,6
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
return 0;
 
err_unpin:
intel_unpin_fb_obj(obj);
err:
return ret;
}
 
static int intel_gen7_queue_flip(struct drm_device *dev,
8533,22 → 9309,13
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_ring_buffer *ring;
uint32_t plane_bit = 0;
int len, ret;
 
ring = obj->ring;
if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
ring = &dev_priv->ring[BCS];
 
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
 
switch(intel_crtc->plane) {
case PLANE_A:
plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
8561,17 → 9328,38
break;
default:
WARN_ONCE(1, "unknown plane in flip command\n");
ret = -ENODEV;
goto err_unpin;
return -ENODEV;
}
 
len = 4;
if (ring->id == RCS)
if (ring->id == RCS) {
len += 6;
/*
* On Gen 8, SRM takes an extra dword to accommodate
* 48-bit addresses, and we need a NOOP for the batch size to
* stay even.
*/
if (IS_GEN8(dev))
len += 2;
}
 
/*
* BSpec MI_DISPLAY_FLIP for IVB:
* "The full packet must be contained within the same cache line."
*
* Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
* cacheline, but if we ever start emitting more commands before
* the MI_DISPLAY_FLIP we may need to first emit everything else,
* then do the cacheline alignment, and finally emit the
* MI_DISPLAY_FLIP.
*/
ret = intel_ring_cacheline_align(ring);
if (ret)
return ret;
 
ret = intel_ring_begin(ring, len);
if (ret)
goto err_unpin;
return ret;
 
/* Unmask the flip-done completion message. Note that the bspec says that
* we should do this for both the BCS and RCS, and that we must not unmask
8588,25 → 9376,28
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
if (IS_GEN8(dev))
intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
MI_SRM_LRM_GLOBAL_GTT);
else
intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
if (IS_GEN8(dev)) {
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
}
}
 
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, (MI_NOOP));
 
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
return 0;
 
err_unpin:
intel_unpin_fb_obj(obj);
err:
return ret;
}
 
static int intel_default_queue_flip(struct drm_device *dev,
8613,6 → 9404,7
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
uint32_t flags)
{
return -ENODEV;
8625,15 → 9417,25
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *old_fb = crtc->fb;
struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
unsigned long flags;
int ret;
 
/*
* drm_mode_page_flip_ioctl() should already catch this, but double
* check to be safe. In the future we may enable pageflipping from
* a disabled primary plane.
*/
if (WARN_ON(intel_fb_obj(old_fb) == NULL))
return -EBUSY;
 
/* Can't change pixel format via MI display flips. */
if (fb->pixel_format != crtc->fb->pixel_format)
if (fb->pixel_format != crtc->primary->fb->pixel_format)
return -EINVAL;
 
/*
8641,8 → 9443,8
* Note that pitch changes could also affect these registers.
*/
if (INTEL_INFO(dev)->gen > 3 &&
(fb->offsets[0] != crtc->fb->offsets[0] ||
fb->pitches[0] != crtc->fb->pitches[0]))
(fb->offsets[0] != crtc->primary->fb->offsets[0] ||
fb->pitches[0] != crtc->primary->fb->pitches[0]))
return -EINVAL;
 
work = kzalloc(sizeof(*work), GFP_KERNEL);
8651,10 → 9453,10
 
work->event = event;
work->crtc = crtc;
work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
work->old_fb_obj = intel_fb_obj(old_fb);
INIT_WORK(&work->work, intel_unpin_work_fn);
 
ret = drm_vblank_get(dev, intel_crtc->pipe);
ret = drm_crtc_vblank_get(crtc);
if (ret)
goto free_work;
 
8663,7 → 9465,7
if (intel_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
drm_vblank_put(dev, intel_crtc->pipe);
drm_crtc_vblank_put(crtc);
 
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
8682,7 → 9484,7
drm_gem_object_reference(&work->old_fb_obj->base);
drm_gem_object_reference(&obj->base);
 
crtc->fb = fb;
crtc->primary->fb = fb;
 
work->pending_flip_obj = obj;
 
8691,12 → 9493,45
atomic_inc(&intel_crtc->unpin_work_count);
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
 
if (IS_VALLEYVIEW(dev)) {
ring = &dev_priv->ring[BCS];
if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
ring = NULL;
} else if (IS_IVYBRIDGE(dev)) {
ring = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
ring = obj->ring;
if (ring == NULL || ring->id != RCS)
ring = &dev_priv->ring[BCS];
} else {
ring = &dev_priv->ring[RCS];
}
 
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto cleanup_pending;
 
work->gtt_offset =
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
 
if (use_mmio_flip(ring, obj))
ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
page_flip_flags);
else
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
page_flip_flags);
if (ret)
goto cleanup_unpin;
 
i915_gem_track_fb(work->old_fb_obj, obj,
INTEL_FRONTBUFFER_PRIMARY(pipe));
 
intel_disable_fbc(dev);
intel_mark_fb_busy(obj, NULL);
intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
 
trace_i915_flip_request(intel_crtc->plane, obj);
8703,9 → 9538,11
 
return 0;
 
cleanup_unpin:
intel_unpin_fb_obj(obj);
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
crtc->fb = old_fb;
crtc->primary->fb = old_fb;
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
8715,10 → 9552,17
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
 
drm_vblank_put(dev, intel_crtc->pipe);
drm_crtc_vblank_put(crtc);
free_work:
kfree(work);
 
if (ret == -EIO) {
out_hang:
intel_crtc_wait_for_pending_flips(crtc);
ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
if (ret == 0 && event)
drm_send_vblank_event(dev, pipe, event);
}
return ret;
}
#endif
8736,6 → 9580,7
*/
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
{
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
 
8750,7 → 9595,16
encoder->new_crtc =
to_intel_crtc(encoder->base.crtc);
}
 
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = crtc->base.enabled;
 
if (crtc->new_enabled)
crtc->new_config = &crtc->config;
else
crtc->new_config = NULL;
}
}
 
/**
* intel_modeset_commit_output_state
8759,6 → 9613,7
*/
static void intel_modeset_commit_output_state(struct drm_device *dev)
{
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
 
8771,7 → 9626,11
base.head) {
encoder->base.crtc = &encoder->new_crtc->base;
}
 
for_each_intel_crtc(dev, crtc) {
crtc->base.enabled = crtc->new_enabled;
}
}
 
static void
connected_sink_compute_bpp(struct intel_connector * connector,
8781,7 → 9640,7
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
connector->base.base.id,
drm_get_connector_name(&connector->base));
connector->base.name);
 
/* Don't use an invalid EDID bpc value */
if (connector->base.display_info.bpc &&
8911,23 → 9770,47
DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
}
 
static bool check_encoder_cloning(struct drm_crtc *crtc)
static bool encoders_cloneable(const struct intel_encoder *a,
const struct intel_encoder *b)
{
int num_encoders = 0;
bool uncloneable_encoders = false;
/* masks could be asymmetric, so check both ways */
return a == b || (a->cloneable & (1 << b->type) &&
b->cloneable & (1 << a->type));
}
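 
/*
 * For illustration: if encoder a's cloneable mask contains (1 << b->type)
 * but b's mask does not contain (1 << a->type), the pair is rejected,
 * which is why both directions are checked above.
 */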
 
static bool check_single_encoder_cloning(struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *source_encoder;
 
list_for_each_entry(source_encoder,
&dev->mode_config.encoder_list, base.head) {
if (source_encoder->new_crtc != crtc)
continue;
 
if (!encoders_cloneable(encoder, source_encoder))
return false;
}
 
return true;
}
 
static bool check_encoder_cloning(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
 
list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list,
base.head) {
if (&encoder->new_crtc->base != crtc)
list_for_each_entry(encoder,
&dev->mode_config.encoder_list, base.head) {
if (encoder->new_crtc != crtc)
continue;
 
num_encoders++;
if (!encoder->cloneable)
uncloneable_encoders = true;
if (!check_single_encoder_cloning(crtc, encoder))
return false;
}
 
return !(num_encoders > 1 && uncloneable_encoders);
return true;
}
 
static struct intel_crtc_config *
8941,7 → 9824,7
int plane_bpp, ret = -EINVAL;
bool retry = true;
 
if (!check_encoder_cloning(crtc)) {
if (!check_encoder_cloning(to_intel_crtc(crtc))) {
DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
return ERR_PTR(-EINVAL);
}
9097,29 → 9980,21
*prepare_pipes |= 1 << encoder->new_crtc->pipe;
}
 
/* Check for any pipes that will be fully disabled ... */
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head) {
bool used = false;
 
/* Don't try to disable disabled crtcs. */
if (!intel_crtc->base.enabled)
/* Check for pipes that will be enabled/disabled ... */
for_each_intel_crtc(dev, intel_crtc) {
if (intel_crtc->base.enabled == intel_crtc->new_enabled)
continue;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
if (encoder->new_crtc == intel_crtc)
used = true;
}
 
if (!used)
if (!intel_crtc->new_enabled)
*disable_pipes |= 1 << intel_crtc->pipe;
else
*prepare_pipes |= 1 << intel_crtc->pipe;
}
 
 
/* set_mode is also used to update properties on live display pipes. */
intel_crtc = to_intel_crtc(crtc);
if (crtc->enabled)
if (intel_crtc->new_enabled)
*prepare_pipes |= 1 << intel_crtc->pipe;
 
/*
9178,10 → 10053,12
 
intel_modeset_commit_output_state(dev);
 
/* Update computed state. */
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head) {
intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
/* Double check state. */
for_each_intel_crtc(dev, intel_crtc) {
WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
WARN_ON(intel_crtc->new_config &&
intel_crtc->new_config != &intel_crtc->config);
WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
}
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
9306,7 → 10183,13
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
 
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(has_hdmi_sink);
if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
IS_VALLEYVIEW(dev))
PIPE_CONF_CHECK_I(limited_color_range);
 
PIPE_CONF_CHECK_I(has_audio);
 
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
 
9324,11 → 10207,22
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
 
/*
* FIXME: BIOS likes to set up a cloned config with lvds+external
* screen. Since we don't yet re-compute the pipe config when moving
* just the lvds port away to another pipe the sw tracking won't match.
*
* Proper atomic modesets with recomputed global state will fix this.
* Until then just don't check gmch state for inherited modes.
*/
if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
PIPE_CONF_CHECK_I(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
if (INTEL_INFO(dev)->gen < 4)
PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
}
 
PIPE_CONF_CHECK_I(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
PIPE_CONF_CHECK_I(pch_pfit.pos);
9341,19 → 10235,20
 
PIPE_CONF_CHECK_I(double_wide);
 
PIPE_CONF_CHECK_X(ddi_pll_sel);
 
PIPE_CONF_CHECK_I(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
 
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
 
if (!HAS_DDI(dev)) {
PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
}
 
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
9394,7 → 10289,7
 
DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
encoder->base.base.id,
drm_get_encoder_name(&encoder->base));
encoder->base.name);
 
WARN(&encoder->new_crtc->base != encoder->base.crtc,
"encoder's stage crtc doesn't match current crtc\n");
9409,6 → 10304,14
if (connector->base.dpms != DRM_MODE_DPMS_OFF)
active = true;
}
/*
* for MST connectors, if we unplug, the connector goes away
* but the encoder is still connected to a crtc until a modeset
* happens in response to the hotplug.
*/
if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
continue;
 
WARN(!!encoder->base.crtc != enabled,
"encoder's enabled state mismatch "
"(expected %i, found %i)\n",
9441,13 → 10344,12
static void
check_crtc_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_crtc_config pipe_config;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
for_each_intel_crtc(dev, crtc) {
bool enabled = false;
bool active = false;
 
9509,7 → 10411,7
static void
check_shared_dpll_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
struct intel_dpll_hw_state dpll_hw_state;
int i;
9536,8 → 10438,7
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
for_each_intel_crtc(dev, crtc) {
if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
enabled_crtcs++;
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
9577,12 → 10478,50
pipe_config->adjusted_mode.crtc_clock, dotclock);
}
 
static void update_scanline_offset(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
 
/*
* The scanline counter increments at the leading edge of hsync.
*
* On most platforms it starts counting from vtotal-1 on the
* first active line. That means the scanline counter value is
* always one less than what we would expect. Ie. just after
* start of vblank, which also occurs at start of hsync (on the
* last active line), the scanline counter will read vblank_start-1.
*
* On gen2 the scanline counter starts counting from 1 instead
* of vtotal-1, so we have to subtract one (or rather add vtotal-1
* to keep the value positive), instead of adding one.
*
* On HSW+ the behaviour of the scanline counter depends on the output
* type. For DP ports it behaves like most other platforms, but on HDMI
* there's an extra 1 line difference. So we need to add two instead of
* one to the value.
*/
if (IS_GEN2(dev)) {
const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
int vtotal;
 
vtotal = mode->crtc_vtotal;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
 
crtc->scanline_offset = vtotal - 1;
} else if (HAS_DDI(dev) &&
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
} else
crtc->scanline_offset = 1;
}
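 
/*
 * A rough illustrative sketch (not the driver's actual readout code): a
 * consumer of scanline_offset would normalize the raw hardware counter
 * along these lines, wrapping the compensated value back into
 * [0, vtotal). The helper name is hypothetical; the fields match those
 * used in update_scanline_offset() above.
 */
static inline int example_scanline_from_hw(const struct intel_crtc *crtc,
int hw_counter)
{
const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
int vtotal = mode->crtc_vtotal;
 
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
 
/* apply the platform-specific compensation described above */
return (hw_counter + crtc->scanline_offset) % vtotal;
}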
 
static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *saved_mode;
struct intel_crtc_config *pipe_config = NULL;
struct intel_crtc *intel_crtc;
9613,6 → 10552,7
}
intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
"[modeset]");
to_intel_crtc(crtc)->new_config = pipe_config;
}
 
/*
9623,8 → 10563,7
* adjusted_mode bits in the crtc directly.
*/
if (IS_VALLEYVIEW(dev)) {
valleyview_modeset_global_pipes(dev, &prepare_pipes,
modeset_pipes, pipe_config);
valleyview_modeset_global_pipes(dev, &prepare_pipes);
 
/* may have added more to prepare_pipes than we should */
prepare_pipes &= ~disable_pipes;
9646,6 → 10585,7
/* mode_set/enable/disable functions rely on a correct pipe
* config. */
to_intel_crtc(crtc)->config = *pipe_config;
to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
 
/*
* Calculate and store various constants which
9667,7 → 10607,30
* on the DPLL.
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
ret = intel_crtc_mode_set(&intel_crtc->base,
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
obj,
NULL);
if (ret != 0) {
DRM_ERROR("pin & fence failed\n");
mutex_unlock(&dev->struct_mutex);
goto done;
}
if (old_fb)
intel_unpin_fb_obj(old_obj);
i915_gem_track_fb(old_obj, obj,
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
mutex_unlock(&dev->struct_mutex);
 
crtc->primary->fb = fb;
crtc->x = x;
crtc->y = y;
 
ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
x, y, fb);
if (ret)
goto done;
9674,8 → 10637,11
}
 
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
update_scanline_offset(intel_crtc);
 
dev_priv->display.crtc_enable(&intel_crtc->base);
}
 
/* FIXME: add subpixel order */
done:
9704,7 → 10670,7
 
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
}
 
#undef for_each_intel_crtc_masked
9716,6 → 10682,7
 
kfree(config->save_connector_encoders);
kfree(config->save_encoder_crtcs);
kfree(config->save_crtc_enabled);
kfree(config);
}
 
9722,10 → 10689,17
static int intel_set_config_save_state(struct drm_device *dev,
struct intel_set_config *config)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
int count;
 
config->save_crtc_enabled =
kcalloc(dev->mode_config.num_crtc,
sizeof(bool), GFP_KERNEL);
if (!config->save_crtc_enabled)
return -ENOMEM;
 
config->save_encoder_crtcs =
kcalloc(dev->mode_config.num_encoder,
sizeof(struct drm_crtc *), GFP_KERNEL);
9743,6 → 10717,11
* restored, not the driver's personal bookkeeping.
*/
count = 0;
for_each_crtc(dev, crtc) {
config->save_crtc_enabled[count++] = crtc->enabled;
}
 
count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
config->save_encoder_crtcs[count++] = encoder->crtc;
}
9758,11 → 10737,22
static void intel_set_config_restore_state(struct drm_device *dev,
struct intel_set_config *config)
{
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
int count;
 
count = 0;
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = config->save_crtc_enabled[count++];
 
if (crtc->new_enabled)
crtc->new_config = &crtc->config;
else
crtc->new_config = NULL;
}
 
count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
encoder->new_crtc =
to_intel_crtc(config->save_encoder_crtcs[count++]);
9804,13 → 10794,18
* and then just flip_or_move it */
if (is_crtc_connector_off(set)) {
config->mode_changed = true;
} else if (set->crtc->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {
} else if (set->crtc->primary->fb != set->fb) {
/*
* If we have no fb, we can only flip as long as the crtc is
* active, otherwise we need a full mode set. The crtc may
* be active if we've only disabled the primary plane, or
* in fastboot situations.
*/
if (set->crtc->primary->fb == NULL) {
struct intel_crtc *intel_crtc =
to_intel_crtc(set->crtc);
 
if (intel_crtc->active && i915_fastboot) {
if (intel_crtc->active) {
DRM_DEBUG_KMS("crtc has no fb, will flip\n");
config->fb_changed = true;
} else {
9820,7 → 10815,7
} else if (set->fb == NULL) {
config->mode_changed = true;
} else if (set->fb->pixel_format !=
set->crtc->fb->pixel_format) {
set->crtc->primary->fb->pixel_format) {
config->mode_changed = true;
} else {
config->fb_changed = true;
9846,9 → 10841,9
struct drm_mode_set *set,
struct intel_set_config *config)
{
struct drm_crtc *new_crtc;
struct intel_connector *connector;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
int ro;
 
/* The upper layers ensure that we either disable a crtc or have a list
9862,7 → 10857,7
* for them. */
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == &connector->base) {
connector->new_encoder = connector->encoder;
connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
break;
}
}
9877,7 → 10872,7
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
connector->base.base.id,
drm_get_connector_name(&connector->base));
connector->base.name);
}
 
 
9891,6 → 10886,8
/* Update crtc of enabled connectors. */
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
struct drm_crtc *new_crtc;
 
if (!connector->new_encoder)
continue;
 
9906,11 → 10903,11
new_crtc)) {
return -EINVAL;
}
connector->encoder->new_crtc = to_intel_crtc(new_crtc);
connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.base.id,
drm_get_connector_name(&connector->base),
connector->base.name,
new_crtc->base.id);
}
 
9940,10 → 10937,63
}
}
/* Now we've also updated encoder->new_crtc for all encoders. */
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (connector->new_encoder)
if (connector->new_encoder != connector->encoder)
connector->encoder = connector->new_encoder;
}
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = false;
 
list_for_each_entry(encoder,
&dev->mode_config.encoder_list,
base.head) {
if (encoder->new_crtc == crtc) {
crtc->new_enabled = true;
break;
}
}
 
if (crtc->new_enabled != crtc->base.enabled) {
DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
crtc->new_enabled ? "en" : "dis");
config->mode_changed = true;
}
 
if (crtc->new_enabled)
crtc->new_config = &crtc->config;
else
crtc->new_config = NULL;
}
 
return 0;
}
 
static void disable_crtc_nofb(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
struct intel_connector *connector;
 
DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
pipe_name(crtc->pipe));
 
list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
if (connector->new_encoder &&
connector->new_encoder->new_crtc == crtc)
connector->new_encoder = NULL;
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
if (encoder->new_crtc == crtc)
encoder->new_crtc = NULL;
}
 
crtc->new_enabled = false;
crtc->new_config = NULL;
}
 
static int intel_crtc_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
9982,7 → 11032,7
save_set.mode = &set->crtc->mode;
save_set.x = set->crtc->x;
save_set.y = set->crtc->y;
save_set.fb = set->crtc->fb;
save_set.fb = set->crtc->primary->fb;
 
/* Compute whether we need a full modeset, only an fb base update or no
* change at all. In the future we might also check whether only the
9998,11 → 11048,24
ret = intel_set_mode(set->crtc, set->mode,
set->x, set->y, set->fb);
} else if (config->fb_changed) {
// intel_crtc_wait_for_pending_flips(set->crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
 
 
ret = intel_pipe_set_base(set->crtc,
set->x, set->y, set->fb);
 
/*
* We need to make sure the primary plane is re-enabled if it
* has previously been turned off.
*/
if (!intel_crtc->primary_enabled && ret == 0) {
WARN_ON(!intel_crtc->active);
intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
intel_crtc->pipe);
}
 
/*
* In the fastboot case this may be our only check of the
* state after boot. It would be better to only do it on
* the first update, but we don't have a nice way of doing that
10010,7 → 11073,7
* flipping, so increasing its cost here shouldn't be a big
* deal).
*/
if (i915_fastboot && ret == 0)
if (i915.fastboot && ret == 0)
intel_modeset_check_state(set->crtc->dev);
}
 
10020,6 → 11083,15
fail:
intel_set_config_restore_state(dev, config);
 
/*
* HACK: if the pipe was on, but we didn't have a framebuffer,
* force the pipe off to avoid oopsing in the modeset code
* due to fb==NULL. This should only happen during boot since
* we don't yet reconstruct the FB from the hardware state.
*/
if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
disable_crtc_nofb(to_intel_crtc(save_set.crtc));
 
/* Try to restore the config */
if (config->mode_changed &&
intel_set_mode(save_set.crtc, save_set.mode,
10033,8 → 11105,6
}
 
static const struct drm_crtc_funcs intel_crtc_funcs = {
// .cursor_set = intel_crtc_cursor_set,
.cursor_move = intel_crtc_cursor_move,
.gamma_set = intel_crtc_gamma_set,
.set_config = intel_crtc_set_config,
.destroy = intel_crtc_destroy,
10041,12 → 11111,6
// .page_flip = intel_crtc_page_flip,
};
 
static void intel_cpu_pll_init(struct drm_device *dev)
{
if (HAS_DDI(dev))
intel_ddi_pll_init(dev);
}
 
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
10053,6 → 11117,9
{
uint32_t val;
 
if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
 
val = I915_READ(PCH_DPLL(pll->id));
hw_state->dpll = val;
hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
10097,7 → 11164,7
struct intel_crtc *crtc;
 
/* Make sure no transcoder isn't still depending on us. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
for_each_intel_crtc(dev, crtc) {
if (intel_crtc_to_shared_dpll(crtc) == pll)
assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
}
10134,7 → 11201,9
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
if (HAS_DDI(dev))
intel_ddi_pll_init(dev);
else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ibx_pch_dpll_init(dev);
else
dev_priv->num_shared_dpll = 0;
10142,18 → 11211,327
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}
 
static int
intel_primary_plane_disable(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc;
 
if (!plane->fb)
return 0;
 
BUG_ON(!plane->crtc);
 
intel_crtc = to_intel_crtc(plane->crtc);
 
/*
* Even though we checked plane->fb above, it's still possible that
* the primary plane has been implicitly disabled because the crtc
* coordinates given weren't visible, or because we detected
* that it was 100% covered by a sprite plane. Or, the CRTC may be
* off and we've set a fb, but haven't actually turned on the CRTC yet.
* In either case, we need to unpin the FB and let the fb pointer get
* updated, but otherwise we don't need to touch the hardware.
*/
if (!intel_crtc->primary_enabled)
goto disable_unpin;
 
intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
intel_plane->pipe);
disable_unpin:
mutex_lock(&dev->struct_mutex);
i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
intel_unpin_fb_obj(intel_fb_obj(plane->fb));
mutex_unlock(&dev->struct_mutex);
plane->fb = NULL;
 
return 0;
}
 
static int
intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
struct drm_rect dest = {
/* integer pixels */
.x1 = crtc_x,
.y1 = crtc_y,
.x2 = crtc_x + crtc_w,
.y2 = crtc_y + crtc_h,
};
struct drm_rect src = {
/* 16.16 fixed point */
.x1 = src_x,
.y1 = src_y,
.x2 = src_x + src_w,
.y2 = src_y + src_h,
};
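/*
 * For illustration: in this 16.16 fixed point format a source width of,
 * say, 1920 pixels arrives as 1920 << 16 = 0x7800000.
 */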
const struct drm_rect clip = {
/* integer pixels */
.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
};
bool visible;
int ret;
 
ret = drm_plane_helper_check_update(plane, crtc, fb,
&src, &dest, &clip,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, true, &visible);
 
if (ret)
return ret;
 
/*
* If the CRTC isn't enabled, we're just pinning the framebuffer,
* updating the fb pointer, and returning without touching the
* hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to
* turn on the display with all planes set up as desired.
*/
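/*
 * For illustration (our reading of the core setcrtc helper, not something
 * this change adds): fb_id == -1 in the SETCRTC ioctl means "reuse the fb
 * already attached to the primary plane", which is what makes this early
 * pinning useful.
 */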
if (!crtc->enabled) {
mutex_lock(&dev->struct_mutex);
 
/*
* If we already called setplane while the crtc was disabled,
* we may have an fb pinned; unpin it.
*/
if (plane->fb)
intel_unpin_fb_obj(old_obj);
 
i915_gem_track_fb(old_obj, obj,
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
 
/* Pin and return without programming hardware */
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
mutex_unlock(&dev->struct_mutex);
 
return ret;
}
 
 
/*
* If clipping results in a non-visible primary plane, we'll disable
* the primary plane. Note that this is a bit different than what
* happens if userspace explicitly disables the plane by passing fb=0
* because plane->fb still gets set and pinned.
*/
if (!visible) {
mutex_lock(&dev->struct_mutex);
 
/*
* Try to pin the new fb first so that we can bail out if we
* fail.
*/
if (plane->fb != fb) {
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
}
 
i915_gem_track_fb(old_obj, obj,
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
 
if (intel_crtc->primary_enabled)
intel_disable_primary_hw_plane(dev_priv,
intel_plane->plane,
intel_plane->pipe);
 
 
if (plane->fb != fb)
if (plane->fb)
intel_unpin_fb_obj(old_obj);
 
mutex_unlock(&dev->struct_mutex);
 
return 0;
}
 
ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
if (ret)
return ret;
 
if (!intel_crtc->primary_enabled)
intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
intel_crtc->pipe);
 
return 0;
}
 
/* Common destruction function for both primary and cursor planes */
static void intel_plane_destroy(struct drm_plane *plane)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
drm_plane_cleanup(plane);
kfree(intel_plane);
}
 
static const struct drm_plane_funcs intel_primary_plane_funcs = {
.update_plane = intel_primary_plane_setplane,
.disable_plane = intel_primary_plane_disable,
.destroy = intel_plane_destroy,
};
 
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
int pipe)
{
struct intel_plane *primary;
const uint32_t *intel_primary_formats;
int num_formats;
 
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
if (primary == NULL)
return NULL;
 
primary->can_scale = false;
primary->max_downscale = 1;
primary->pipe = pipe;
primary->plane = pipe;
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
primary->plane = !pipe;
 
if (INTEL_INFO(dev)->gen <= 3) {
intel_primary_formats = intel_primary_formats_gen2;
num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
} else {
intel_primary_formats = intel_primary_formats_gen4;
num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
}
 
drm_universal_plane_init(dev, &primary->base, 0,
&intel_primary_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY);
return &primary->base;
}
 
static int
intel_cursor_plane_disable(struct drm_plane *plane)
{
if (!plane->fb)
return 0;
 
BUG_ON(!plane->crtc);
 
return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
}
 
static int
intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct drm_rect dest = {
/* integer pixels */
.x1 = crtc_x,
.y1 = crtc_y,
.x2 = crtc_x + crtc_w,
.y2 = crtc_y + crtc_h,
};
struct drm_rect src = {
/* 16.16 fixed point */
.x1 = src_x,
.y1 = src_y,
.x2 = src_x + src_w,
.y2 = src_y + src_h,
};
const struct drm_rect clip = {
/* integer pixels */
.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
};
bool visible;
int ret;
 
ret = drm_plane_helper_check_update(plane, crtc, fb,
&src, &dest, &clip,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true, &visible);
if (ret)
return ret;
 
crtc->cursor_x = crtc_x;
crtc->cursor_y = crtc_y;
if (fb != crtc->cursor->fb) {
return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
} else {
intel_crtc_update_cursor(crtc, visible);
return 0;
}
}
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.update_plane = intel_cursor_plane_update,
.disable_plane = intel_cursor_plane_disable,
.destroy = intel_plane_destroy,
};
 
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
int pipe)
{
struct intel_plane *cursor;
 
cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
if (cursor == NULL)
return NULL;
 
cursor->can_scale = false;
cursor->max_downscale = 1;
cursor->pipe = pipe;
cursor->plane = pipe;
 
drm_universal_plane_init(dev, &cursor->base, 0,
&intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR);
return &cursor->base;
}
 
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
int i;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
int i, ret;
 
intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
if (intel_crtc == NULL)
return;
 
drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
primary = intel_primary_plane_create(dev, pipe);
if (!primary)
goto fail;
 
cursor = intel_cursor_plane_create(dev, pipe);
if (!cursor)
goto fail;
 
ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
cursor, &intel_crtc_funcs);
if (ret)
goto fail;
 
drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
for (i = 0; i < 256; i++) {
intel_crtc->lut_r[i] = i;
10163,7 → 11541,7
 
/*
* On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
* is hooked to plane B. Hence we want plane A feeding pipe B.
* is hooked to pipe B. Hence we want plane A feeding pipe B.
*/
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
10172,6 → 11550,11
intel_crtc->plane = !pipe;
}
 
intel_crtc->cursor_base = ~0;
intel_crtc->cursor_cntl = ~0;
 
init_waitqueue_head(&intel_crtc->vbl_wait);
 
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
10178,13 → 11561,24
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
return;
 
fail:
if (primary)
drm_plane_cleanup(primary);
if (cursor)
drm_plane_cleanup(cursor);
kfree(intel_crtc);
}
 
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
struct drm_encoder *encoder = connector->base.encoder;
struct drm_device *dev = connector->base.dev;
 
WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex));
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
if (!encoder)
return INVALID_PIPE;
10196,21 → 11590,20
struct drm_file *file)
{
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
struct drm_mode_object *drmmode_obj;
struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
DRM_MODE_OBJECT_CRTC);
drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
 
if (!drmmode_obj) {
if (!drmmode_crtc) {
DRM_ERROR("no such CRTC id\n");
return -ENOENT;
}
 
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
crtc = to_intel_crtc(drmmode_crtc);
pipe_from_crtc_id->pipe = crtc->pipe;
 
return 0;
10225,14 → 11618,9
 
list_for_each_entry(source_encoder,
&dev->mode_config.encoder_list, base.head) {
 
if (encoder == source_encoder)
if (encoders_cloneable(encoder, source_encoder))
index_mask |= (1 << entry);
 
/* Intel hw has only one MUX where enocoders could be cloned. */
if (encoder->cloneable && source_encoder->cloneable)
index_mask |= (1 << entry);
 
entry++;
}
 
10249,8 → 11637,7
if ((I915_READ(DP_A) & DP_DETECTED) == 0)
return false;
 
if (IS_GEN5(dev) &&
(I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
return false;
 
return true;
10278,6 → 11665,22
return names[output];
}
 
static bool intel_crt_present(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_ULT(dev))
return false;
 
if (IS_CHERRYVIEW(dev))
return false;
 
if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
return false;
 
return true;
}
 
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
10286,7 → 11689,7
 
intel_lvds_init(dev);
 
if (!IS_ULT(dev))
if (intel_crt_present(dev))
intel_crt_init(dev);
 
if (HAS_DDI(dev)) {
10350,6 → 11753,15
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
}
 
if (IS_CHERRYVIEW(dev)) {
if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
PORT_D);
if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
}
}
 
intel_dsi_init(dev);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
10389,9 → 11801,9
} else if (IS_GEN2(dev))
intel_dvo_init(dev);
 
// if (SUPPORTS_TV(dev))
// intel_tv_init(dev);
 
intel_edp_psr_init(dev);
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
10410,12 → 11822,12
// .create_handle = intel_user_framebuffer_create_handle,
};
 
int intel_framebuffer_init(struct drm_device *dev,
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
int aligned_height, tile_height;
int aligned_height;
int pitch_limit;
int ret;
 
10509,9 → 11921,8
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
 
tile_height = IS_GEN2(dev) ? 16 : 8;
aligned_height = ALIGN(mode_cmd->height,
obj->tiling_mode ? tile_height : 1);
aligned_height = intel_align_height(dev, mode_cmd->height,
obj->tiling_mode);
/* FIXME drm helper for size checks (especially planar formats)? */
if (obj->base.size < aligned_height * mode_cmd->pitches[0])
return -EINVAL;
10547,6 → 11958,8
 
if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
dev_priv->display.find_dpll = g4x_find_best_dpll;
else if (IS_CHERRYVIEW(dev))
dev_priv->display.find_dpll = chv_find_best_dpll;
else if (IS_VALLEYVIEW(dev))
dev_priv->display.find_dpll = vlv_find_best_dpll;
else if (IS_PINEVIEW(dev))
10556,32 → 11969,40
 
if (HAS_DDI(dev)) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_plane_config = ironlake_get_plane_config;
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
dev_priv->display.off = haswell_crtc_off;
dev_priv->display.update_plane = ironlake_update_plane;
dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.get_plane_config = ironlake_get_plane_config;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_plane = ironlake_update_plane;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_plane_config = i9xx_get_plane_config;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_plane = i9xx_update_plane;
dev_priv->display.update_primary_plane =
i9xx_update_primary_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_plane_config = i9xx_get_plane_config;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_plane = i9xx_update_plane;
dev_priv->display.update_primary_plane =
i9xx_update_primary_plane;
}
 
/* Returns the core display clock speed */
10620,6 → 12041,8
} else if (IS_GEN6(dev)) {
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
dev_priv->display.write_eld = ironlake_write_eld;
dev_priv->display.modeset_global_resources =
snb_modeset_global_resources;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
10683,6 → 12106,14
DRM_INFO("applying inverted panel brightness quirk\n");
}
 
/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
DRM_INFO("applying backlight present quirk\n");
}
 
struct intel_quirk {
int device;
int subsystem_vendor;
10728,9 → 12159,6
/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
 
/* 830 needs to leave pipe A & dpll A up */
{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
 
/* Lenovo U160 cannot use SSC on LVDS */
{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
 
10751,6 → 12179,18
 
/* Acer Aspire 4736Z */
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
 
/* Acer Aspire 5336 */
{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
 
/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
 
/* Toshiba CB35 Chromebook (Celeron 2955U) */
{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
 
/* HP Chromebook 14 (Celeron 2955U) */
{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
};
 
static void intel_init_quirks(struct drm_device *dev)
10792,13 → 12232,14
{
intel_prepare_ddi(dev);
 
if (IS_VALLEYVIEW(dev))
vlv_update_cdclk(dev);
 
intel_init_clock_gating(dev);
 
intel_reset_dpio(dev);
 
mutex_lock(&dev->struct_mutex);
intel_enable_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
}
 
void intel_modeset_suspend_hw(struct drm_device *dev)
10809,7 → 12250,9
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i, j, ret;
int sprite, ret;
enum pipe pipe;
struct intel_crtc *crtc;
 
drm_mode_config_init(dev);
 
10840,6 → 12283,15
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
}
 
if (IS_GEN2(dev)) {
dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
} else {
dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
}
 
dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
 
DRM_DEBUG_KMS("%d display pipe%s available.\n",
10846,13 → 12298,13
INTEL_INFO(dev)->num_pipes,
INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
 
for_each_pipe(i) {
intel_crtc_init(dev, i);
for (j = 0; j < dev_priv->num_plane; j++) {
ret = intel_plane_init(dev, i, j);
for_each_pipe(pipe) {
intel_crtc_init(dev, pipe);
for_each_sprite(pipe, sprite) {
ret = intel_plane_init(dev, pipe, sprite);
if (ret)
DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
pipe_name(i), sprite_name(i, j), ret);
pipe_name(pipe), sprite_name(pipe, sprite), ret);
}
}
 
10859,7 → 12311,6
intel_init_dpio(dev);
intel_reset_dpio(dev);
 
intel_cpu_pll_init(dev);
intel_shared_dpll_init(dev);
 
/* Just disable it once at startup */
10868,16 → 12319,33
 
/* Just in case the BIOS is doing something questionable. */
intel_disable_fbc(dev);
}
 
static void
intel_connector_break_all_links(struct intel_connector *connector)
{
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
connector->encoder->connectors_active = false;
connector->encoder->base.crtc = NULL;
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, false);
drm_modeset_unlock_all(dev);
 
for_each_intel_crtc(dev, crtc) {
if (!crtc->active)
continue;
 
/*
* Note that reserving the BIOS fb up front prevents us
* from stuffing other stolen allocations like the ring
* on top. This prevents some ugliness at boot time, and
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
if (dev_priv->display.get_plane_config) {
dev_priv->display.get_plane_config(crtc,
&crtc->plane_config);
/*
* If the fb is shared between multiple heads, we'll
* just get the first one.
*/
intel_find_plane_obj(crtc, &crtc->plane_config);
}
}
}
 
static void intel_enable_pipe_a(struct drm_device *dev)
{
10884,6 → 12352,7
struct intel_connector *connector;
struct drm_connector *crt = NULL;
struct intel_load_detect_pipe load_detect_temp;
struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
 
/* We can't just switch on the pipe A, we need to set things up with a
* proper mode and output configuration. As a gross hack, enable pipe A
10900,10 → 12369,8
if (!crt)
return;
 
if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
intel_release_load_detect_pipe(crt, &load_detect_temp);
 
 
}
 
static bool
10936,6 → 12403,12
reg = PIPECONF(crtc->config.cpu_transcoder);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 
/* restore vblank interrupts to correct state */
if (crtc->active)
drm_vblank_on(dev, crtc->pipe);
else
drm_vblank_off(dev, crtc->pipe);
 
/* We need to sanitize the plane -> pipe mapping first because this will
* disable the crtc (and hence change the state) if it is wrong. Note
* that gen4+ has a fixed plane -> pipe mapping. */
10951,6 → 12424,7
* ... */
plane = crtc->plane;
crtc->plane = !plane;
crtc->primary_enabled = true;
dev_priv->display.crtc_disable(&crtc->base);
crtc->plane = plane;
 
10960,8 → 12434,17
if (connector->encoder->base.crtc != &crtc->base)
continue;
 
intel_connector_break_all_links(connector);
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
/* multiple connectors may have the same encoder:
* handle them and break crtc link separately */
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head)
if (connector->encoder->base.crtc == &crtc->base) {
connector->encoder->base.crtc = NULL;
connector->encoder->connectors_active = false;
}
 
WARN_ON(crtc->active);
crtc->base.enabled = false;
11005,7 → 12488,27
encoder->base.crtc = NULL;
}
}
 
if (crtc->active || IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen < 5) {
/*
* We start out with underrun reporting disabled to avoid races.
* For correct bookkeeping mark this on active crtcs.
*
* Also on gmch platforms we don't have any hardware bits to
* disable the underrun reporting, which means we need to start
* out with underrun reporting disabled also on inactive pipes,
* since otherwise we'll complain about the garbage we read when
* e.g. coming up after runtime pm.
*
* No protection against concurrent access is required - at
* worst a fifo underrun happens which also sets this to false.
*/
crtc->cpu_fifo_underrun_disabled = true;
crtc->pch_fifo_underrun_disabled = true;
 
update_scanline_offset(crtc);
}
}
 
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
11021,7 → 12524,7
if (encoder->connectors_active && !has_active_crtc) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
encoder->base.base.id,
drm_get_encoder_name(&encoder->base));
encoder->base.name);
 
/* Connector is active, but has no active pipe. This is
* fallout from our resume register restoring. Disable
11029,9 → 12532,13
if (encoder->base.crtc) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
encoder->base.base.id,
drm_get_encoder_name(&encoder->base));
encoder->base.name);
encoder->disable(encoder);
if (encoder->post_disable)
encoder->post_disable(encoder);
}
encoder->base.crtc = NULL;
encoder->connectors_active = false;
 
/* Inconsistent output/port/pipe state happens presumably due to
* a bug in one of the get_hw_state functions. Or someplace else
11042,8 → 12549,8
base.head) {
if (connector->encoder != encoder)
continue;
 
intel_connector_break_all_links(connector);
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
}
/* Enabled encoders without active connectors will be fixed in
11050,11 → 12557,21
* the crtc fixup. */
}
 
void i915_redisable_vga(struct drm_device *dev)
void i915_redisable_vga_power_on(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 vga_reg = i915_vgacntrl_reg(dev);
 
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
i915_disable_vga(dev);
}
}
 
void i915_redisable_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* This function can be called both from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
* structures are not yet restored. Since this function is at a very
11062,14 → 12579,20
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
 
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
i915_disable_vga(dev);
i915_redisable_vga_power_on(dev);
}
 
static bool primary_get_hw_state(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
if (!crtc->active)
return false;
 
return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
}
 
static void intel_modeset_readout_hw_state(struct drm_device *dev)
11081,15 → 12604,16
struct intel_connector *connector;
int i;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
for_each_intel_crtc(dev, crtc) {
memset(&crtc->config, 0, sizeof(crtc->config));
 
crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
 
crtc->active = dev_priv->display.get_pipe_config(crtc,
&crtc->config);
 
crtc->base.enabled = crtc->active;
crtc->primary_enabled = crtc->active;
crtc->primary_enabled = primary_get_hw_state(crtc);
 
DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
crtc->base.base.id,
11096,17 → 12620,12
crtc->active ? "enabled" : "disabled");
}
 
/* FIXME: Smash this into the new shared dpll infrastructure. */
if (HAS_DDI(dev))
intel_ddi_setup_hw_pll_state(dev);
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
pll->active = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
for_each_intel_crtc(dev, crtc) {
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
pll->active++;
}
11114,6 → 12633,9
 
DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
pll->name, pll->refcount, pll->on);
 
if (pll->refcount)
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
11131,7 → 12653,7
encoder->connectors_active = false;
DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id,
drm_get_encoder_name(&encoder->base),
encoder->base.name,
encoder->base.crtc ? "enabled" : "disabled",
pipe_name(pipe));
}
11148,7 → 12670,7
}
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
connector->base.base.id,
drm_get_connector_name(&connector->base),
connector->base.name,
connector->base.encoder ? "enabled" : "disabled");
}
}
11171,11 → 12693,9
* Note that this could go away if we move to using crtc_config
* checking everywhere.
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
if (crtc->active && i915_fastboot) {
intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
 
for_each_intel_crtc(dev, crtc) {
if (crtc->active && i915.fastboot) {
intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
crtc->base.base.id);
drm_mode_debug_printmodeline(&crtc->base.mode);
11221,7 → 12741,7
dev_priv->pipe_to_crtc_mapping[pipe];
 
__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
crtc->fb);
crtc->primary->fb);
}
} else {
intel_modeset_update_staged_output_state(dev);
11232,21 → 12752,50
 
void intel_modeset_gem_init(struct drm_device *dev)
{
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
 
mutex_lock(&dev->struct_mutex);
intel_init_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
 
intel_modeset_init_hw(dev);
 
// intel_setup_overlay(dev);
 
mutex_lock(&dev->mode_config.mutex);
drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, false);
mutex_unlock(&dev->mode_config.mutex);
/*
* Make sure any fbs we allocated at startup are properly
* pinned & fenced. When we do the allocation it's too early
* for this.
*/
mutex_lock(&dev->struct_mutex);
for_each_crtc(dev, c) {
obj = intel_fb_obj(c->primary->fb);
if (obj == NULL)
continue;
 
if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
c->primary->fb = NULL;
}
}
mutex_unlock(&dev->struct_mutex);
}
 
void intel_connector_unregister(struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
 
intel_panel_destroy_backlight(connector);
drm_connector_unregister(connector);
}
 
void intel_modeset_cleanup(struct drm_device *dev)
{
#if 0
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct drm_connector *connector;
 
/*
11255,7 → 12804,9
* experience fancy races otherwise.
*/
drm_irq_uninstall(dev);
cancel_work_sync(&dev_priv->hotplug_work);
intel_hpd_cancel_work(dev_priv);
dev_priv->pm._irqs_disabled = true;
 
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
11266,14 → 12817,6
 
intel_unregister_dsm_handler();
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
continue;
 
intel_increase_pllclock(crtc);
}
 
intel_disable_fbc(dev);
 
intel_disable_gt_powersave(dev);
11287,11 → 12830,19
 
/* destroy the backlight and sysfs files before encoders/connectors */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
intel_panel_destroy_backlight(connector);
drm_sysfs_connector_remove(connector);
struct intel_connector *intel_connector;
 
intel_connector = to_intel_connector(connector);
intel_connector->unregister(intel_connector);
}
 
drm_mode_config_cleanup(dev);
 
intel_cleanup_overlay(dev);
 
mutex_lock(&dev->struct_mutex);
intel_cleanup_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
#endif
}
 
11320,12 → 12871,24
unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
 
pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
DRM_ERROR("failed to read control word\n");
return -EIO;
}
 
if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
return 0;
 
if (state)
gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
else
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
 
if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
DRM_ERROR("failed to write control word\n");
return -EIO;
}
 
return 0;
}
 
11347,6 → 12910,7
struct intel_pipe_error_state {
bool power_domain_on;
u32 source;
u32 stat;
} pipe[I915_MAX_PIPES];
 
struct intel_plane_error_state {
11377,7 → 12941,7
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
int transcoders[] = {
TRANSCODER_A,
11399,19 → 12963,14
 
for_each_pipe(i) {
error->pipe[i].power_domain_on =
intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
intel_display_power_enabled_unlocked(dev_priv,
POWER_DOMAIN_PIPE(i));
if (!error->pipe[i].power_domain_on)
continue;
 
if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
error->cursor[i].base = I915_READ(CURBASE(i));
} else {
error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
error->cursor[i].position = I915_READ(CURPOS_IVB(i));
error->cursor[i].base = I915_READ(CURBASE_IVB(i));
}
 
error->plane[i].control = I915_READ(DSPCNTR(i));
error->plane[i].stride = I915_READ(DSPSTRIDE(i));
11427,6 → 12986,9
}
 
error->pipe[i].source = I915_READ(PIPESRC(i));
 
if (HAS_GMCH_DISPLAY(dev))
error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
 
error->num_transcoders = INTEL_INFO(dev)->num_pipes;
11437,7 → 12999,7
enum transcoder cpu_transcoder = transcoders[i];
 
error->transcoder[i].power_domain_on =
intel_display_power_enabled_sw(dev,
intel_display_power_enabled_unlocked(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (!error->transcoder[i].power_domain_on)
continue;
11477,6 → 13039,7
err_printf(m, " Power: %s\n",
error->pipe[i].power_domain_on ? "on" : "off");
err_printf(m, " SRC: %08x\n", error->pipe[i].source);
err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
 
err_printf(m, "Plane [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->plane[i].control);
/drivers/video/drm/i915/intel_dp.c
64,6 → 64,24
{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
 
/*
* CHV supports eDP 1.4, which has more link rates.
* Only the fixed rates are provided below; variable rates are excluded.
*/
static const struct dp_link_dpll chv_dpll[] = {
/*
* CHV requires programming fractional division for m2.
* m2 is stored in fixed point format using the formula below:
* (m2_int << 22) | m2_fraction
*/
{ DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
{ DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
{ DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
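 
/*
 * For illustration, checking the encoding above: m2_int = 32 and
 * m2_fraction = 1677722 give (32 << 22) | 1677722 =
 * 0x8000000 | 0x19999a = 0x819999a, matching the DP_LINK_BW_1_62 entry,
 * while m2_int = 27 with no fraction gives 27 << 22 = 0x6c00000 for the
 * DP_LINK_BW_2_7 and DP_LINK_BW_5_4 entries.
 */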
 
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
91,11 → 109,14
}
 
static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 
static int
int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
struct drm_device *dev = intel_dp->attached_connector->base.dev;
 
switch (max_link_bw) {
case DP_LINK_BW_1_62:
102,6 → 123,11
case DP_LINK_BW_2_7:
break;
case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
INTEL_INFO(dev)->gen >= 8) &&
intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
max_link_bw = DP_LINK_BW_5_4;
else
max_link_bw = DP_LINK_BW_2_7;
break;
default:
113,6 → 139,22
return max_link_bw;
}
 
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
u8 source_max, sink_max;
 
source_max = 4;
if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
(intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
source_max = 2;
 
sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
 
return min(source_max, sink_max);
}
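 
/*
 * For illustration: on a DDI port A wired without DDI_A_4_LANES the
 * source_max above is capped at 2, so even a sink advertising 4 lanes
 * in its DPCD will be trained with min(2, 4) = 2 lanes.
 */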
 
/*
* The units on the numbers in the next two are... bizarre. Examples will
* make it clearer; this one parallels an example in the eDP spec.
163,7 → 205,7
}
 
max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
max_lanes = intel_dp_max_lane_count(intel_dp);
 
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(target_clock, 18);
294,7 → 336,8
return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
 
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
 
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
302,12 → 345,17
return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
 
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
enum intel_display_power_domain power_domain;
 
return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
power_domain = intel_display_port_power_domain(intel_encoder);
return intel_display_power_enabled(dev_priv, power_domain) &&
(I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
}
 
static void
319,7 → 367,7
if (!is_edp(intel_dp))
return;
 
if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
WARN(1, "eDP powered off while attempting aux channel communication.\n");
DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
I915_READ(_pp_stat_reg(intel_dp)),
351,31 → 399,46
return status;
}
 
static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
int index)
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
*
* Note that PCH attached eDP panels should use a 125MHz input
* clock divider.
/*
* The clock divider is based off the hrawclk, and would like to run at
* 2MHz. So, take the hrawclk value and divide by 2 and use that
*/
if (IS_VALLEYVIEW(dev)) {
return index ? 0 : 100;
} else if (intel_dig_port->port == PORT_A) {
return index ? 0 : intel_hrawclk(dev) / 2;
}
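 
/*
 * For illustration (with a hypothetical clock): a 200 MHz hrawclk gives
 * intel_hrawclk(dev) / 2 = 100, and 200 MHz / 100 is the desired 2 MHz
 * AUX clock.
 */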
 
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
 
if (index)
return 0;
if (HAS_DDI(dev))
return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
else if (IS_GEN6(dev) || IS_GEN7(dev))
 
if (intel_dig_port->port == PORT_A) {
if (IS_GEN6(dev) || IS_GEN7(dev))
return 200; /* SNB & IVB eDP input clock at 400 MHz */
else
return 225; /* eDP input clock at 450 MHz */
} else {
return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
}
}
 
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (intel_dig_port->port == PORT_A) {
if (index)
return 0;
return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
/* Workaround for non-ULT HSW */
switch (index) {
383,13 → 446,46
case 1: return 72;
default: return 0;
}
} else if (HAS_PCH_SPLIT(dev)) {
} else {
return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
} else {
return index ? 0 :intel_hrawclk(dev) / 2;
}
}
 
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
return index ? 0 : 100;
}
 
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
uint32_t precharge, timeout;
 
if (IS_GEN6(dev))
precharge = 3;
else
precharge = 5;
 
if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
 
return DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_DONE |
(has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
timeout |
DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
 
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
403,28 → 499,19
uint32_t aux_clock_divider;
int i, ret, recv_bytes;
uint32_t status;
int try, precharge, clock = 0;
bool has_aux_irq = true;
uint32_t timeout;
int try, clock = 0;
bool has_aux_irq = HAS_AUX_IRQ(dev);
bool vdd;
 
vdd = _edp_panel_vdd_on(intel_dp);
 
/* dp aux is extremely sensitive to irq latency, hence request the
* lowest possible wakeup latency and so prevent the cpu from going into
* deep sleep states.
*/
// pm_qos_update_request(&dev_priv->pm_qos, 0);
 
intel_dp_check_edp(intel_dp);
 
if (IS_GEN6(dev))
precharge = 3;
else
precharge = 5;
 
if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
 
intel_aux_display_runtime_get(dev_priv);
 
/* Try to wait for any previous AUX channel activity */
448,7 → 535,12
goto out;
}
 
while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
has_aux_irq,
send_bytes,
aux_clock_divider);
 
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
457,16 → 549,7
pack_aux(send + i, send_bytes - i));
 
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl,
DP_AUX_CH_CTL_SEND_BUSY |
(has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
timeout |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
I915_WRITE(ch_ctl, send_ctl);
 
status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
 
525,236 → 608,146
// pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
intel_aux_display_runtime_put(dev_priv);
 
if (vdd)
edp_panel_vdd_off(intel_dp, false);
 
return ret;
}
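/*
* Editor's note on the retry structure above: the DP spec only requires a
* source to retry a failed AUX transaction at least three times, but the
* loop tries up to five times for every clock-divider value, and keeps
* walking divider indices until the platform's get_aux_clock_divider hook
* returns 0 (most platforms expose a single divider, HSW/LPT exposes two).
*/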
 
/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
uint16_t address, uint8_t *send, int send_bytes)
#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
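/*
* Editor's illustration of the header intel_dp_aux_transfer() below packs
* into txbuf[], assuming the standard DPCD request codes (e.g.
* DP_AUX_NATIVE_READ == 0x9): a native read of 15 bytes starting at DPCD
* address 0x200 becomes
*
*   txbuf[0] = 0x9 << 4     = 0x90
*   txbuf[1] = 0x200 >> 8   = 0x02
*   txbuf[2] = 0x200 & 0xff = 0x00
*   txbuf[3] = 15 - 1       = 0x0e
*
* i.e. HEADER_SIZE (4) bytes precede any payload, while a zero-length
* "address only" message sends just the BARE_ADDRESS_SIZE (3) bytes.
*/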
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
uint8_t txbuf[20], rxbuf[20];
size_t txsize, rxsize;
int ret;
uint8_t msg[20];
int msg_bytes;
uint8_t ack;
 
if (WARN_ON(send_bytes > 16))
txbuf[0] = msg->request << 4;
txbuf[1] = msg->address >> 8;
txbuf[2] = msg->address & 0xff;
txbuf[3] = msg->size - 1;
 
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_WRITE:
case DP_AUX_I2C_WRITE:
txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
rxsize = 1;
 
if (WARN_ON(txsize > 20))
return -E2BIG;
 
intel_dp_check_edp(intel_dp);
msg[0] = DP_AUX_NATIVE_WRITE << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = send_bytes - 1;
memcpy(&msg[4], send, send_bytes);
msg_bytes = send_bytes + 4;
for (;;) {
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
ack >>= 4;
if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
break;
else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
udelay(100);
else
return -EIO;
}
return send_bytes;
}
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
 
/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
uint16_t address, uint8_t byte)
{
return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
 
/* Return payload size. */
ret = msg->size;
}
break;
 
/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
uint16_t address, uint8_t *recv, int recv_bytes)
{
uint8_t msg[4];
int msg_bytes;
uint8_t reply[20];
int reply_bytes;
uint8_t ack;
int ret;
case DP_AUX_NATIVE_READ:
case DP_AUX_I2C_READ:
txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
rxsize = msg->size + 1;
 
if (WARN_ON(recv_bytes > 19))
if (WARN_ON(rxsize > 20))
return -E2BIG;
 
intel_dp_check_edp(intel_dp);
msg[0] = DP_AUX_NATIVE_READ << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
msg[3] = recv_bytes - 1;
ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
/*
* Assume happy day, and copy the data. The caller is
* expected to check msg->reply before touching it.
*
* Return payload size.
*/
ret--;
memcpy(msg->buffer, rxbuf + 1, ret);
}
break;
 
msg_bytes = 4;
reply_bytes = recv_bytes + 1;
default:
ret = -EINVAL;
break;
}
 
for (;;) {
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
reply, reply_bytes);
if (ret == 0)
return -EPROTO;
if (ret < 0)
return ret;
ack = reply[0] >> 4;
if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
memcpy(recv, reply + 1, ret - 1);
return ret - 1;
}
else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
udelay(100);
else
return -EIO;
}
}
 
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
struct intel_dp *intel_dp = container_of(adapter,
struct intel_dp,
adapter);
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
unsigned retry;
int msg_bytes;
int reply_bytes;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
const char *name = NULL;
int ret;
 
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_check_edp(intel_dp);
/* Set up the command byte */
if (mode & MODE_I2C_READ)
msg[0] = DP_AUX_I2C_READ << 4;
else
msg[0] = DP_AUX_I2C_WRITE << 4;
 
if (!(mode & MODE_I2C_STOP))
msg[0] |= DP_AUX_I2C_MOT << 4;
 
msg[1] = address >> 8;
msg[2] = address;
 
switch (mode) {
case MODE_I2C_WRITE:
msg[3] = 0;
msg[4] = write_byte;
msg_bytes = 5;
reply_bytes = 1;
switch (port) {
case PORT_A:
intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
name = "DPDDC-A";
break;
case MODE_I2C_READ:
msg[3] = 0;
msg_bytes = 4;
reply_bytes = 2;
case PORT_B:
intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
name = "DPDDC-B";
break;
default:
msg_bytes = 3;
reply_bytes = 1;
case PORT_C:
intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
name = "DPDDC-C";
break;
}
 
/*
* DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
* required to retry at least seven times upon receiving AUX_DEFER
* before giving up the AUX transaction.
*/
for (retry = 0; retry < 7; retry++) {
ret = intel_dp_aux_ch(intel_dp,
msg, msg_bytes,
reply, reply_bytes);
if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
goto out;
}
 
switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
case DP_AUX_NATIVE_REPLY_ACK:
/* I2C-over-AUX Reply field is only valid
* when paired with AUX ACK.
*/
case PORT_D:
intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
name = "DPDDC-D";
break;
case DP_AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
ret = -EREMOTEIO;
goto out;
case DP_AUX_NATIVE_REPLY_DEFER:
/*
* For now, just give more slack to branch devices. We
* could check the DPCD for I2C bit rate capabilities,
* and if available, adjust the interval. We could also
* be more careful with DP-to-Legacy adapters where a
* long legacy cable may force very low I2C bit rates.
*/
udelay(400);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
reply[0]);
ret = -EREMOTEIO;
goto out;
BUG();
}
 
switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
case DP_AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ) {
*read_byte = reply[1];
if (!HAS_DDI(dev))
intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
 
intel_dp->aux.name = name;
intel_dp->aux.dev = dev->dev;
intel_dp->aux.transfer = intel_dp_aux_transfer;
 
 
ret = drm_dp_aux_register(&intel_dp->aux);
if (ret < 0) {
DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
name, ret);
return;
}
ret = reply_bytes - 1;
goto out;
case DP_AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
ret = -EREMOTEIO;
goto out;
case DP_AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(100);
break;
default:
DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
ret = -EREMOTEIO;
goto out;
}
}
 
DRM_ERROR("too many retries, giving up\n");
ret = -EREMOTEIO;
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
 
out:
ironlake_edp_panel_vdd_off(intel_dp, false);
return ret;
intel_connector_unregister(intel_connector);
}
 
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
struct intel_connector *intel_connector, const char *name)
static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
{
int ret;
 
DRM_DEBUG_KMS("i2c_init %s\n", name);
intel_dp->algo.running = false;
intel_dp->algo.address = 0;
intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
 
memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
intel_dp->adapter.owner = THIS_MODULE;
intel_dp->adapter.class = I2C_CLASS_DDC;
strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
intel_dp->adapter.algo_data = &intel_dp->algo;
intel_dp->adapter.dev.parent = intel_connector->base.kdev;
 
ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
return ret;
switch (link_bw) {
case DP_LINK_BW_1_62:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
break;
case DP_LINK_BW_2_7:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
break;
case DP_LINK_BW_5_4:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
break;
}
}
 
static void
intel_dp_set_clock(struct intel_encoder *encoder,
767,11 → 760,12
if (IS_G4X(dev)) {
divisor = gen4_dpll;
count = ARRAY_SIZE(gen4_dpll);
} else if (IS_HASWELL(dev)) {
/* Haswell has special-purpose DP DDI clocks. */
} else if (HAS_PCH_SPLIT(dev)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
} else if (IS_CHERRYVIEW(dev)) {
divisor = chv_dpll;
count = ARRAY_SIZE(chv_dpll);
} else if (IS_VALLEYVIEW(dev)) {
divisor = vlv_dpll;
count = ARRAY_SIZE(vlv_dpll);
788,6 → 782,20
}
}
 
static void
intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder transcoder = crtc->config.cpu_transcoder;
 
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
}
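/*
* Editor's sketch of the ratios stored by intel_dp_set_m2_n2(), assuming
* the standard intel_link_compute_m_n() convention (data M:N =
* bpp * pixel_clock : 8 * lanes * link_clock, link M:N =
* pixel_clock : link_clock). For an 18 bpp, 148500 kHz mode on four HBR
* lanes that is data M:N ~= 2673000 : 8640000 and link M:N =
* 148500 : 270000; the DRRS path in intel_dp_compute_config() below simply
* feeds the downclocked mode's lower pixel clock through the same formulas
* to produce the M2/N2 set written here.
*/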
 
bool
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
800,10 → 808,13
struct intel_crtc *intel_crtc = encoder->new_crtc;
struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int min_lane_count = 1;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
/* Conveniently, the link BW constants become indices with a shift...*/
int min_clock = 0;
int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
int link_avail, link_clock;
 
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
810,6 → 821,7
pipe_config->has_pch_encoder = true;
 
pipe_config->has_dp_encoder = true;
pipe_config->has_audio = intel_dp->has_audio;
 
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
833,19 → 845,38
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = pipe_config->pipe_bpp;
if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
dev_priv->vbt.edp_bpp < bpp) {
if (is_edp(intel_dp)) {
if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
dev_priv->vbt.edp_bpp);
bpp = dev_priv->vbt.edp_bpp;
}
 
if (IS_BROADWELL(dev)) {
/* Yes, it's an ugly hack. */
min_lane_count = max_lane_count;
DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
min_lane_count);
} else if (dev_priv->vbt.edp_lanes) {
min_lane_count = min(dev_priv->vbt.edp_lanes,
max_lane_count);
DRM_DEBUG_KMS("using min %u lanes per VBT\n",
min_lane_count);
}
 
if (dev_priv->vbt.edp_rate) {
min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
bws[min_clock]);
}
}
 
for (; bpp >= 6*3; bpp -= 2*3) {
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
bpp);
 
for (clock = 0; clock <= max_clock; clock++) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = min_clock; clock <= max_clock; clock++) {
for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
891,6 → 922,17
pipe_config->port_clock,
&pipe_config->dp_m_n);
 
if (intel_connector->panel.downclock_mode != NULL &&
intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
intel_link_compute_m_n(bpp, lane_count,
intel_connector->panel.downclock_mode->clock,
pipe_config->port_clock,
&pipe_config->dp_m2_n2);
}
 
if (HAS_DDI(dev))
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
else
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
 
return true;
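/*
* Editor's worked example of the bpp/clock/lane search above (mode values
* assumed, not from this hunk). The DPCD link-rate codes double as bws[]
* indices after the >> 3: DP_LINK_BW_1_62 (0x06) -> 0, DP_LINK_BW_2_7
* (0x0a) -> 1, DP_LINK_BW_5_4 (0x14) -> 2. For a 2560x1440@60 mode
* (241500 kHz) at 24 bpp, mode_rate ~= 579600:
*
*   clock 0 (1.62 GHz): 1/2/4 lanes -> 129600/259200/518400  (too slow)
*   clock 1 (2.7 GHz):  1/2 lanes   -> 216000/432000         (too slow)
*                       4 lanes     -> 864000                (fits)
*
* so the loops settle on the highest bpp that fits, then the lowest link
* rate, then the fewest lanes.
*/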
926,7 → 968,7
udelay(500);
}
 
static void intel_dp_mode_set(struct intel_encoder *encoder)
static void intel_dp_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
961,7 → 1003,7
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
 
if (intel_dp->has_audio) {
if (crtc->config.has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
994,26 → 1036,27
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
 
if (!IS_CHERRYVIEW(dev)) {
if (crtc->pipe == 1)
intel_dp->DP |= DP_PIPEB_SELECT;
} else {
intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
}
} else {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
 
if (port == PORT_A && !IS_VALLEYVIEW(dev))
ironlake_set_pll_cpu_edp(intel_dp);
}
 
#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
 
#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
 
#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
 
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
static void wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
1038,25 → 1081,42
DRM_DEBUG_KMS("Wait complete\n");
}
 
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
static void wait_panel_on(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power on\n");
ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
 
static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
static void wait_panel_off(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power off time\n");
ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
 
static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power cycle\n");
ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
 
/* When we disable the VDD override bit last, we have to do the manual
* wait. */
wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
intel_dp->panel_power_cycle_delay);
 
wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
 
static void wait_backlight_on(struct intel_dp *intel_dp)
{
wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
intel_dp->backlight_on_delay);
}
 
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
intel_dp->backlight_off_delay);
}
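/*
* Editor's sketch of the wait_remaining_ms_from_jiffies() pattern the three
* helpers above rely on (the real helper lives elsewhere, this is only the
* idea): a jiffies timestamp is recorded when the event happens
* (last_power_on, last_backlight_off, last_power_cycle) and the wait sleeps
* only for the part of the required delay that has not already elapsed, so
* back-to-back panel operations never pay a delay twice.
*/
#if 0	/* illustration only, built from standard jiffies helpers */
static void wait_remaining_ms_from(unsigned long timestamp, int delay_ms)
{
unsigned long target = timestamp + msecs_to_jiffies(delay_ms);

if (time_before(jiffies, target))
msleep(jiffies_to_msecs(target - jiffies));
}
#endif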
 
/* Read the current pp_control value, unlocking the register if it
* is locked
*/
1073,30 → 1133,32
return control;
}
 
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
u32 pp;
u32 pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->want_panel_vdd;
 
if (!is_edp(intel_dp))
return;
return false;
 
WARN(intel_dp->want_panel_vdd,
"eDP VDD already requested on\n");
 
intel_dp->want_panel_vdd = true;
 
if (ironlake_edp_have_panel_vdd(intel_dp))
return;
if (edp_have_panel_vdd(intel_dp))
return need_to_disable;
 
intel_runtime_pm_get(dev_priv);
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
DRM_DEBUG_KMS("Turning eDP VDD on\n");
 
if (!ironlake_edp_have_panel_power(intel_dp))
ironlake_wait_panel_power_cycle(intel_dp);
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
 
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;
1111,22 → 1173,38
/*
* If the panel wasn't on, delay before accessing aux channel
*/
if (!ironlake_edp_have_panel_power(intel_dp)) {
if (!edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP was not running\n");
msleep(intel_dp->panel_power_up_delay);
}
 
return need_to_disable;
}
 
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
if (is_edp(intel_dp)) {
bool vdd = _edp_panel_vdd_on(intel_dp);
 
WARN(!vdd, "eDP VDD already requested on\n");
}
}
 
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
u32 pp_stat_reg, pp_ctrl_reg;
 
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
struct intel_digital_port *intel_dig_port =
dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
enum intel_display_power_domain power_domain;
 
DRM_DEBUG_KMS("Turning eDP VDD off\n");
 
pp = ironlake_get_pp_control(intel_dp);
1143,25 → 1221,39
I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
 
if ((pp & POWER_TARGET_ON) == 0)
msleep(intel_dp->panel_power_cycle_delay);
intel_dp->last_power_cycle = jiffies;
 
intel_runtime_pm_put(dev_priv);
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
}
}
 
static void ironlake_panel_vdd_work(struct work_struct *__work)
static void edp_panel_vdd_work(struct work_struct *__work)
{
struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
struct intel_dp, panel_vdd_work);
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
edp_panel_vdd_off_sync(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
 
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
unsigned long delay;
 
/*
* Queue the timer to fire a long time from now (relative to the power
* down delay) to keep the panel power up across a sequence of
* operations.
*/
delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
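/*
* Editor's note (delay value assumed, it really comes from VBT/DPCD): with
* a typical panel_power_cycle_delay of 500 ms the deferred VDD-off fires
* roughly 2.5 s later, long enough to cover a burst of AUX transactions
* without toggling VDD around every single one.
*/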
 
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
if (!is_edp(intel_dp))
return;
 
1169,20 → 1261,13
 
intel_dp->want_panel_vdd = false;
 
if (sync) {
ironlake_panel_vdd_off_sync(intel_dp);
} else {
/*
* Queue the timer to fire a long
* time from now (relative to the power down delay)
* to keep the panel power up across a sequence of operations
*/
schedule_delayed_work(&intel_dp->panel_vdd_work,
msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
if (sync)
edp_panel_vdd_off_sync(intel_dp);
else
edp_panel_vdd_schedule_off(intel_dp);
}
}
 
void ironlake_edp_panel_on(struct intel_dp *intel_dp)
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
1194,12 → 1279,12
 
DRM_DEBUG_KMS("Turn eDP power on\n");
 
if (ironlake_edp_have_panel_power(intel_dp)) {
if (edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP power already on\n");
return;
}
 
ironlake_wait_panel_power_cycle(intel_dp);
wait_panel_power_cycle(intel_dp);
 
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
1217,7 → 1302,8
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
 
ironlake_wait_panel_on(intel_dp);
wait_panel_on(intel_dp);
intel_dp->last_power_on = jiffies;
 
if (IS_GEN5(dev)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1226,10 → 1312,13
}
}
 
void ironlake_edp_panel_off(struct intel_dp *intel_dp)
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
u32 pp;
u32 pp_ctrl_reg;
 
1238,20 → 1327,30
 
DRM_DEBUG_KMS("Turn eDP power off\n");
 
WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
EDP_BLC_ENABLE);
 
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
intel_dp->want_panel_vdd = false;
 
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
 
ironlake_wait_panel_off(intel_dp);
intel_dp->last_power_cycle = jiffies;
wait_panel_off(intel_dp);
 
/* We got a reference when we enabled the VDD. */
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
}
 
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
1263,6 → 1362,9
return;
 
DRM_DEBUG_KMS("\n");
 
intel_panel_enable_backlight(intel_dp->attached_connector);
 
/*
* If we enable the backlight right away following a panel power
* on, we may see slight flicker as the panel syncs with the eDP
1269,7 → 1371,7
* link. So delay a bit to make sure the image is solid before
* allowing it to appear.
*/
msleep(intel_dp->backlight_on_delay);
wait_backlight_on(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
 
1277,11 → 1379,9
 
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
 
intel_panel_enable_backlight(intel_dp->attached_connector);
}
 
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
1291,8 → 1391,6
if (!is_edp(intel_dp))
return;
 
intel_panel_disable_backlight(intel_dp->attached_connector);
 
DRM_DEBUG_KMS("\n");
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
1301,7 → 1399,11
 
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
msleep(intel_dp->backlight_off_delay);
intel_dp->last_backlight_off = jiffies;
 
edp_wait_backlight_off(intel_dp);
 
intel_panel_disable_backlight(intel_dp->attached_connector);
}
 
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
1365,7 → 1467,7
return;
 
if (mode != DRM_MODE_DPMS_ON) {
ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
DP_SET_POWER_D3);
if (ret != 1)
DRM_DEBUG_DRIVER("failed to write sink power state\n");
1375,8 → 1477,7
* time to wake up.
*/
for (i = 0; i < 3; i++) {
ret = intel_dp_aux_native_write_1(intel_dp,
DP_SET_POWER,
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
DP_SET_POWER_D0);
if (ret == 1)
break;
1392,13 → 1493,22
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp = I915_READ(intel_dp->output_reg);
enum intel_display_power_domain power_domain;
u32 tmp;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;
 
tmp = I915_READ(intel_dp->output_reg);
 
if (!(tmp & DP_PORT_EN))
return false;
 
if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
*pipe = PORT_TO_PIPE_CPT(tmp);
} else if (IS_CHERRYVIEW(dev)) {
*pipe = DP_PORT_TO_PIPE_CHV(tmp);
} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
*pipe = PORT_TO_PIPE(tmp);
} else {
1446,8 → 1556,11
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
int dotclock;
 
tmp = I915_READ(intel_dp->output_reg);
if (tmp & DP_AUDIO_OUTPUT_ENABLE)
pipe_config->has_audio = true;
 
if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
tmp = I915_READ(intel_dp->output_reg);
if (tmp & DP_SYNC_HS_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
1512,11 → 1625,9
}
}
 
static bool is_edp_psr(struct drm_device *dev)
static bool is_edp_psr(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
return dev_priv->psr.sink_support;
return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
 
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1564,9 → 1675,6
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_vsc_psr psr_vsc;
 
if (intel_dp->psr_setup_done)
return;
 
/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
1578,27 → 1686,30
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
intel_dp->psr_setup_done = true;
}
 
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
uint32_t aux_clock_divider;
int precharge = 0x3;
int msg_size = 5; /* Header(4) + Message(1) */
bool only_standby = false;
 
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;
 
/* Enable PSR in sink */
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
DP_PSR_ENABLE &
~DP_PSR_MAIN_LINK_ACTIVE);
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
else
intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
DP_PSR_ENABLE |
DP_PSR_MAIN_LINK_ACTIVE);
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
 
/* Setup AUX registers */
I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
1612,23 → 1723,29
 
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
uint32_t idle_frames = 1;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
bool only_standby = false;
 
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;
 
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
val |= EDP_PSR_LINK_STANDBY;
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
} else
val |= EDP_PSR_LINK_DISABLE;
 
I915_WRITE(EDP_PSR_CTL(dev), val |
IS_BROADWELL(dev) ? 0 : link_entry_time |
(IS_BROADWELL(dev) ? 0 : link_entry_time) |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
1641,51 → 1758,27
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
 
lockdep_assert_held(&dev_priv->psr.lock);
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
dev_priv->psr.source_ok = false;
 
if (!HAS_PSR(dev)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
return false;
}
 
if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
(dig_port->port != PORT_A)) {
if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
return false;
}
 
if (!i915_enable_psr) {
if (!i915.enable_psr) {
DRM_DEBUG_KMS("PSR disable by flag\n");
return false;
}
 
crtc = dig_port->base.base.crtc;
if (crtc == NULL) {
DRM_DEBUG_KMS("crtc not active for PSR\n");
return false;
}
/* Below limitations aren't valid for Broadwell */
if (IS_BROADWELL(dev))
goto out;
 
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc_active(crtc)) {
DRM_DEBUG_KMS("crtc not active for PSR\n");
return false;
}
 
obj = to_intel_framebuffer(crtc->fb)->obj;
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
return false;
}
 
if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
return false;
}
 
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
1697,6 → 1790,7
return false;
}
 
out:
dev_priv->psr.source_ok = true;
return true;
}
1703,39 → 1797,67
 
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!intel_edp_psr_match_conditions(intel_dp) ||
intel_edp_is_psr_enabled(dev))
return;
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
 
/* Setup PSR once */
intel_edp_psr_setup(intel_dp);
 
/* Enable PSR on the panel */
intel_edp_psr_enable_sink(intel_dp);
 
/* Enable PSR on the host */
intel_edp_psr_enable_source(intel_dp);
 
dev_priv->psr.active = true;
}
 
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (intel_edp_psr_match_conditions(intel_dp) &&
!intel_edp_is_psr_enabled(dev))
intel_edp_psr_do_enable(intel_dp);
if (!HAS_PSR(dev)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
return;
}
 
if (!is_edp_psr(intel_dp)) {
DRM_DEBUG_KMS("PSR not supported by this panel\n");
return;
}
 
mutex_lock(&dev_priv->psr.lock);
if (dev_priv->psr.enabled) {
DRM_DEBUG_KMS("PSR already in use\n");
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
dev_priv->psr.busy_frontbuffer_bits = 0;
 
/* Setup PSR once */
intel_edp_psr_setup(intel_dp);
 
if (intel_edp_psr_match_conditions(intel_dp))
dev_priv->psr.enabled = intel_dp;
mutex_unlock(&dev_priv->psr.lock);
}
 
void intel_edp_psr_disable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!intel_edp_is_psr_enabled(dev))
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL(dev),
I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
 
1743,28 → 1865,124
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
 
dev_priv->psr.active = false;
} else {
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
}
 
void intel_edp_psr_update(struct drm_device *dev)
dev_priv->psr.enabled = NULL;
mutex_unlock(&dev_priv->psr.lock);
 
cancel_delayed_work_sync(&dev_priv->psr.work);
}
 
static void intel_edp_psr_work(struct work_struct *work)
{
struct intel_encoder *encoder;
struct intel_dp *intel_dp = NULL;
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), psr.work.work);
struct intel_dp *intel_dp = dev_priv->psr.enabled;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
if (encoder->type == INTEL_OUTPUT_EDP) {
intel_dp = enc_to_intel_dp(&encoder->base);
mutex_lock(&dev_priv->psr.lock);
intel_dp = dev_priv->psr.enabled;
 
if (!is_edp_psr(dev))
return;
if (!intel_dp)
goto unlock;
 
if (!intel_edp_psr_match_conditions(intel_dp))
intel_edp_psr_disable(intel_dp);
else
if (!intel_edp_is_psr_enabled(dev))
/*
* The delayed work can race with an invalidate hence we need to
* recheck. Since psr_flush first clears this and then reschedules we
* won't ever miss a flush when bailing out here.
*/
if (dev_priv->psr.busy_frontbuffer_bits)
goto unlock;
 
intel_edp_psr_do_enable(intel_dp);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
 
static void intel_edp_psr_do_exit(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->psr.active) {
u32 val = I915_READ(EDP_PSR_CTL(dev));
 
WARN_ON(!(val & EDP_PSR_ENABLE));
 
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
 
dev_priv->psr.active = false;
}
 
}
 
void intel_edp_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
 
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
 
intel_edp_psr_do_exit(dev);
 
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
 
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
mutex_unlock(&dev_priv->psr.lock);
}
 
void intel_edp_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
 
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
/*
* On Haswell, sprite plane updates don't result in a PSR invalidating
* signal in the hardware, which means we need to manually fake this in
* software for all flushes, not just when we've seen a preceding
* invalidation through frontbuffer rendering.
*/
if (IS_HASWELL(dev) &&
(frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
intel_edp_psr_do_exit(dev);
 
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(100));
mutex_unlock(&dev_priv->psr.lock);
}
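/*
* Editor's summary of the protocol formed by the entry points above
* (sequence taken from this file, caller wiring assumed): frontbuffer
* rendering first hits intel_edp_psr_invalidate(), which exits PSR and
* records the dirty planes in busy_frontbuffer_bits; the matching flush
* calls intel_edp_psr_flush(), which clears those bits and, once nothing is
* busy, schedules psr.work with a 100 ms delay; intel_edp_psr_work() then
* re-arms PSR only if no new invalidate has raced in since.
*/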
 
void intel_edp_psr_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
mutex_init(&dev_priv->psr.lock);
}
 
static void intel_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1773,9 → 1991,10
 
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
ironlake_edp_backlight_off(intel_dp);
intel_edp_panel_vdd_on(intel_dp);
intel_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
intel_edp_panel_off(intel_dp);
 
/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
1782,19 → 2001,61
intel_dp_link_down(intel_dp);
}
 
static void intel_post_disable_dp(struct intel_encoder *encoder)
static void g4x_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
 
if (port == PORT_A || IS_VALLEYVIEW(dev)) {
if (port != PORT_A)
return;
 
intel_dp_link_down(intel_dp);
if (!IS_VALLEYVIEW(dev))
ironlake_edp_pll_off(intel_dp);
}
 
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
intel_dp_link_down(intel_dp);
}
 
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
u32 val;
 
intel_dp_link_down(intel_dp);
 
mutex_lock(&dev_priv->dpio_lock);
 
/* Propagate soft reset to data lane reset */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void intel_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1805,11 → 2066,11
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
 
ironlake_edp_panel_vdd_on(intel_dp);
intel_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_edp_panel_on(intel_dp);
edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
1819,7 → 2080,7
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
intel_enable_dp(encoder);
ironlake_edp_backlight_on(intel_dp);
intel_edp_backlight_on(intel_dp);
}
 
static void vlv_enable_dp(struct intel_encoder *encoder)
1826,7 → 2087,7
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
ironlake_edp_backlight_on(intel_dp);
intel_edp_backlight_on(intel_dp);
}
 
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
1834,9 → 2095,14
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
 
if (dport->port == PORT_A)
intel_dp_prepare(encoder);
 
/* Only ilk+ has port A */
if (dport->port == PORT_A) {
ironlake_set_pll_cpu_edp(intel_dp);
ironlake_edp_pll_on(intel_dp);
}
}
 
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
1865,10 → 2131,12
 
mutex_unlock(&dev_priv->dpio_lock);
 
if (is_edp(intel_dp)) {
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
&power_seq);
}
 
intel_enable_dp(encoder);
 
1885,6 → 2153,8
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
 
intel_dp_prepare(encoder);
 
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
1903,29 → 2173,155
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_power_seq power_seq;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
int data, i;
u32 val;
 
mutex_lock(&dev_priv->dpio_lock);
 
/* Deassert soft data lane reset*/
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
 
/* Program Tx lane latency optimal setting*/
for (i = 0; i < 4; i++) {
/* Set the latency optimal bit */
data = (i == 1) ? 0x0 : 0x6;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
data << DPIO_FRC_LATENCY_SHFIT);
 
/* Set the upar bit */
data = (i == 1) ? 0x0 : 0x1;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
data << DPIO_UPAR_SHIFT);
}
 
/* Data lane stagger programming */
/* FIXME: Fix up value only after power analysis */
 
mutex_unlock(&dev_priv->dpio_lock);
 
if (is_edp(intel_dp)) {
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
&power_seq);
}
 
intel_enable_dp(encoder);
 
vlv_wait_port_ready(dev_priv, dport);
}
 
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
u32 val;
 
mutex_lock(&dev_priv->dpio_lock);
 
/* program left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA1_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA1_FORCE;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
} else {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA2_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA2_FORCE;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}
 
/* program clock channel usage */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
 
/*
* This is a bit weird since generally CL
* matches the pipe, but here we need to
* pick the CL based on the port.
*/
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
if (pipe != PIPE_B)
val &= ~CHV_CMN_USEDCLKCHANNEL;
else
val |= CHV_CMN_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
/*
* Native read with retry for link status and receiver capability reads for
* cases where the sink may still be asleep.
*
* Sinks are *supposed* to come up within 1ms from an off state, but we're also
* supposed to retry 3 times per the spec.
*/
static bool
intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
uint8_t *recv, int recv_bytes)
static ssize_t
intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
{
int ret, i;
ssize_t ret;
int i;
 
/*
* Sinks are *supposed* to come up within 1ms from an off state,
* but we're also supposed to retry 3 times per the spec.
*/
for (i = 0; i < 3; i++) {
ret = intel_dp_aux_native_read(intel_dp, address, recv,
recv_bytes);
if (ret == recv_bytes)
return true;
ret = drm_dp_dpcd_read(aux, offset, buffer, size);
if (ret == size)
return ret;
msleep(1);
}
 
return false;
return ret;
}
 
/*
1935,17 → 2331,13
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
return intel_dp_aux_native_read_retry(intel_dp,
return intel_dp_dpcd_read_wake(&intel_dp->aux,
DP_LANE0_1_STATUS,
link_status,
DP_LINK_STATUS_SIZE);
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
 
/*
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
 
/* These are source-specific values. */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
1952,7 → 2344,7
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
 
if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_1200;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_800;
1968,20 → 2360,9
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
 
if (IS_BROADWELL(dev)) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
default:
return DP_TRAIN_PRE_EMPHASIS_0;
}
} else if (IS_HASWELL(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_9_5;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
2128,6 → 2509,166
return 0;
}
 
static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
u32 deemph_reg_value, margin_reg_value, val;
uint8_t train_set = intel_dp->train_set[0];
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
int i;
 
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPHASIS_0:
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
deemph_reg_value = 128;
margin_reg_value = 52;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
deemph_reg_value = 128;
margin_reg_value = 77;
break;
case DP_TRAIN_VOLTAGE_SWING_800:
deemph_reg_value = 128;
margin_reg_value = 102;
break;
case DP_TRAIN_VOLTAGE_SWING_1200:
deemph_reg_value = 128;
margin_reg_value = 154;
/* FIXME extra to set for 1200 */
break;
default:
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_3_5:
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
deemph_reg_value = 85;
margin_reg_value = 78;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
deemph_reg_value = 85;
margin_reg_value = 116;
break;
case DP_TRAIN_VOLTAGE_SWING_800:
deemph_reg_value = 85;
margin_reg_value = 154;
break;
default:
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_6:
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
deemph_reg_value = 64;
margin_reg_value = 104;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
deemph_reg_value = 64;
margin_reg_value = 154;
break;
default:
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_9_5:
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
deemph_reg_value = 43;
margin_reg_value = 154;
break;
default:
return 0;
}
break;
default:
return 0;
}
 
mutex_lock(&dev_priv->dpio_lock);
 
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
 
/* Program swing deemph */
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
}
 
/* Program swing margin */
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN_MASK;
val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
 
/* Disable unique transition scale */
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
}
 
if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
== DP_TRAIN_PRE_EMPHASIS_0) &&
((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
== DP_TRAIN_VOLTAGE_SWING_1200)) {
 
/*
* The document said it needs to set bit 27 for ch0 and bit 26
* for ch1. Might be a typo in the doc.
* For now, for this unique transition scale selection, set bit
* 27 for ch0 and ch1.
*/
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
}
 
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
}
 
/* Start swing calculation */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
 
/* LRC Bypass */
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
val |= DPIO_LRC_BYPASS;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
 
mutex_unlock(&dev_priv->dpio_lock);
 
return 0;
}
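/*
* Editor's worked example for the table above, assuming the usual DPCD
* encoding of train_set (voltage swing in bits 1:0, pre-emphasis in bits
* 4:3): a requested 600 mV swing with 3.5 dB pre-emphasis is train_set
* 0x09, which the two switches above turn into deemph_reg_value = 85 and
* margin_reg_value = 116 before they are written to the CHV PHY.
*/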
 
static void
intel_get_adjust_train(struct intel_dp *intel_dp,
const uint8_t link_status[DP_LINK_STATUS_SIZE])
2291,41 → 2832,6
}
}
 
static uint32_t
intel_bdw_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */
 
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */
 
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */
 
case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */
 
default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
"0x%x\n", signal_levels);
return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
}
}
 
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2336,12 → 2842,12
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
 
if (IS_BROADWELL(dev)) {
signal_levels = intel_bdw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_HASWELL(dev)) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev)) {
signal_levels = intel_chv_signal_levels(intel_dp);
mask = 0;
} else if (IS_VALLEYVIEW(dev)) {
signal_levels = intel_vlv_signal_levels(intel_dp);
mask = 0;
2452,7 → 2958,7
len = intel_dp->lane_count + 1;
}
 
ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
buf, len);
 
return ret == len;
2482,9 → 2988,8
I915_WRITE(intel_dp->output_reg, *DP);
POSTING_READ(intel_dp->output_reg);
 
ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
intel_dp->train_set,
intel_dp->lane_count);
ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
intel_dp->train_set, intel_dp->lane_count);
 
return ret == intel_dp->lane_count;
}
2540,11 → 3045,11
link_config[1] = intel_dp->lane_count;
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
 
link_config[0] = 0;
link_config[1] = DP_SET_ANSI_8B10B;
intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);
drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
 
DP |= DP_PORT_EN;
 
2617,10 → 3122,15
bool channel_eq = false;
int tries, cr_tries;
uint32_t DP = intel_dp->DP;
uint32_t training_pattern = DP_TRAINING_PATTERN_2;
 
/* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
training_pattern = DP_TRAINING_PATTERN_3;
 
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_2 |
training_pattern |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to start channel equalization\n");
return;
2647,7 → 3157,7
if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_2 |
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
cr_tries++;
continue;
2663,7 → 3173,7
intel_dp_link_down(intel_dp);
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_2 |
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
tries = 0;
cr_tries++;
2704,22 → 3214,7
to_intel_crtc(intel_dig_port->base.base.crtc);
uint32_t DP = intel_dp->DP;
 
/*
* DDI code has a strict mode set sequence and we should try to respect
* it, otherwise we might hang the machine in many different ways. So we
* really should be disabling the port only on a complete crtc_disable
* sequence. This function is just called under two conditions on DDI
* code:
* - Link train failed while doing crtc_enable, and on this case we
* really should respect the mode set sequence and wait for a
* crtc_disable.
* - Someone turned the monitor off and intel_dp_check_link_status
* called us. We don't need to disable the whole port on this case, so
* when someone turns the monitor on again,
* intel_ddi_prepare_link_retrain will take care of redoing the link
* train.
*/
if (HAS_DDI(dev))
if (WARN_ON(HAS_DDI(dev)))
return;
 
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
2736,9 → 3231,6
}
POSTING_READ(intel_dp->output_reg);
 
/* We don't really know why we're doing this */
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2782,8 → 3274,8
 
char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
 
if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) == 0)
if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) < 0)
return false; /* aux transfer failed */
 
hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
2796,7 → 3288,7
/* Check if the panel supports PSR */
memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
if (is_edp(intel_dp)) {
intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
2805,6 → 3297,14
}
}
 
/* Training Pattern 3 support */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
intel_dp->use_tps3 = true;
DRM_DEBUG_KMS("Displayport TPS3 supported");
} else
intel_dp->use_tps3 = false;
 
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
2812,9 → 3312,9
if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
return true; /* no per-port downstream info */
 
if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
intel_dp->downstream_ports,
DP_MAX_DOWNSTREAM_PORTS) == 0)
DP_MAX_DOWNSTREAM_PORTS) < 0)
return false; /* downstream port status fetch failed */
 
return true;
2828,28 → 3328,92
if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
 
ironlake_edp_panel_vdd_on(intel_dp);
intel_edp_panel_vdd_on(intel_dp);
 
if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
 
if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
 
ironlake_edp_panel_vdd_off(intel_dp, false);
edp_panel_vdd_off(intel_dp, false);
}
 
static bool
intel_dp_probe_mst(struct intel_dp *intel_dp)
{
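/*
* Check the sink's DP_MSTM_CAP (requires DPCD 1.2+) and switch the
* topology manager into or out of MST mode to match what it reports.
*/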
u8 buf[1];
 
if (!intel_dp->can_mst)
return false;
 
if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
return false;
 
_edp_panel_vdd_on(intel_dp);
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
if (buf[0] & DP_MST_CAP) {
DRM_DEBUG_KMS("Sink is MST capable\n");
intel_dp->is_mst = true;
} else {
DRM_DEBUG_KMS("Sink is not MST capable\n");
intel_dp->is_mst = false;
}
}
edp_panel_vdd_off(intel_dp, false);
 
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
return intel_dp->is_mst;
}
 
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
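/*
* Ask the sink to start computing a frame CRC (DP_TEST_SINK_START),
* wait for it to settle, then read the 6 CRC bytes starting at
* DP_TEST_CRC_R_CR and stop the test.
*/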
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct intel_crtc *intel_crtc =
to_intel_crtc(intel_dig_port->base.base.crtc);
u8 buf[1];
 
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
return -EAGAIN;
 
if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
return -ENOTTY;
 
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
DP_TEST_SINK_START) < 0)
return -EAGAIN;
 
/* Wait 2 vblanks to be sure we will have the correct CRC value */
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
return -EAGAIN;
 
drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
return 0;
}
 
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
return intel_dp_dpcd_read_wake(&intel_dp->aux,
DP_DEVICE_SERVICE_IRQ_VECTOR,
sink_irq_vector, 1) == 1;
}
 
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
int ret;
 
ret = intel_dp_aux_native_read_retry(intel_dp,
DP_DEVICE_SERVICE_IRQ_VECTOR,
sink_irq_vector, 1);
if (!ret)
ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
DP_SINK_COUNT_ESI,
sink_irq_vector, 14);
if (ret != 14)
return false;
 
return true;
2859,9 → 3423,66
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
/* NAK by default */
intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
 
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
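/*
* Service a short HPD from an MST sink: retrain the link if channel
* EQ has dropped and let the topology manager handle any pending ESI
* (sideband) interrupts.
*/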
bool bret;
 
if (intel_dp->is_mst) {
u8 esi[16] = { 0 };
int ret = 0;
int retry;
bool handled;
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
if (bret == true) {
 
/* check link status - esi[10] = 0x200c */
if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
 
DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
 
if (handled) {
for (retry = 0; retry < 3; retry++) {
int wret;
wret = drm_dp_dpcd_write(&intel_dp->aux,
DP_SINK_COUNT_ESI+1,
&esi[1], 3);
if (wret == 3) {
break;
}
}
 
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
if (bret == true) {
DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
goto go_again;
}
} else
ret = 0;
 
return ret;
} else {
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
/* send a hotplug event */
drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
}
}
return -EINVAL;
}
 
/*
* According to DP spec
* 5.1.2:
2870,14 → 3491,16
* 3. Use Link Training from 2.5.3.3 and 3.5.1.3
* 4. Check link status on receipt of hot-plug interrupt
*/
 
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
 
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
if (!intel_encoder->connectors_active)
return;
 
2884,6 → 3507,9
if (WARN_ON(!intel_encoder->base.crtc))
return;
 
if (!to_intel_crtc(intel_encoder->base.crtc)->active)
return;
 
/* Try to read receiver status if the link appears to be up */
if (!intel_dp_get_link_status(intel_dp, link_status)) {
return;
2898,7 → 3524,7
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
/* Clear interrupt source */
intel_dp_aux_native_write_1(intel_dp,
drm_dp_dpcd_writeb(&intel_dp->aux,
DP_DEVICE_SERVICE_IRQ_VECTOR,
sink_irq_vector);
 
2910,7 → 3536,7
 
if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
drm_get_encoder_name(&intel_encoder->base));
intel_encoder->base.name);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
2935,15 → 3561,17
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
uint8_t reg;
if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
&reg, 1))
 
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
&reg, 1) < 0)
return connector_status_unknown;
 
return DP_GET_SINK_COUNT(reg) ? connector_status_connected
: connector_status_disconnected;
}
 
/* If no HPD, poke DDC gently */
if (drm_probe_ddc(&intel_dp->adapter))
if (drm_probe_ddc(&intel_dp->aux.ddc))
return connector_status_connected;
 
/* Well we tried, say unknown for unreliable port types */
3085,13 → 3713,24
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum drm_connector_status status;
enum intel_display_power_domain power_domain;
struct edid *edid = NULL;
bool ret;
 
intel_runtime_pm_get(dev_priv);
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
connector->base.id, connector->name);
 
if (intel_dp->is_mst) {
/* MST devices are disconnected from a monitor POV */
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
status = connector_status_disconnected;
goto out;
}
 
intel_dp->has_audio = false;
 
if (HAS_PCH_SPLIT(dev))
3104,10 → 3743,20
 
intel_dp_probe_oui(intel_dp);
 
ret = intel_dp_probe_mst(intel_dp);
if (ret) {
/*
* If we are in MST mode then this connector won't appear
* connected or have anything with EDID on it.
*/
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
status = connector_status_disconnected;
goto out;
}
 
if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
} else {
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
intel_dp->has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
3119,7 → 3768,7
status = connector_status_connected;
 
out:
intel_runtime_pm_put(dev_priv);
intel_display_power_put(dev_priv, power_domain);
return status;
}
 
3126,14 → 3775,22
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
int ret;
 
/* We should parse the EDID data and find out if it has an audio sink
*/
 
ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc);
intel_display_power_put(dev_priv, power_domain);
if (ret)
return ret;
 
3154,15 → 3811,25
intel_dp_detect_audio(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
struct edid *edid;
bool has_audio = false;
 
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
}
 
intel_display_power_put(dev_priv, power_domain);
 
return has_audio;
}
 
3277,17 → 3944,33
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
i2c_del_adapter(&intel_dp->adapter);
drm_dp_aux_unregister(&intel_dp->aux);
intel_dp_mst_encoder_cleanup(intel_dig_port);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
edp_panel_vdd_off_sync(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
kfree(intel_dig_port);
}
 
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
if (!is_edp(intel_dp))
return;
 
edp_panel_vdd_off_sync(intel_dp);
}
 
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
}
 
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dp_detect,
3303,17 → 3986,79
};
 
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.reset = intel_dp_encoder_reset,
.destroy = intel_dp_encoder_destroy,
};
 
static void
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
return;
}
 
bool
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
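/*
* Long pulses signal connect/disconnect: re-read the DPCD and re-probe
* MST. Short pulses are sink IRQs: service MST ESIs, or recheck the
* link status for SST sinks.
*/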
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
bool ret = true;
 
if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
 
DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
long_hpd ? "long" : "short");
 
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
if (long_hpd) {
if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
goto mst_fail;
 
if (!intel_dp_get_dpcd(intel_dp)) {
goto mst_fail;
}
 
intel_dp_probe_oui(intel_dp);
 
if (!intel_dp_probe_mst(intel_dp))
goto mst_fail;
 
} else {
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
goto mst_fail;
}
 
if (!intel_dp->is_mst) {
/*
* we'll check the link status via the normal hot plug path later -
* but for short hpds we should check it now
*/
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
}
ret = false;
goto put_power;
mst_fail:
/* if we were in MST mode and the device is no longer there, get out of MST mode */
if (intel_dp->is_mst) {
DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
}
put_power:
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
 
/* Return which DP Port should be selected for Transcoder DP control */
int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
3362,7 → 4107,7
return false;
}
 
static void
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
3381,6 → 4126,13
}
}
 
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
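/*
* Treat driver load as the most recent panel power event so the
* panel power/backlight delays are honoured even for the very first
* panel transition.
*/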
intel_dp->last_power_cycle = jiffies;
intel_dp->last_power_on = jiffies;
intel_dp->last_backlight_off = jiffies;
}
 
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct intel_dp *intel_dp,
3503,10 → 4255,17
pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
}
 
/* And finally store the new values in the power sequencer. */
/*
* And finally store the new values in the power sequencer. The
* backlight delays are set to 1 because we do manual waits on them. For
* T8, even BSpec recommends doing it. For T9, if we don't do this,
* we'll end up waiting for the backlight off delay twice: once when we
* do the manual sleep, and once when we disable the panel and wait for
* the PP_STATUS bit to become zero.
*/
pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
3540,28 → 4299,187
I915_READ(pp_div_reg));
}
 
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
struct intel_dp *intel_dp = NULL;
struct intel_crtc_config *config = NULL;
struct intel_crtc *intel_crtc = NULL;
struct intel_connector *intel_connector = dev_priv->drrs.connector;
u32 reg, val;
enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;
 
if (refresh_rate <= 0) {
DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
return;
}
 
if (intel_connector == NULL) {
DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
return;
}
 
/*
* FIXME: This needs proper synchronization with psr state. But it is really
* hard to tell without seeing the users of this function in this code.
* Check locking and ordering once that lands.
*/
if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
return;
}
 
encoder = intel_attached_encoder(&intel_connector->base);
intel_dp = enc_to_intel_dp(&encoder->base);
intel_crtc = encoder->new_crtc;
 
if (!intel_crtc) {
DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
return;
}
 
config = &intel_crtc->config;
 
if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
return;
}
 
if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
index = DRRS_LOW_RR;
 
if (index == intel_dp->drrs_state.refresh_rate_type) {
DRM_DEBUG_KMS(
"DRRS requested for previously set RR...ignoring\n");
return;
}
 
if (!intel_crtc->active) {
DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
return;
}
 
if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
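/*
* Gen7 switches the refresh rate by toggling PIPECONF's EDP RR mode
* bit between the primary M/N and the downclocked M2/N2 link values.
*/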
reg = PIPECONF(intel_crtc->config.cpu_transcoder);
val = I915_READ(reg);
if (index > DRRS_HIGH_RR) {
val |= PIPECONF_EDP_RR_MODE_SWITCH;
intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
} else {
val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
}
I915_WRITE(reg, val);
}
 
/*
* mutex taken to ensure that there is no race between different
* drrs calls trying to update the refresh rate. This scenario may occur
* in the future, once idleness-detection-based DRRS in the kernel and
* calls from user space to set a different RR can happen concurrently.
*/
 
mutex_lock(&intel_dp->drrs_state.mutex);
 
intel_dp->drrs_state.refresh_rate_type = index;
 
mutex_unlock(&intel_dp->drrs_state.mutex);
 
DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
 
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector,
struct drm_display_mode *fixed_mode)
{
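/*
* DRRS needs Gen7+, a VBT that opts in, and a downclocked mode derived
* from the panel EDID; that mode is returned to be used as the low
* refresh rate.
*/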
struct drm_connector *connector = &intel_connector->base;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *downclock_mode = NULL;
 
if (INTEL_INFO(dev)->gen <= 6) {
DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
return NULL;
}
 
if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
DRM_INFO("VBT doesn't support DRRS\n");
return NULL;
}
 
downclock_mode = intel_find_panel_downclock
(dev, fixed_mode, connector);
 
if (!downclock_mode) {
DRM_INFO("DRRS not supported\n");
return NULL;
}
 
dev_priv->drrs.connector = intel_connector;
 
mutex_init(&intel_dp->drrs_state.mutex);
 
intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
 
intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
DRM_INFO("seamless DRRS supported for eDP panel.\n");
return downclock_mode;
}
 
void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp;
enum intel_display_power_domain power_domain;
 
if (intel_encoder->type != INTEL_OUTPUT_EDP)
return;
 
intel_dp = enc_to_intel_dp(&intel_encoder->base);
if (!edp_have_panel_vdd(intel_dp))
return;
/*
* The VDD bit needs a power domain reference, so if the bit is
* already enabled when we boot or resume, grab this reference and
* schedule a vdd off, so we don't hold on to the reference
* indefinitely.
*/
DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
edp_panel_vdd_schedule_off(intel_dp);
}
 
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector)
struct intel_connector *intel_connector,
struct edp_power_seq *power_seq)
{
struct drm_connector *connector = &intel_connector->base;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *fixed_mode = NULL;
struct edp_power_seq power_seq = { 0 };
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
struct drm_display_mode *scan;
struct edid *edid;
 
intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
 
if (!is_edp(intel_dp))
return true;
 
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
intel_edp_panel_vdd_sanitize(intel_encoder);
 
/* Cache DPCD and EDID for edp. */
ironlake_edp_panel_vdd_on(intel_dp);
intel_edp_panel_vdd_on(intel_dp);
has_dpcd = intel_dp_get_dpcd(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
edp_panel_vdd_off(intel_dp, false);
 
if (has_dpcd) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
3575,10 → 4493,10
}
 
/* We now know it's not a ghost, init power sequence regs. */
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
&power_seq);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
 
edid = drm_get_edid(connector, &intel_dp->adapter);
mutex_lock(&dev->mode_config.mutex);
edid = drm_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
drm_mode_connector_update_edid_property(connector,
3597,6 → 4515,9
list_for_each_entry(scan, &connector->probed_modes, head) {
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
downclock_mode = intel_dp_drrs_init(
intel_dig_port,
intel_connector, fixed_mode);
break;
}
}
3608,8 → 4529,9
if (fixed_mode)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
mutex_unlock(&dev->mode_config.mutex);
 
intel_panel_init(&intel_connector->panel, fixed_mode);
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector);
 
return true;
3625,9 → 4547,21
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
const char *name = NULL;
int type, error;
struct edp_power_seq power_seq = { 0 };
int type;
 
/* intel_dp vfuncs */
if (IS_VALLEYVIEW(dev))
intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
else if (HAS_PCH_SPLIT(dev))
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
 
intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
 
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
3656,73 → 4590,58
connector->doublescan_allowed = 0;
 
INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
ironlake_panel_vdd_work);
edp_panel_vdd_work);
 
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
drm_connector_register(connector);
 
if (HAS_DDI(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->unregister = intel_dp_connector_unregister;
 
intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
if (HAS_DDI(dev)) {
switch (intel_dig_port->port) {
case PORT_A:
intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
break;
case PORT_B:
intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
break;
case PORT_C:
intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
break;
case PORT_D:
intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
break;
default:
BUG();
}
}
 
/* Set up the DDC bus. */
/* Set up the hotplug pin. */
switch (port) {
case PORT_A:
intel_encoder->hpd_pin = HPD_PORT_A;
name = "DPDDC-A";
break;
case PORT_B:
intel_encoder->hpd_pin = HPD_PORT_B;
name = "DPDDC-B";
break;
case PORT_C:
intel_encoder->hpd_pin = HPD_PORT_C;
name = "DPDDC-C";
break;
case PORT_D:
intel_encoder->hpd_pin = HPD_PORT_D;
name = "DPDDC-D";
break;
default:
BUG();
}
 
error = intel_dp_i2c_init(intel_dp, intel_connector, name);
WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
error, port_name(port));
if (is_edp(intel_dp)) {
intel_dp_init_panel_power_timestamps(intel_dp);
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
}
 
intel_dp->psr_setup_done = false;
intel_dp_aux_init(intel_dp, intel_connector);
 
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
i2c_del_adapter(&intel_dp->adapter);
/* init MST on ports that can support it */
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
if (port == PORT_B || port == PORT_C || port == PORT_D) {
intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id);
}
}
 
if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
drm_dp_aux_unregister(&intel_dp->aux);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
edp_panel_vdd_off_sync(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
drm_sysfs_connector_remove(connector);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
return false;
}
3744,6 → 4663,7
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
3766,18 → 4686,24
DRM_MODE_ENCODER_TMDS);
 
intel_encoder->compute_config = intel_dp_compute_config;
intel_encoder->mode_set = intel_dp_mode_set;
intel_encoder->disable = intel_disable_dp;
intel_encoder->post_disable = intel_post_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
if (IS_VALLEYVIEW(dev)) {
intel_encoder->suspend = intel_dp_encoder_suspend;
if (IS_CHERRYVIEW(dev)) {
intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
} else if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->post_disable = vlv_post_disable_dp;
} else {
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
intel_encoder->post_disable = g4x_post_disable_dp;
}
 
intel_dig_port->port = port;
3784,10 → 4710,20
intel_dig_port->dp.output_reg = output_reg;
 
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
if (IS_CHERRYVIEW(dev)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
else
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
} else {
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = false;
}
intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_dp_hot_plug;
 
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hpd_irq_port[port] = intel_dig_port;
 
if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
3794,3 → 4730,46
kfree(intel_connector);
}
}
 
void intel_dp_mst_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
/* disable MST */
for (i = 0; i < I915_MAX_PORTS; i++) {
struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
if (!intel_dig_port)
continue;
 
if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
if (!intel_dig_port->dp.can_mst)
continue;
if (intel_dig_port->dp.is_mst)
drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
}
}
}
 
void intel_dp_mst_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
for (i = 0; i < I915_MAX_PORTS; i++) {
struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
if (!intel_dig_port)
continue;
if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
int ret;
 
if (!intel_dig_port->dp.can_mst)
continue;
 
ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
if (ret != 0) {
intel_dp_check_mst_status(&intel_dig_port->dp);
}
}
}
}
/drivers/video/drm/i915/intel_dp_mst.c
0,0 → 1,548
/*
* Copyright © 2008 Intel Corporation
* 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
 
#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
 
static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = encoder->base.dev;
int bpp;
int lane_count, slots;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
struct intel_connector *found = NULL, *intel_connector;
int mst_pbn;
 
pipe_config->dp_encoder_is_mst = true;
pipe_config->has_pch_encoder = false;
pipe_config->has_dp_encoder = true;
bpp = 24;
/*
* for MST we always configure max link bw - the spec doesn't
* seem to suggest we should do otherwise.
*/
lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
intel_dp->link_bw = intel_dp_max_link_bw(intel_dp);
intel_dp->lane_count = lane_count;
 
pipe_config->pipe_bpp = 24;
pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
 
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
if (intel_connector->new_encoder == encoder) {
found = intel_connector;
break;
}
}
 
if (!found) {
DRM_ERROR("can't find connector\n");
return false;
}
 
mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
 
pipe_config->pbn = mst_pbn;
slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn);
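/*
* The PBN value expresses the stream's bandwidth demand; the returned
* slot count is the number of MTP time slots it occupies on the link
* and also becomes the TU value below.
*/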
 
intel_link_compute_m_n(bpp, lane_count,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n);
 
pipe_config->dp_m_n.tu = slots;
return true;
 
}
 
static void intel_mst_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
int ret;
 
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->port);
 
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
if (ret) {
DRM_ERROR("failed to update payload %d\n", ret);
}
}
 
static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
 
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
/* this can fail */
drm_dp_check_act_status(&intel_dp->mst_mgr);
/* and this can also fail */
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
 
drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->port);
 
intel_dp->active_mst_links--;
intel_mst->port = NULL;
if (intel_dp->active_mst_links == 0) {
intel_dig_port->base.post_disable(&intel_dig_port->base);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
}
}
 
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
int ret;
uint32_t temp;
struct intel_connector *found = NULL, *intel_connector;
int slots;
struct drm_crtc *crtc = encoder->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
if (intel_connector->new_encoder == encoder) {
found = intel_connector;
break;
}
}
 
if (!found) {
DRM_ERROR("can't find connector\n");
return;
}
 
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
intel_mst->port = found->port;
 
if (intel_dp->active_mst_links == 0) {
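/*
* First active stream on this port: select the port clock, set up the
* DDI buffer and link train before any payload is allocated.
*/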
enum port port = intel_ddi_get_encoder_port(encoder);
 
I915_WRITE(PORT_CLK_SEL(port), intel_crtc->config.ddi_pll_sel);
 
intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
 
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 
 
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
 
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
intel_mst->port, intel_crtc->config.pbn, &slots);
if (ret == false) {
DRM_ERROR("failed to allocate vcpi\n");
return;
}
 
 
intel_dp->active_mst_links++;
temp = I915_READ(DP_TP_STATUS(port));
I915_WRITE(DP_TP_STATUS(port), temp);
 
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
}
 
static void intel_mst_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
int ret;
 
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT),
1))
DRM_ERROR("Timed out waiting for ACT sent\n");
 
ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
 
ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
}
 
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
*pipe = intel_mst->pipe;
if (intel_mst->port)
return true;
return false;
}
 
static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
u32 temp, flags = 0;
 
pipe_config->has_dp_encoder = true;
 
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
if (temp & TRANS_DDI_PVSYNC)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
 
switch (temp & TRANS_DDI_BPC_MASK) {
case TRANS_DDI_BPC_6:
pipe_config->pipe_bpp = 18;
break;
case TRANS_DDI_BPC_8:
pipe_config->pipe_bpp = 24;
break;
case TRANS_DDI_BPC_10:
pipe_config->pipe_bpp = 30;
break;
case TRANS_DDI_BPC_12:
pipe_config->pipe_bpp = 36;
break;
default:
break;
}
pipe_config->adjusted_mode.flags |= flags;
intel_dp_get_m_n(crtc, pipe_config);
 
intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
}
 
static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
struct edid *edid;
int ret;
 
edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
if (!edid)
return 0;
 
ret = intel_connector_update_modes(connector, edid);
kfree(edid);
 
return ret;
}
 
static enum drm_connector_status
intel_mst_port_dp_detect(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
 
return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
}
 
static enum drm_connector_status
intel_dp_mst_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
status = intel_mst_port_dp_detect(connector);
return status;
}
 
static int
intel_dp_mst_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
return 0;
}
 
static void
intel_dp_mst_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
 
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
 
drm_connector_cleanup(connector);
kfree(connector);
}
 
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_mst_set_property,
.destroy = intel_dp_mst_connector_destroy,
};
 
static int intel_dp_mst_get_modes(struct drm_connector *connector)
{
return intel_dp_mst_get_ddc_modes(connector);
}
 
static enum drm_mode_status
intel_dp_mst_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
/* TODO - validate mode against available PBN for link */
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
 
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
 
return MODE_OK;
}
 
static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
return &intel_dp->mst_encoders[0]->base.base;
}
 
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
.mode_valid = intel_dp_mst_mode_valid,
.best_encoder = intel_mst_best_encoder,
};
 
static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
 
drm_encoder_cleanup(encoder);
kfree(intel_mst);
}
 
static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
.destroy = intel_dp_mst_encoder_destroy,
};
 
static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
{
if (connector->encoder) {
enum pipe pipe;
if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
return false;
return true;
}
return false;
}
 
static void intel_connector_add_to_fbdev(struct intel_connector *connector)
{
#ifdef CONFIG_DRM_I915_FBDEV
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
#endif
}
 
static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
{
#ifdef CONFIG_DRM_I915_FBDEV
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
#endif
}
 
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct intel_connector *intel_connector;
struct drm_connector *connector;
int i;
 
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector)
return NULL;
 
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
 
intel_connector->unregister = intel_connector_unregister;
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
 
for (i = PIPE_A; i <= PIPE_C; i++) {
drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_dp->mst_encoders[i]->base.base);
}
intel_dp_add_properties(intel_dp, connector);
 
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_mode_connector_set_path_property(connector, pathprop);
drm_reinit_primary_mode_group(dev);
mutex_lock(&dev->mode_config.mutex);
intel_connector_add_to_fbdev(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
drm_connector_register(&intel_connector->base);
return connector;
}
 
static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
/* need to nuke the connector */
mutex_lock(&dev->mode_config.mutex);
intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
mutex_unlock(&dev->mode_config.mutex);
 
intel_connector->unregister(intel_connector);
 
mutex_lock(&dev->mode_config.mutex);
intel_connector_remove_from_fbdev(intel_connector);
drm_connector_cleanup(connector);
mutex_unlock(&dev->mode_config.mutex);
 
drm_reinit_primary_mode_group(dev);
 
kfree(intel_connector);
DRM_DEBUG_KMS("\n");
}
 
static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
 
drm_kms_helper_hotplug_event(dev);
}
 
static struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
.destroy_connector = intel_dp_destroy_mst_connector,
.hotplug = intel_dp_mst_hotplug,
};
 
static struct intel_dp_mst_encoder *
intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe)
{
struct intel_dp_mst_encoder *intel_mst;
struct intel_encoder *intel_encoder;
struct drm_device *dev = intel_dig_port->base.base.dev;
 
intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
 
if (!intel_mst)
return NULL;
 
intel_mst->pipe = pipe;
intel_encoder = &intel_mst->base;
intel_mst->primary = intel_dig_port;
 
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
DRM_MODE_ENCODER_DPMST);
 
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
 
intel_encoder->compute_config = intel_dp_mst_compute_config;
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
intel_encoder->get_config = intel_dp_mst_enc_get_config;
 
return intel_mst;
 
}
 
static bool
intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
{
int i;
struct intel_dp *intel_dp = &intel_dig_port->dp;
 
for (i = PIPE_A; i <= PIPE_C; i++)
intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i);
return true;
}
 
int
intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
{
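/*
* Create one "fake" stream encoder per pipe so an MST stream can be
* driven from any pipe, then register this port with the DP MST
* topology manager.
*/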
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dig_port->base.base.dev;
int ret;
 
intel_dp->can_mst = true;
intel_dp->mst_mgr.cbs = &mst_cbs;
 
/* create encoders */
intel_dp_create_fake_mst_encoders(intel_dig_port);
ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev->dev, &intel_dp->aux, 16, 3, conn_base_id);
if (ret) {
intel_dp->can_mst = false;
return ret;
}
return 0;
}
 
void
intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
 
if (!intel_dp->can_mst)
return;
 
drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
/* encoders will get killed by normal cleanup */
}
/drivers/video/drm/i915/intel_drv.h
32,7 → 32,7
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_dp_mst_helper.h>
 
#define KBUILD_MODNAME "i915.dll"
 
48,10 → 48,10
* we've never had a chance to check the condition before the timeout.
*/
#define _wait_for(COND, MS, W) ({ \
unsigned long timeout__ = GetTimerTicks() + msecs_to_jiffies(MS); \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
int ret__ = 0; \
while (!(COND)) { \
if (time_after(GetTimerTicks(), timeout__)) { \
if (time_after(jiffies, timeout__)) { \
if (!(COND)) \
ret__ = -ETIMEDOUT; \
break; \
83,6 → 83,12
#define MAX_OUTPUTS 6
/* maximum connectors per crtc in the mode set */
 
/* Maximum cursor sizes */
#define GEN2_CURSOR_WIDTH 64
#define GEN2_CURSOR_HEIGHT 64
#define MAX_CURSOR_WIDTH 256
#define MAX_CURSOR_HEIGHT 256
 
#define INTEL_I2C_BUS_DVO 1
#define INTEL_I2C_BUS_SDVO 2
 
99,6 → 105,7
#define INTEL_OUTPUT_EDP 8
#define INTEL_OUTPUT_DSI 9
#define INTEL_OUTPUT_UNKNOWN 10
#define INTEL_OUTPUT_DP_MST 11
 
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
105,8 → 112,8
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
 
#define INTEL_DSI_COMMAND_MODE 0
#define INTEL_DSI_VIDEO_MODE 1
#define INTEL_DSI_VIDEO_MODE 0
#define INTEL_DSI_COMMAND_MODE 1
 
struct intel_framebuffer {
struct drm_framebuffer base;
115,9 → 122,10
 
struct intel_fbdev {
struct drm_fb_helper helper;
struct intel_framebuffer ifb;
struct intel_framebuffer *fb;
struct list_head fbdev_list;
struct drm_display_mode *our_mode;
int preferred_bpp;
};
 
struct intel_encoder {
129,11 → 137,7
struct intel_crtc *new_crtc;
 
int type;
/*
* Intel hw has only one MUX where encoders could be clone, hence a
* simple flag is enough to compute the possible_clones mask.
*/
bool cloneable;
unsigned int cloneable;
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
bool (*compute_config)(struct intel_encoder *,
154,6 → 158,12
* be set correctly before calling this function. */
void (*get_config)(struct intel_encoder *,
struct intel_crtc_config *pipe_config);
/*
* Called during system suspend after all pending requests for the
* encoder are flushed (for example for DP AUX transactions) and
* device interrupts are disabled.
*/
void (*suspend)(struct intel_encoder *);
int crtc_mask;
enum hpd_pin hpd_pin;
};
167,6 → 177,7
struct {
bool present;
u32 level;
u32 min;
u32 max;
bool enabled;
bool combination_mode; /* gen 2/4 only */
192,6 → 203,14
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
 
/*
* Removes all interfaces through which the connector is accessible
* - like sysfs, debugfs entries - so that no new operations can be
* started on the connector. Also makes sure all currently pending
* operations finish before returning.
*/
void (*unregister)(struct intel_connector *);
 
/* Panel info for eDP and LVDS */
struct intel_panel panel;
 
201,6 → 220,10
/* since POLL and HPD connectors may use the same HPD line keep the native
state of connector->polled in case hotplug storm detection changes it */
u8 polled;
 
void *port; /* store this opaque as it's illegal to dereference it */
 
struct intel_dp *mst_port;
};
 
typedef struct dpll {
215,6 → 238,12
int p;
} intel_clock_t;
 
struct intel_plane_config {
bool tiled;
int size;
u32 base;
};
 
struct intel_crtc_config {
/**
* quirks - bitfield with hw state readout quirks
225,6 → 254,7
* accordingly.
*/
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
unsigned long quirks;
 
/* User requested mode, only valid as a starting point to
260,6 → 290,13
* accordingly. */
bool has_dp_encoder;
 
/* Whether we should send NULL infoframes. Required for audio. */
bool has_hdmi_sink;
 
/* Audio enabled on this pipe. Only valid if either has_hdmi_sink or
* has_dp_encoder is set. */
bool has_audio;
 
/*
* Enable dithering, used when the selected pipe bpp doesn't match the
* plane bpp.
287,6 → 324,9
/* Selected dpll when shared or DPLL_ID_PRIVATE. */
enum intel_dpll_id shared_dpll;
 
/* PORT_CLK_SEL for DDI ports. */
uint32_t ddi_pll_sel;
 
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
 
293,6 → 333,9
int pipe_bpp;
struct intel_link_m_n dp_m_n;
 
/* m2_n2 for eDP downclock */
struct intel_link_m_n dp_m2_n2;
 
/*
* Frequency the dpll for the port should run at. Differs from the
* adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
315,6 → 358,7
u32 pos;
u32 size;
bool enabled;
bool force_thru;
} pch_pfit;
 
/* FDI configuration, only valid if has_pch_encoder is set. */
324,6 → 368,9
bool ips_enabled;
 
bool double_wide;
 
bool dp_encoder_is_mst;
int pbn;
};
 
struct intel_pipe_wm {
330,8 → 377,16
struct intel_wm_level wm[5];
uint32_t linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
bool sprites_enabled;
bool sprites_scaled;
};
 
struct intel_mmio_flip {
u32 seqno;
u32 ring_id;
};
 
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
344,7 → 399,6
*/
bool active;
unsigned long enabled_power_domains;
bool eld_vld;
bool primary_enabled; /* is the primary plane (partially) visible? */
bool lowfreq_avail;
struct intel_overlay *overlay;
359,14 → 413,15
 
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
bool cursor_visible;
uint32_t cursor_cntl;
uint32_t cursor_base;
 
struct intel_plane_config plane_config;
struct intel_crtc_config config;
struct intel_crtc_config *new_config;
bool new_enabled;
 
uint32_t ddi_pll_sel;
 
/* reset counter value when the last flip was submitted */
unsigned int reset_counter;
 
379,10 → 434,15
/* watermarks currently being used */
struct intel_pipe_wm active;
} wm;
 
wait_queue_head_t vbl_wait;
 
int scanline_offset;
};
 
struct intel_plane_wm_parameters {
uint32_t horiz_pixels;
uint32_t vert_pixels;
uint8_t bytes_per_pixel;
bool enabled;
bool scaled;
395,7 → 455,6
struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
u32 lut_r[1024], lut_g[1024], lut_b[1024];
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
448,6 → 507,7
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
 
struct intel_hdmi {
u32 hdmi_reg;
458,15 → 518,29
bool has_audio;
enum hdmi_force_audio force_audio;
bool rgb_quant_range_selectable;
enum hdmi_picture_aspect aspect_ratio;
void (*write_infoframe)(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode);
};
 
struct intel_dp_mst_encoder;
#define DP_MAX_DOWNSTREAM_PORTS 0x10
 
/**
* HIGH_RR is the highest eDP panel refresh rate read from EDID.
* LOW_RR is the lowest eDP panel refresh rate found from EDID
* parsing for the same resolution.
*/
enum edp_drrs_refresh_rate_type {
DRRS_HIGH_RR,
DRRS_LOW_RR,
DRRS_MAX_RR, /* RR count */
};
 
struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
480,8 → 554,7
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
490,8 → 563,36
int backlight_off_delay;
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
bool psr_setup_done;
unsigned long last_power_cycle;
unsigned long last_power_on;
unsigned long last_backlight_off;
 
bool use_tps3;
bool can_mst; /* this port supports mst */
bool is_mst;
int active_mst_links;
/* connector directly attached - won't be used for modeset in mst world */
struct intel_connector *attached_connector;
 
/* mst connector list */
struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
struct drm_dp_mst_topology_mgr mst_mgr;
 
uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
/*
* This function returns the value we have to program the AUX_CTL
* register with to kick off an AUX transaction.
*/
uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider);
struct {
enum drrs_support_type type;
enum edp_drrs_refresh_rate_type refresh_rate_type;
struct mutex mutex;
} drrs_state;
 
};
 
struct intel_digital_port {
500,13 → 601,22
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
bool (*hpd_pulse)(struct intel_digital_port *, bool);
};
 
struct intel_dp_mst_encoder {
struct intel_encoder base;
enum pipe pipe;
struct intel_digital_port *primary;
void *port; /* store this opaque as it's illegal to dereference it */
};
 
static inline int
vlv_dport_to_channel(struct intel_digital_port *dport)
{
switch (dport->port) {
case PORT_B:
case PORT_D:
return DPIO_CH0;
case PORT_C:
return DPIO_CH1;
515,6 → 625,20
}
}
 
static inline int
vlv_pipe_to_channel(enum pipe pipe)
{
switch (pipe) {
case PIPE_A:
case PIPE_C:
return DPIO_CH0;
case PIPE_B:
return DPIO_CH1;
default:
BUG();
}
}
 
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
539,6 → 663,8
#define INTEL_FLIP_INACTIVE 0
#define INTEL_FLIP_PENDING 1
#define INTEL_FLIP_COMPLETE 2
u32 flip_count;
u32 gtt_offset;
bool enable_stall_check;
};
 
545,6 → 671,7
struct intel_set_config {
struct drm_encoder **save_connector_encoders;
struct drm_crtc **save_encoder_crtcs;
bool *save_crtc_enabled;
 
bool fb_changed;
bool mode_changed;
568,6 → 695,12
return container_of(encoder, struct intel_digital_port, base.base);
}
 
static inline struct intel_dp_mst_encoder *
enc_to_mst(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_dp_mst_encoder, base.base);
}
 
static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
return &enc_to_dig_port(encoder)->dp;
592,13 → 725,26
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void hsw_pc8_disable_interrupts(struct drm_device *dev);
void hsw_pc8_restore_interrupts(struct drm_device *dev);
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
/*
* We only use drm_irq_uninstall() at unload and VT switch, so
* this is the only thing we need to check.
*/
return !dev_priv->pm._irqs_disabled;
}
 
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void i9xx_check_fifo_underruns(struct drm_device *dev);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
 
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
617,10 → 763,7
enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
bool intel_ddi_pll_select(struct intel_crtc *crtc);
void intel_ddi_pll_enable(struct intel_crtc *crtc);
void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
628,6 → 771,10
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
 
void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
 
/* intel_display.c */
const char *intel_output_name(int output);
634,10 → 781,36
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring);
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
/**
* intel_frontbuffer_flip - prepare frontbuffer flip
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on the frontbuffer planes
* tracked by @frontbuffer_bits. This is for
* synchronous plane updates which will happen on the next vblank and which will
* not get delayed by pending gpu rendering.
*
* Can be called without any locks held.
*/
static inline
void intel_frontbuffer_flip(struct drm_device *dev,
unsigned frontbuffer_bits)
{
intel_frontbuffer_flush(dev, frontbuffer_bits);
}
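
/*
* Illustrative usage sketch only (not part of the interface above), assuming
* an INTEL_FRONTBUFFER_PRIMARY(pipe) tracking-bit macro as provided by the
* frontbuffer tracking code: a caller that has just committed a synchronous
* primary plane update on a pipe would flush the tracked bits with
*
*     intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
*/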
 
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_control(struct drm_crtc *crtc, bool enable);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
void intel_encoder_destroy(struct drm_encoder *encoder);
void intel_connector_dpms(struct drm_connector *, int mode);
662,21 → 835,23
struct intel_digital_port *dport);
bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old);
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined);
struct intel_engine_cs *pipelined);
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
void intel_framebuffer_fini(struct intel_framebuffer *fb);
void intel_prepare_page_flip(struct drm_device *dev, int plane);
void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
 
/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
683,6 → 858,10
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
void intel_put_shared_dpll(struct intel_crtc *crtc);
 
/* modesetting asserts */
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
701,9 → 880,8
unsigned int bpp,
unsigned int pitch);
void intel_display_handle_reset(struct drm_device *dev);
void hsw_enable_pc8_work(struct work_struct *__work);
void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
713,9 → 891,15
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
void intel_display_set_init_power(struct drm_device *dev, bool enable);
int valleyview_get_vco(struct drm_i915_private *dev_priv);
void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config);
int intel_format_to_fourcc(int format);
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
 
 
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
726,22 → 910,38
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
void intel_dp_check_link_status(struct intel_dp *intel_dp);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
bool intel_dp_is_edp(struct drm_device *dev, enum port port);
void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
void ironlake_edp_panel_on(struct intel_dp *intel_dp);
void ironlake_edp_panel_off(struct intel_dp *intel_dp);
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
void intel_edp_psr_update(struct drm_device *dev);
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
void intel_edp_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_edp_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_edp_psr_init(struct drm_device *dev);
 
 
int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd);
void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
int intel_dp_max_link_bw(struct intel_dp *intel_dp);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
/* intel_dsi.c */
bool intel_dsi_init(struct drm_device *dev);
void intel_dsi_init(struct drm_device *dev);
 
 
/* intel_dvo.c */
813,7 → 1013,8
 
/* intel_panel.c */
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode);
struct drm_display_mode *fixed_mode,
struct drm_display_mode *downclock_mode);
void intel_panel_fini(struct intel_panel *panel);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
823,8 → 1024,8
void intel_gmch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode);
void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
u32 max);
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
u32 level, u32 max);
int intel_panel_setup_backlight(struct drm_connector *connector);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
839,10 → 1040,13
/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
void intel_suspend_hw(struct drm_device *dev);
int ilk_wm_max_level(const struct drm_device *dev);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
uint32_t sprite_width,
uint32_t sprite_height,
int pixel_size,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
850,20 → 1054,23
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
int intel_power_domains_init(struct drm_device *dev);
void intel_power_domains_remove(struct drm_device *dev);
bool intel_display_power_enabled(struct drm_device *dev,
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_remove(struct drm_i915_private *);
bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
bool intel_display_power_enabled_sw(struct drm_device *dev,
bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_device *dev,
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_device *dev,
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_power_domains_init_hw(struct drm_device *dev);
void intel_set_power_well(struct drm_device *dev, bool enable);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
void intel_init_gt_powersave(struct drm_device *dev);
void intel_cleanup_gt_powersave(struct drm_device *dev);
void intel_enable_gt_powersave(struct drm_device *dev);
void intel_disable_gt_powersave(struct drm_device *dev);
void intel_suspend_gt_powersave(struct drm_device *dev);
void intel_reset_gt_powersave(struct drm_device *dev);
void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
871,6 → 1078,7
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
/drivers/video/drm/i915/intel_dsi.c
35,6 → 35,11
 
/* the sub-encoders aka panel drivers */
static const struct intel_dsi_device intel_dsi_devices[] = {
{
.panel_id = MIPI_DSI_GENERIC_PANEL_ID,
.name = "vbt-generic-dsi-vid-mode-display",
.dev_ops = &vbt_generic_dsi_display_ops,
},
};
 
static void band_gap_reset(struct drm_i915_private *dev_priv)
59,12 → 64,12
 
static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
{
return intel_dsi->dev.type == INTEL_DSI_VIDEO_MODE;
return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
}
 
static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
{
return intel_dsi->dev.type == INTEL_DSI_COMMAND_MODE;
return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
}
 
static void intel_dsi_hot_plug(struct intel_encoder *encoder)
87,6 → 92,9
if (fixed_mode)
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
 
if (intel_dsi->dev.dev_ops->mode_fixup)
return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
mode, adjusted_mode);
94,13 → 102,6
return true;
}
 
static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
{
DRM_DEBUG_KMS("\n");
 
vlv_enable_dsi_pll(encoder);
}
 
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
110,32 → 111,27
 
DRM_DEBUG_KMS("\n");
 
mutex_lock(&dev_priv->dpio_lock);
/* program rcomp for compliance, reduce from 50 ohms to 45 ohms;
* needed every time after power gate */
vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
mutex_unlock(&dev_priv->dpio_lock);
 
/* bandgap reset is needed every time we power gate */
band_gap_reset(dev_priv);
 
I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
usleep_range(2500, 3000);
 
val = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
usleep_range(2000, 2500);
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
usleep_range(2000, 2500);
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
usleep_range(2000, 2500);
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
usleep_range(2000, 2500);
}
static void intel_dsi_pre_enable(struct intel_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 
DRM_DEBUG_KMS("\n");
I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
usleep_range(2500, 3000);
 
if (intel_dsi->dev.dev_ops->panel_reset)
intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
 
/* put device in ready state */
intel_dsi_device_ready(encoder);
 
if (intel_dsi->dev.dev_ops->send_otp_cmds)
intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
usleep_range(2500, 3000);
}
 
static void intel_dsi_enable(struct intel_encoder *encoder)
153,9 → 149,14
I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
else {
msleep(20); /* XXX */
dpi_send_cmd(intel_dsi, TURN_ON);
dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN);
msleep(100);
 
if (intel_dsi->dev.dev_ops->enable)
intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
 
wait_for_dsi_fifo_empty(intel_dsi);
 
/* assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
temp = temp | intel_dsi->port_bits;
162,11 → 163,74
I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(pipe));
}
}
 
if (intel_dsi->dev.dev_ops->enable)
intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
static void intel_dsi_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
u32 tmp;
 
DRM_DEBUG_KMS("\n");
 
/* Disable DPOunit clock gating; it can stall the pipe,
* and we need DPLL REFA always enabled */
tmp = I915_READ(DPLL(pipe));
tmp |= DPLL_REFA_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), tmp);
 
/* update the hw state for DPLL */
intel_crtc->config.dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
DPLL_REFA_CLK_ENABLE_VLV;
 
tmp = I915_READ(DSPCLK_GATE_D);
tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, tmp);
 
/* put device in ready state */
intel_dsi_device_ready(encoder);
 
msleep(intel_dsi->panel_on_delay);
 
if (intel_dsi->dev.dev_ops->panel_reset)
intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
 
if (intel_dsi->dev.dev_ops->send_otp_cmds)
intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
 
wait_for_dsi_fifo_empty(intel_dsi);
 
/* Enable the port in the pre-enable phase itself because, per hw team
* recommendation, the port should be enabled before plane & pipe */
intel_dsi_enable(encoder);
}
 
static void intel_dsi_enable_nop(struct intel_encoder *encoder)
{
DRM_DEBUG_KMS("\n");
 
/* For DSI, port enable has to be done before pipe
* and plane enable, so the port is enabled in the
* pre_enable phase itself, unlike other encoders
*/
}
 
static void intel_dsi_pre_disable(struct intel_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 
DRM_DEBUG_KMS("\n");
 
if (is_vid_mode(intel_dsi)) {
/* Send Shutdown command to the panel in LP mode */
dpi_send_cmd(intel_dsi, SHUTDOWN, DPI_LP_MODE_EN);
msleep(10);
}
}
 
static void intel_dsi_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
179,8 → 243,7
DRM_DEBUG_KMS("\n");
 
if (is_vid_mode(intel_dsi)) {
dpi_send_cmd(intel_dsi, SHUTDOWN);
msleep(10);
wait_for_dsi_fifo_empty(intel_dsi);
 
/* de-assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe));
190,10 → 253,29
msleep(2);
}
 
/* Panel commands can be sent when clock is in LP11 */
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x0);
 
temp = I915_READ(MIPI_CTRL(pipe));
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
I915_WRITE(MIPI_CTRL(pipe), temp |
intel_dsi->escape_clk_div <<
ESCAPE_CLOCK_DIVIDER_SHIFT);
 
I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP);
 
temp = I915_READ(MIPI_DSI_FUNC_PRG(pipe));
temp &= ~VID_MODE_FORMAT_MASK;
I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), temp);
 
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x1);
 
/* if the disable packets are sent before the shutdown packet, then on
* some subsequent enable sequence a "send turn on packet" error is observed */
if (intel_dsi->dev.dev_ops->disable)
intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
 
wait_for_dsi_fifo_empty(intel_dsi);
}
 
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
205,38 → 287,50
 
DRM_DEBUG_KMS("\n");
 
I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
 
I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
usleep_range(2000, 2500);
 
I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
 
if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
== 0x00000), 30))
DRM_ERROR("DSI LP not going Low\n");
 
val = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
 
if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
== 0x00000), 30))
DRM_ERROR("DSI LP not going Low\n");
 
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
usleep_range(2000, 2500);
 
vlv_disable_dsi_pll(encoder);
}
 
static void intel_dsi_post_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 val;
 
DRM_DEBUG_KMS("\n");
 
intel_dsi_disable(encoder);
 
intel_dsi_clear_device_ready(encoder);
 
val = I915_READ(DSPCLK_GATE_D);
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
 
if (intel_dsi->dev.dev_ops->disable_panel_power)
intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
 
msleep(intel_dsi->panel_off_delay);
msleep(intel_dsi->panel_pwr_cycle_delay);
}
 
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
243,11 → 337,16
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
enum intel_display_power_domain power_domain;
u32 port, func;
enum pipe p;
 
DRM_DEBUG_KMS("\n");
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;
 
/* XXX: this only works for one DSI output */
for (p = PIPE_A; p <= PIPE_B; p++) {
port = I915_READ(MIPI_PORT_CTRL(p));
267,9 → 366,21
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
u32 pclk;
DRM_DEBUG_KMS("\n");
 
/* XXX: read flags, set to adjusted_mode */
/*
* DPLL_MD is not used in the DSI case; reading it would only return some
* default value, so set dpll_md = 0
*/
pipe_config->dpll_hw_state.dpll_md = 0;
 
pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
if (!pclk)
return;
 
pipe_config->adjusted_mode.crtc_clock = pclk;
pipe_config->port_clock = pclk;
}
 
static enum drm_mode_status
359,7 → 470,7
I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
}
 
static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
374,9 → 485,6
 
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
 
/* XXX: Location of the call */
band_gap_reset(dev_priv);
 
/* escape clock divider, 20MHz, shared for A and C. device ready must be
* off when doing this! txclkesc? */
tmp = I915_READ(MIPI_CTRL(0));
447,11 → 555,21
/* dphy stuff */
 
/* in terms of low power clock */
I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100));
I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(intel_dsi->escape_clk_div, 100));
 
val = 0;
if (intel_dsi->eotp_pkt == 0)
val |= EOT_DISABLE;
 
if (intel_dsi->clock_stop)
val |= CLOCKSTOP;
 
/* recovery disables */
I915_WRITE(MIPI_EOT_DISABLE(pipe), intel_dsi->eot_disable);
I915_WRITE(MIPI_EOT_DISABLE(pipe), val);
 
/* in terms of low power clock */
I915_WRITE(MIPI_INIT_COUNT(pipe), intel_dsi->init_count);
 
/* in terms of txbyteclkhs. actual high to low switch +
* MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
*
479,17 → 597,42
intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
 
if (is_vid_mode(intel_dsi))
/* Some panels might have a resolution which is not a multiple of
* 64, like 1366 x 768. Enable RANDOM resolution support for such
* panels by default */
I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
intel_dsi->video_frmt_cfg_bits |
intel_dsi->video_mode_format);
intel_dsi->video_mode_format |
IP_TG_CONFIG |
RANDOM_DPI_DISPLAY_RESOLUTION);
}
 
static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
{
DRM_DEBUG_KMS("\n");
 
intel_dsi_prepare(encoder);
 
vlv_enable_dsi_pll(encoder);
}
 
static enum drm_connector_status
intel_dsi_detect(struct drm_connector *connector, bool force)
{
struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
struct intel_encoder *intel_encoder = &intel_dsi->base;
enum intel_display_power_domain power_domain;
enum drm_connector_status connector_status;
struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
 
DRM_DEBUG_KMS("\n");
return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
power_domain = intel_display_port_power_domain(intel_encoder);
 
intel_display_power_get(dev_priv, power_domain);
connector_status = intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
intel_display_power_put(dev_priv, power_domain);
 
return connector_status;
}
 
static int intel_dsi_get_modes(struct drm_connector *connector)
542,7 → 685,7
.fill_modes = drm_helper_probe_single_connector_modes,
};
 
bool intel_dsi_init(struct drm_device *dev)
void intel_dsi_init(struct drm_device *dev)
{
struct intel_dsi *intel_dsi;
struct intel_encoder *intel_encoder;
550,19 → 693,31
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_display_mode *fixed_mode = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
const struct intel_dsi_device *dsi;
unsigned int i;
 
DRM_DEBUG_KMS("\n");
 
/* There is no detection method for MIPI so rely on VBT */
if (!dev_priv->vbt.has_mipi)
return;
 
if (IS_VALLEYVIEW(dev)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
return;
}
 
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
return false;
return;
 
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dsi);
return false;
return;
}
 
intel_encoder = &intel_dsi->base;
578,14 → 733,14
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
intel_encoder->pre_enable = intel_dsi_pre_enable;
intel_encoder->enable = intel_dsi_enable;
intel_encoder->mode_set = intel_dsi_mode_set;
intel_encoder->disable = intel_dsi_disable;
intel_encoder->enable = intel_dsi_enable_nop;
intel_encoder->disable = intel_dsi_pre_disable;
intel_encoder->post_disable = intel_dsi_post_disable;
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
intel_encoder->get_config = intel_dsi_get_config;
 
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;
 
for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
dsi = &intel_dsi_devices[i];
603,7 → 758,7
intel_encoder->type = INTEL_OUTPUT_DSI;
intel_encoder->crtc_mask = (1 << 0); /* XXX */
 
intel_encoder->cloneable = false;
intel_encoder->cloneable = 0;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
 
615,7 → 770,7
 
intel_connector_attach_encoder(intel_connector, intel_encoder);
 
drm_sysfs_connector_add(connector);
drm_connector_register(connector);
 
fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
if (!fixed_mode) {
624,14 → 779,12
}
 
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
intel_panel_init(&intel_connector->panel, fixed_mode);
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
 
return true;
return;
 
err:
drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dsi);
kfree(intel_connector);
 
return false;
}
/drivers/video/drm/i915/intel_dsi.h
31,7 → 31,6
struct intel_dsi_device {
unsigned int panel_id;
const char *name;
int type;
const struct intel_dsi_dev_ops *dev_ops;
void *dev_priv;
};
85,6 → 84,9
/* virtual channel */
int channel;
 
/* Video mode or command mode */
u16 operation_mode;
 
/* number of DSI lanes */
unsigned int lane_count;
 
95,8 → 97,10
u32 video_mode_format;
 
/* eot for MIPI_EOT_DISABLE register */
u32 eot_disable;
u8 eotp_pkt;
u8 clock_stop;
 
u8 escape_clk_div;
u32 port_bits;
u32 bw_timer;
u32 dphy_reg;
110,6 → 114,15
u16 hs_to_lp_count;
u16 clk_lp_to_hs_count;
u16 clk_hs_to_lp_count;
 
u16 init_count;
 
/* all delays in ms */
u16 backlight_off_delay;
u16 backlight_on_delay;
u16 panel_on_delay;
u16 panel_off_delay;
u16 panel_pwr_cycle_delay;
};
 
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
119,5 → 132,8
 
extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
 
extern struct intel_dsi_dev_ops vbt_generic_dsi_display_ops;
 
#endif /* _INTEL_DSI_H */
/drivers/video/drm/i915/intel_dsi_cmd.c
26,7 → 26,7
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <linux/mipi_display.h>
#include <video/mipi_display.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
389,7 → 389,7
*
* XXX: commands with data in MIPI_DPI_DATA?
*/
int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
399,17 → 399,11
u32 mask;
 
/* XXX: pipe, hs */
if (intel_dsi->hs)
if (hs)
cmd &= ~DPI_LP_MODE;
else
cmd |= DPI_LP_MODE;
 
/* DPI virtual channel?! */
 
mask = DPI_FIFO_EMPTY;
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
 
/* clear bit */
I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
 
425,3 → 419,19
 
return 0;
}
 
void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 mask;
 
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
 
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100))
DRM_ERROR("DPI FIFOs are not empty\n");
}
/drivers/video/drm/i915/intel_dsi_cmd.h
28,11 → 28,14
 
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <linux/mipi_display.h>
#include <video/mipi_display.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
 
#define DPI_LP_MODE_EN false
#define DPI_HS_MODE_EN true
 
void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
 
int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
47,7 → 50,8
int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen);
 
int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd);
int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi);
 
/* XXX: questionable write helpers */
static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
/drivers/video/drm/i915/intel_dsi_panel_vbt.c
0,0 → 1,591
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Shobhit Kumar <shobhit.kumar@intel.com>
*
*/
 
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
#include <linux/slab.h>
#include <video/mipi_display.h>
//#include <asm/intel-mid.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dsi_cmd.h"
 
#define MIPI_TRANSFER_MODE_SHIFT 0
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
#define MIPI_PORT_SHIFT 3
 
#define PREPARE_CNT_MAX 0x3F
#define EXIT_ZERO_CNT_MAX 0x3F
#define CLK_ZERO_CNT_MAX 0xFF
#define TRAIL_CNT_MAX 0x1F
 
#define NS_KHZ_RATIO 1000000
 
#define GPI0_NC_0_HV_DDI0_HPD 0x4130
#define GPIO_NC_0_HV_DDI0_PAD 0x4138
#define GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
#define GPIO_NC_1_HV_DDI0_DDC_SDA_PAD 0x4128
#define GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
#define GPIO_NC_2_HV_DDI0_DDC_SCL_PAD 0x4118
#define GPIO_NC_3_PANEL0_VDDEN 0x4140
#define GPIO_NC_3_PANEL0_VDDEN_PAD 0x4148
#define GPIO_NC_4_PANEL0_BLKEN 0x4150
#define GPIO_NC_4_PANEL0_BLKEN_PAD 0x4158
#define GPIO_NC_5_PANEL0_BLKCTL 0x4160
#define GPIO_NC_5_PANEL0_BLKCTL_PAD 0x4168
#define GPIO_NC_6_PCONF0 0x4180
#define GPIO_NC_6_PAD 0x4188
#define GPIO_NC_7_PCONF0 0x4190
#define GPIO_NC_7_PAD 0x4198
#define GPIO_NC_8_PCONF0 0x4170
#define GPIO_NC_8_PAD 0x4178
#define GPIO_NC_9_PCONF0 0x4100
#define GPIO_NC_9_PAD 0x4108
#define GPIO_NC_10_PCONF0 0x40E0
#define GPIO_NC_10_PAD 0x40E8
#define GPIO_NC_11_PCONF0 0x40F0
#define GPIO_NC_11_PAD 0x40F8
 
struct gpio_table {
u16 function_reg;
u16 pad_reg;
u8 init;
};
 
static struct gpio_table gtable[] = {
{ GPI0_NC_0_HV_DDI0_HPD, GPIO_NC_0_HV_DDI0_PAD, 0 },
{ GPIO_NC_1_HV_DDI0_DDC_SDA, GPIO_NC_1_HV_DDI0_DDC_SDA_PAD, 0 },
{ GPIO_NC_2_HV_DDI0_DDC_SCL, GPIO_NC_2_HV_DDI0_DDC_SCL_PAD, 0 },
{ GPIO_NC_3_PANEL0_VDDEN, GPIO_NC_3_PANEL0_VDDEN_PAD, 0 },
{ GPIO_NC_4_PANEL0_BLKEN, GPIO_NC_4_PANEL0_BLKEN_PAD, 0 },
{ GPIO_NC_5_PANEL0_BLKCTL, GPIO_NC_5_PANEL0_BLKCTL_PAD, 0 },
{ GPIO_NC_6_PCONF0, GPIO_NC_6_PAD, 0 },
{ GPIO_NC_7_PCONF0, GPIO_NC_7_PAD, 0 },
{ GPIO_NC_8_PCONF0, GPIO_NC_8_PAD, 0 },
{ GPIO_NC_9_PCONF0, GPIO_NC_9_PAD, 0 },
{ GPIO_NC_10_PCONF0, GPIO_NC_10_PAD, 0},
{ GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0}
};
 
static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
{
u8 type, byte, mode, vc, port;
u16 len;
 
byte = *data++;
mode = (byte >> MIPI_TRANSFER_MODE_SHIFT) & 0x1;
vc = (byte >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 0x3;
port = (byte >> MIPI_PORT_SHIFT) & 0x3;
 
/* LP or HS mode */
intel_dsi->hs = mode;
 
/* get packet type and increment the pointer */
type = *data++;
 
len = *((u16 *) data);
data += 2;
 
switch (type) {
case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
dsi_vc_generic_write_0(intel_dsi, vc);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
dsi_vc_generic_write_1(intel_dsi, vc, *data);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
dsi_vc_generic_write_2(intel_dsi, vc, *data, *(data + 1));
break;
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
dsi_vc_generic_write(intel_dsi, vc, data, len);
break;
case MIPI_DSI_DCS_SHORT_WRITE:
dsi_vc_dcs_write_0(intel_dsi, vc, *data);
break;
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
dsi_vc_dcs_write_1(intel_dsi, vc, *data, *(data + 1));
break;
case MIPI_DSI_DCS_READ:
DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
dsi_vc_dcs_write(intel_dsi, vc, data, len);
break;
}
 
data += len;
 
return data;
}
 
static u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, u8 *data)
{
u32 delay = *((u32 *) data);
 
usleep_range(delay, delay + 10);
data += 4;
 
return data;
}
 
static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data)
{
u8 gpio, action;
u16 function, pad;
u32 val;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
gpio = *data++;
 
/* pull up/down */
action = *data++;
 
function = gtable[gpio].function_reg;
pad = gtable[gpio].pad_reg;
 
mutex_lock(&dev_priv->dpio_lock);
if (!gtable[gpio].init) {
/* program the function */
/* FIXME: remove constant below */
vlv_gpio_nc_write(dev_priv, function, 0x2000CC00);
gtable[gpio].init = 1;
}
 
val = 0x4 | action;
 
/* pull up/down */
vlv_gpio_nc_write(dev_priv, pad, val);
mutex_unlock(&dev_priv->dpio_lock);
 
return data;
}
 
typedef u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi, u8 *data);
static const fn_mipi_elem_exec exec_elem[] = {
NULL, /* reserved */
mipi_exec_send_packet,
mipi_exec_delay,
mipi_exec_gpio,
NULL, /* status read; later */
};
 
/*
* MIPI Sequence from VBT #53 parsing logic
* We have already separated each sequence during bios parsing
* Following is the generic execution function for any sequence
*/
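
/*
* Illustrative sketch (hypothetical bytes, not taken from any real VBT):
* a MIPI_SEQ_DISPLAY_ON sequence containing a single 10 ms delay element
* would be laid out as
*
*     03                  sequence id (MIPI_SEQ_DISPLAY_ON)
*     02 10 27 00 00      element 2 (mipi_exec_delay), 0x2710 us = 10 ms
*     00                  end-of-sequence marker
*
* which is exactly the byte-by-byte walk generic_exec_sequence() below
* performs.
*/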
 
static const char * const seq_name[] = {
"UNDEFINED",
"MIPI_SEQ_ASSERT_RESET",
"MIPI_SEQ_INIT_OTP",
"MIPI_SEQ_DISPLAY_ON",
"MIPI_SEQ_DISPLAY_OFF",
"MIPI_SEQ_DEASSERT_RESET"
};
 
static void generic_exec_sequence(struct intel_dsi *intel_dsi, char *sequence)
{
u8 *data = sequence;
fn_mipi_elem_exec mipi_elem_exec;
int index;
 
if (!sequence)
return;
 
DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]);
 
/* go to the first element of the sequence */
data++;
 
/* parse each byte till we reach end of sequence byte - 0x00 */
while (1) {
index = *data;
mipi_elem_exec = exec_elem[index];
if (!mipi_elem_exec) {
DRM_ERROR("Unsupported MIPI element, skipping sequence execution\n");
return;
}
 
/* goto element payload */
data++;
 
/* execute the element-specific routines */
data = mipi_elem_exec(intel_dsi, data);
 
/*
* After processing the element, data should point to the
* next element or the end of the sequence;
* check if we have reached the end of the sequence
*/
if (*data == 0x00)
break;
}
}
 
static bool generic_init(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
u32 bits_per_pixel = 24;
u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
u32 ui_num, ui_den;
u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
u32 ths_prepare_ns, tclk_trail_ns;
u32 tclk_prepare_clkzero, ths_prepare_hszero;
u32 lp_to_hs_switch, hs_to_lp_switch;
 
DRM_DEBUG_KMS("\n");
 
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
intel_dsi->pixel_format = mipi_config->videomode_color_format << 7;
 
if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
bits_per_pixel = 18;
else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
bits_per_pixel = 16;
 
bitrate = (mode->clock * bits_per_pixel) / intel_dsi->lane_count;
 
intel_dsi->operation_mode = mipi_config->is_cmd_mode;
intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
intel_dsi->init_count = mipi_config->master_init_timer;
intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
intel_dsi->video_frmt_cfg_bits =
mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
 
switch (intel_dsi->escape_clk_div) {
case 0:
tlpx_ns = 50;
break;
case 1:
tlpx_ns = 100;
break;
 
case 2:
tlpx_ns = 200;
break;
default:
tlpx_ns = 50;
break;
}
 
switch (intel_dsi->lane_count) {
case 1:
case 2:
extra_byte_count = 2;
break;
case 3:
extra_byte_count = 4;
break;
case 4:
default:
extra_byte_count = 3;
break;
}
 
/*
* ui(s) = 1/f [f in hz]
* ui(ns) = 10^9 / (f*10^6) [f in Mhz] -> 10^3/f(Mhz)
*/
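
/*
* Worked example with hypothetical numbers (not taken from any VBT): at a
* per-lane bitrate of 500000 kbps, UI = ui_num / ui_den = 10^6 / 500000 =
* 2 ns per bit, so a 50 ns TLPX corresponds to 25 UI.
*/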
 
/* in Kbps */
ui_num = NS_KHZ_RATIO;
ui_den = bitrate;
 
tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
ths_prepare_hszero = mipi_config->ths_prepare_hszero;
 
/*
* B060
* LP byte clock = TLPX/ (8UI)
*/
intel_dsi->lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num);
 
/* count values in UI = (ns value) * (bitrate / (2 * 10^6))
*
* Since txddrclkhs_i is 2xUI, all the count values programmed in
* DPHY param register are divided by 2
*
* prepare count
*/
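
/*
* Worked example, continuing the hypothetical 2 ns UI above: a 60 ns
* ths_prepare is 30 UI and, because the counts are programmed in units of
* 2 UI, prepare_cnt = DIV_ROUND_UP(60 * 500000, 10^6 * 2) = 15.
*/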
ths_prepare_ns = max(mipi_config->ths_prepare,
mipi_config->tclk_prepare);
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
 
/* exit zero count */
exit_zero_cnt = DIV_ROUND_UP(
(ths_prepare_hszero - ths_prepare_ns) * ui_den,
ui_num * 2
);
 
/*
* Exit zero is a unified value for ths_zero and ths_exit
* minimum value for ths_exit = 110ns
* min (exit_zero_cnt * 2) = 110/UI
* exit_zero_cnt = 55/UI
*/
if (exit_zero_cnt < (55 * ui_den / ui_num))
if ((55 * ui_den) % ui_num)
exit_zero_cnt += 1;
 
/* clk zero count */
clk_zero_cnt = DIV_ROUND_UP(
(tclk_prepare_clkzero - ths_prepare_ns)
* ui_den, 2 * ui_num);
 
/* trail count */
tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, 2 * ui_num);
 
if (prepare_cnt > PREPARE_CNT_MAX ||
exit_zero_cnt > EXIT_ZERO_CNT_MAX ||
clk_zero_cnt > CLK_ZERO_CNT_MAX ||
trail_cnt > TRAIL_CNT_MAX)
DRM_DEBUG_DRIVER("Values crossing maximum limits, restricting to max values\n");
 
if (prepare_cnt > PREPARE_CNT_MAX)
prepare_cnt = PREPARE_CNT_MAX;
 
if (exit_zero_cnt > EXIT_ZERO_CNT_MAX)
exit_zero_cnt = EXIT_ZERO_CNT_MAX;
 
if (clk_zero_cnt > CLK_ZERO_CNT_MAX)
clk_zero_cnt = CLK_ZERO_CNT_MAX;
 
if (trail_cnt > TRAIL_CNT_MAX)
trail_cnt = TRAIL_CNT_MAX;
 
/* B080 */
intel_dsi->dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 |
clk_zero_cnt << 8 | prepare_cnt;
 
/*
* LP to HS switch count = 4TLPX + PREP_COUNT * 2 + EXIT_ZERO_COUNT * 2
* + 10UI + Extra Byte Count
*
* HS to LP switch count = THS-TRAIL + 2TLPX + Extra Byte Count
* Extra Byte Count is calculated according to number of lanes.
* High Low Switch Count is the Max of LP to HS and
* HS to LP switch count
*
*/
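
/*
* Worked example, again with the hypothetical 2 ns UI (tlpx_ui = 25) and
* assumed prepare_cnt = 15, exit_zero_cnt = 28: lp_to_hs_switch =
* DIV_ROUND_UP(4 * 25 + 15 * 2 + 28 * 2 + 10, 8) = DIV_ROUND_UP(196, 8) =
* 25 byteclks, before the per-lane extra_byte_count is added below.
*/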
tlpx_ui = DIV_ROUND_UP(tlpx_ns * ui_den, ui_num);
 
/* B044 */
/* FIXME:
* The comment above does not match the code */
lp_to_hs_switch = DIV_ROUND_UP(4 * tlpx_ui + prepare_cnt * 2 +
exit_zero_cnt * 2 + 10, 8);
 
hs_to_lp_switch = DIV_ROUND_UP(mipi_config->ths_trail + 2 * tlpx_ui, 8);
 
intel_dsi->hs_to_lp_count = max(lp_to_hs_switch, hs_to_lp_switch);
intel_dsi->hs_to_lp_count += extra_byte_count;
 
/* B088 */
/* LP -> HS for clock lanes
* LP clk sync + LP11 + LP01 + tclk_prepare + tclk_zero +
* extra byte count
* 2TPLX + 1TLPX + 1 TPLX(in ns) + prepare_cnt * 2 + clk_zero_cnt *
* 2(in UI) + extra byte count
* In byteclks = (4TLPX + prepare_cnt * 2 + clk_zero_cnt *2 (in UI)) /
* 8 + extra byte count
*/
intel_dsi->clk_lp_to_hs_count =
DIV_ROUND_UP(
4 * tlpx_ui + prepare_cnt * 2 +
clk_zero_cnt * 2,
8);
 
intel_dsi->clk_lp_to_hs_count += extra_byte_count;
 
/* HS->LP for Clock Lanes
* Low Power clock synchronisations + 1Tx byteclk + tclk_trail +
* Extra byte count
* 2TLPX + 8UI + (trail_count*2)(in UI) + Extra byte count
* In byteclks = (2*TLpx(in UI) + trail_count*2 +8)(in UI)/8 +
* Extra byte count
*/
intel_dsi->clk_hs_to_lp_count =
DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
8);
intel_dsi->clk_hs_to_lp_count += extra_byte_count;
 
DRM_DEBUG_KMS("Eot %s\n", intel_dsi->eotp_pkt ? "enabled" : "disabled");
DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ?
"disabled" : "enabled");
DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count);
DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
DRM_DEBUG_KMS("BTA %s\n",
intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA ?
"disabled" : "enabled");
 
/* delays in the VBT are in units of 100us, so they need to be converted
* to ms here:
* delay (100us) * 100 / 1000 = delay / 10 (ms) */
intel_dsi->backlight_off_delay = pps->bl_disable_delay / 10;
intel_dsi->backlight_on_delay = pps->bl_enable_delay / 10;
intel_dsi->panel_on_delay = pps->panel_on_delay / 10;
intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
 
return true;
}
 
static int generic_mode_valid(struct intel_dsi_device *dsi,
struct drm_display_mode *mode)
{
return MODE_OK;
}
 
static bool generic_mode_fixup(struct intel_dsi_device *dsi,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode) {
return true;
}
 
static void generic_panel_reset(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
 
generic_exec_sequence(intel_dsi, sequence);
}
 
static void generic_disable_panel_power(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
 
generic_exec_sequence(intel_dsi, sequence);
}
 
static void generic_send_otp_cmds(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
 
generic_exec_sequence(intel_dsi, sequence);
}
 
static void generic_enable(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
 
generic_exec_sequence(intel_dsi, sequence);
}
 
static void generic_disable(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
 
generic_exec_sequence(intel_dsi, sequence);
}
 
static enum drm_connector_status generic_detect(struct intel_dsi_device *dsi)
{
return connector_status_connected;
}
 
static bool generic_get_hw_state(struct intel_dsi_device *dev)
{
return true;
}
 
static struct drm_display_mode *generic_get_modes(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->vbt.lfp_lvds_vbt_mode->type |= DRM_MODE_TYPE_PREFERRED;
return dev_priv->vbt.lfp_lvds_vbt_mode;
}
 
static void generic_destroy(struct intel_dsi_device *dsi) { }
 
/* Callbacks. We might not need them all. */
struct intel_dsi_dev_ops vbt_generic_dsi_display_ops = {
.init = generic_init,
.mode_valid = generic_mode_valid,
.mode_fixup = generic_mode_fixup,
.panel_reset = generic_panel_reset,
.disable_panel_power = generic_disable_panel_power,
.send_otp_cmds = generic_send_otp_cmds,
.enable = generic_enable,
.disable = generic_disable,
.detect = generic_detect,
.get_hw_state = generic_get_hw_state,
.get_modes = generic_get_modes,
.destroy = generic_destroy,
};
/drivers/video/drm/i915/intel_dsi_pll.c
298,3 → 298,84
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
{
int bpp;
 
switch (pixel_format) {
default:
case VID_MODE_FORMAT_RGB888:
case VID_MODE_FORMAT_RGB666_LOOSE:
bpp = 24;
break;
case VID_MODE_FORMAT_RGB666:
bpp = 18;
break;
case VID_MODE_FORMAT_RGB565:
bpp = 16;
break;
}
 
WARN(bpp != pipe_bpp,
"bpp match assertion failure (expected %d, current %d)\n",
bpp, pipe_bpp);
}
 
u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 dsi_clock, pclk;
u32 pll_ctl, pll_div;
u32 m = 0, p = 0;
int refclk = 25000;
int i;
 
DRM_DEBUG_KMS("\n");
 
mutex_lock(&dev_priv->dpio_lock);
pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
mutex_unlock(&dev_priv->dpio_lock);
 
/* mask out other bits and extract the P1 divisor */
pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
 
/* mask out the other bits and extract the M1 divisor */
pll_div &= DSI_PLL_M1_DIV_MASK;
pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
 
while (pll_ctl) {
pll_ctl = pll_ctl >> 1;
p++;
}
p--;
 
if (!p) {
DRM_ERROR("wrong P1 divisor\n");
return 0;
}
 
for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) {
if (lfsr_converts[i] == pll_div)
break;
}
 
if (i == ARRAY_SIZE(lfsr_converts)) {
DRM_ERROR("wrong m_seed programmed\n");
return 0;
}
 
m = i + 62;
 
dsi_clock = (m * refclk) / p;
 
/* pixel_format and pipe_bpp should agree */
assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
 
pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, pipe_bpp);
 
return pclk;
}
/drivers/video/drm/i915/intel_dvo.c
112,8 → 112,16
 
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
u32 tmp;
 
tmp = I915_READ(intel_dvo->dev.dvo_reg);
 
if (!(tmp & DVO_ENABLE))
return false;
 
return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
}
 
285,7 → 293,7
return true;
}
 
static void intel_dvo_mode_set(struct intel_encoder *encoder)
static void intel_dvo_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
343,7 → 351,7
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
connector->base.id, connector->name);
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
 
475,8 → 483,9
intel_encoder->get_hw_state = intel_dvo_get_hw_state;
intel_encoder->get_config = intel_dvo_get_config;
intel_encoder->compute_config = intel_dvo_compute_config;
intel_encoder->mode_set = intel_dvo_mode_set;
intel_encoder->pre_enable = intel_dvo_pre_enable;
intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;
 
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
521,7 → 530,8
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
intel_encoder->cloneable = true;
intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
(1 << INTEL_OUTPUT_DVO);
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
528,7 → 538,7
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
case INTEL_DVO_CHIP_LVDS:
intel_encoder->cloneable = false;
intel_encoder->cloneable = 0;
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
556,7 → 566,7
intel_dvo->panel_wants_dither = true;
}
 
drm_sysfs_connector_add(connector);
drm_connector_register(connector);
return;
}
 
/drivers/video/drm/i915/intel_fbdev.c
90,6 → 90,7
{
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
115,24 → 116,27
ret = -ENOMEM;
goto out;
}
obj->has_global_gtt_mapping = 0;
obj->stride = mode_cmd.pitches[0];
 
/* Flush everything out, we'll be doing GTT only from now on */
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_unref;
}
 
ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
if (ret)
fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto out_unpin;
}
 
ifbdev->fb = to_intel_framebuffer(fb);
 
return 0;
 
out_unpin:
i915_gem_object_unpin(obj);
i915_gem_object_ggtt_unpin(obj);
out_unref:
drm_gem_object_unreference(&obj->base);
out:
144,7 → 148,7
{
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
struct intel_framebuffer *intel_fb = &ifbdev->ifb;
struct intel_framebuffer *intel_fb = ifbdev->fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
151,16 → 155,29
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
int size, ret;
bool prealloc = false;
 
mutex_lock(&dev->struct_mutex);
 
if (!intel_fb->obj) {
if (intel_fb &&
(sizes->fb_width > intel_fb->base.width ||
sizes->fb_height > intel_fb->base.height)) {
DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
" releasing it\n",
intel_fb->base.width, intel_fb->base.height,
sizes->fb_width, sizes->fb_height);
drm_framebuffer_unreference(&intel_fb->base);
intel_fb = ifbdev->fb = NULL;
}
if (!intel_fb || WARN_ON(!intel_fb->obj)) {
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret)
goto out_unlock;
intel_fb = ifbdev->fb;
} else {
DRM_DEBUG_KMS("re-using BIOS fb\n");
prealloc = true;
sizes->fb_width = intel_fb->base.width;
sizes->fb_height = intel_fb->base.height;
}
176,7 → 193,7
 
info->par = helper;
 
fb = &ifbdev->ifb.base;
fb = &ifbdev->fb->base;
 
ifbdev->helper.fb = fb;
ifbdev->helper.fbdev = info;
215,7 → 232,7
return 0;
 
out_unpin:
i915_gem_object_unpin(obj);
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
out_unlock:
mutex_unlock(&dev->struct_mutex);
243,13 → 260,342
*blue = intel_crtc->lut_b[regno] << 8;
}
 
static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
static struct drm_fb_helper_crtc *
intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
{
int i;
 
for (i = 0; i < fb_helper->crtc_count; i++)
if (fb_helper->crtc_info[i].mode_set.crtc == crtc)
return &fb_helper->crtc_info[i];
 
return NULL;
}
 
/*
* Try to read the BIOS display configuration and use it for the initial
* fb configuration.
*
* The BIOS or boot loader will generally create an initial display
* configuration for us that includes some set of active pipes and displays.
* This routine tries to figure out which pipes and connectors are active
* and stuffs them into the crtcs and modes array given to us by the
* drm_fb_helper code.
*
* The overall sequence is:
* intel_fbdev_init - from driver load
* intel_fbdev_init_bios - initialize the intel_fbdev using BIOS data
* drm_fb_helper_init - build fb helper structs
* drm_fb_helper_single_add_all_connectors - more fb helper structs
* intel_fbdev_initial_config - apply the config
* drm_fb_helper_initial_config - call ->probe then register_framebuffer()
* drm_setup_crtcs - build crtc config for fbdev
* intel_fb_initial_config - find active connectors etc
* drm_fb_helper_single_fb_probe - set up fbdev
* intelfb_create - re-use or alloc fb, build out fbdev structs
*
* Note that we make no special effort to check whether we could actually
* switch to the selected modes without a full modeset. E.g. when the display
* is in VGA mode we need to recalculate watermarks and set a new high-res
* framebuffer anyway.
*/
static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **crtcs,
struct drm_display_mode **modes,
bool *enabled, int width, int height)
{
struct drm_device *dev = fb_helper->dev;
int i, j;
bool *save_enabled;
bool fallback = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
 
/*
* If the user specified any force options, just bail here
* and use that config.
*/
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
 
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
 
if (!enabled[i])
continue;
 
if (connector->force != DRM_FORCE_UNSPECIFIED)
return false;
}
 
save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
GFP_KERNEL);
if (!save_enabled)
return false;
 
memcpy(save_enabled, enabled, dev->mode_config.num_connector);
 
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_fb_helper_crtc *new_crtc;
 
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
 
if (connector->status == connector_status_connected)
num_connectors_detected++;
 
if (!enabled[i]) {
DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
connector->name);
continue;
}
 
encoder = connector->encoder;
if (!encoder || WARN_ON(!encoder->crtc)) {
DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
connector->name);
enabled[i] = false;
continue;
}
 
num_connectors_enabled++;
 
new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc);
 
/*
* Make sure we're not trying to drive multiple connectors
* with a single CRTC, since our cloning support may not
* match the BIOS.
*/
for (j = 0; j < fb_helper->connector_count; j++) {
if (crtcs[j] == new_crtc) {
DRM_DEBUG_KMS("fallback: cloned configuration\n");
fallback = true;
goto out;
}
}
 
DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
connector->name);
 
/* go for command line mode first */
modes[i] = drm_pick_cmdline_mode(fb_conn, width, height);
 
/* try for preferred next */
if (!modes[i]) {
DRM_DEBUG_KMS("looking for preferred mode on connector %s\n",
connector->name);
modes[i] = drm_has_preferred_mode(fb_conn, width,
height);
}
 
/* No preferred mode marked by the EDID? Are there any modes? */
if (!modes[i] && !list_empty(&connector->modes)) {
DRM_DEBUG_KMS("using first mode listed on connector %s\n",
connector->name);
modes[i] = list_first_entry(&connector->modes,
struct drm_display_mode,
head);
}
 
/* last resort: use current mode */
if (!modes[i]) {
/*
* IMPORTANT: We want to use the adjusted mode (i.e.
* after the panel fitter upscaling) as the initial
* config, not the input mode, which is what crtc->mode
* usually contains. But since our current fastboot
* code puts a mode derived from the post-pfit timings
* into crtc->mode this works out correctly. We don't
* use hwmode anywhere right now, so use it for this
* since the fb helper layer wants a pointer to
* something we own.
*/
DRM_DEBUG_KMS("looking for current mode on connector %s\n",
connector->name);
intel_mode_from_pipe_config(&encoder->crtc->hwmode,
&to_intel_crtc(encoder->crtc)->config);
modes[i] = &encoder->crtc->hwmode;
}
crtcs[i] = new_crtc;
 
DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
connector->name,
pipe_name(to_intel_crtc(encoder->crtc)->pipe),
encoder->crtc->base.id,
modes[i]->hdisplay, modes[i]->vdisplay,
modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
 
fallback = false;
}
 
/*
* If the BIOS didn't enable everything it could, fall back to the fbdev
* helper library's usual behaviour of lighting up as much of the display
* hardware as possible.
*/
if (num_connectors_enabled != num_connectors_detected &&
num_connectors_enabled < INTEL_INFO(dev)->num_pipes) {
DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
num_connectors_detected);
fallback = true;
}
 
out:
if (fallback) {
DRM_DEBUG_KMS("Not using firmware configuration\n");
memcpy(enabled, save_enabled, dev->mode_config.num_connector);
kfree(save_enabled);
return false;
}
 
kfree(save_enabled);
return true;
}
 
static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.initial_config = intel_fb_initial_config,
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
.fb_probe = intelfb_create,
};
 
/*
* Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
* The core display code will have read out the current plane configuration,
* so we use that to figure out if there's an object for us to use as the
* fb, and if so, we re-use it for the fbdev configuration.
*
* Note we only support a single fb shared across pipes for boot (mostly for
* fbcon), so we just find the biggest and use that.
*/
static bool intel_fbdev_init_bios(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
struct intel_framebuffer *fb = NULL;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
struct intel_plane_config *plane_config = NULL;
unsigned int max_size = 0;
 
if (!i915.fastboot)
return false;
 
/* Find the largest fb */
for_each_crtc(dev, crtc) {
intel_crtc = to_intel_crtc(crtc);
 
if (!intel_crtc->active || !crtc->primary->fb) {
DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
pipe_name(intel_crtc->pipe));
continue;
}
 
if (intel_crtc->plane_config.size > max_size) {
DRM_DEBUG_KMS("found possible fb from plane %c\n",
pipe_name(intel_crtc->pipe));
plane_config = &intel_crtc->plane_config;
fb = to_intel_framebuffer(crtc->primary->fb);
max_size = plane_config->size;
}
}
 
if (!fb) {
DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
goto out;
}
 
/* Now make sure all the pipes will fit into it */
for_each_crtc(dev, crtc) {
unsigned int cur_size;
 
intel_crtc = to_intel_crtc(crtc);
 
if (!intel_crtc->active) {
DRM_DEBUG_KMS("pipe %c not active, skipping\n",
pipe_name(intel_crtc->pipe));
continue;
}
 
DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
pipe_name(intel_crtc->pipe));
 
/*
* See if the plane fb we found above will fit on this
* pipe. Note we need to use the selected fb's pitch and bpp
* rather than the current pipe's, since they differ.
*/
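/*
 * Illustrative numbers (not part of this change): a 1920x1080 pipe scanning
 * out of a 32bpp fb needs 1920 * 4 = 7680 bytes per scanline, so the
 * candidate fb must have pitches[0] >= 7680; it must then also be at least
 * ALIGN(1080, 8) * 7680 = 8294400 bytes in total (the 8-line alignment only
 * matters for tiled fbs on gen3+, 16 lines on gen2).
 */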
cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.bits_per_pixel / 8;
if (fb->base.pitches[0] < cur_size) {
DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
pipe_name(intel_crtc->pipe),
cur_size, fb->base.pitches[0]);
plane_config = NULL;
fb = NULL;
break;
}
 
cur_size = intel_crtc->config.adjusted_mode.crtc_vdisplay;
cur_size = ALIGN(cur_size, plane_config->tiled ? (IS_GEN2(dev) ? 16 : 8) : 1);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
intel_crtc->config.adjusted_mode.crtc_hdisplay,
intel_crtc->config.adjusted_mode.crtc_vdisplay,
fb->base.bits_per_pixel,
cur_size);
 
if (cur_size > max_size) {
DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
pipe_name(intel_crtc->pipe),
cur_size, max_size);
plane_config = NULL;
fb = NULL;
break;
}
 
DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
pipe_name(intel_crtc->pipe),
max_size, cur_size);
}
 
if (!fb) {
DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
goto out;
}
 
ifbdev->preferred_bpp = fb->base.bits_per_pixel;
ifbdev->fb = fb;
 
drm_framebuffer_reference(&ifbdev->fb->base);
 
/* Final pass to check if any active pipes don't have fbs */
for_each_crtc(dev, crtc) {
intel_crtc = to_intel_crtc(crtc);
 
if (!intel_crtc->active)
continue;
 
WARN(!crtc->primary->fb,
"re-used BIOS config but lost an fb on crtc %d\n",
crtc->base.id);
}
 
 
DRM_DEBUG_KMS("using BIOS fb for initial console\n");
return true;
 
out:
 
return false;
}
 
int intel_fbdev_init(struct drm_device *dev)
{
struct intel_fbdev *ifbdev;
256,21 → 602,26
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
if (!ifbdev)
if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
return -ENODEV;
 
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
if (ifbdev == NULL)
return -ENOMEM;
 
dev_priv->fbdev = ifbdev;
ifbdev->helper.funcs = &intel_fb_helper_funcs;
drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
 
if (!intel_fbdev_init_bios(dev, ifbdev))
ifbdev->preferred_bpp = 32;
 
ret = drm_fb_helper_init(dev, &ifbdev->helper,
INTEL_INFO(dev)->num_pipes,
4);
INTEL_INFO(dev)->num_pipes, 4);
if (ret) {
kfree(ifbdev);
return ret;
}
 
dev_priv->fbdev = ifbdev;
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
 
return 0;
279,9 → 630,10
void intel_fbdev_initial_config(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_fbdev *ifbdev = dev_priv->fbdev;
 
/* Due to peculiar init order wrt to hpd handling this is separate. */
drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp);
}
 
 
288,5 → 640,6
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->fbdev)
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}
/drivers/video/drm/i915/intel_hdmi.c
113,7 → 113,8
}
 
static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
enum transcoder cpu_transcoder)
enum transcoder cpu_transcoder,
struct drm_i915_private *dev_priv)
{
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
296,7 → 297,8
u32 val = I915_READ(ctl_reg);
 
data_reg = hsw_infoframe_data_reg(type,
intel_crtc->config.cpu_transcoder);
intel_crtc->config.cpu_transcoder,
dev_priv);
if (data_reg == 0)
return;
 
365,6 → 367,9
union hdmi_infoframe frame;
int ret;
 
/* Set user selected PAR to incoming mode's member */
adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
 
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
adjusted_mode);
if (ret < 0) {
416,6 → 421,7
}
 
static void g4x_set_infoframes(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
423,7 → 429,7
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
u32 port;
u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
 
assert_hdmi_port_disabled(intel_hdmi);
 
438,7 → 444,7
* either. */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
 
if (!intel_hdmi->has_hdmi_sink) {
if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
447,18 → 453,6
return;
}
 
switch (intel_dig_port->port) {
case PORT_B:
port = VIDEO_DIP_PORT_B;
break;
case PORT_C:
port = VIDEO_DIP_PORT_C;
break;
default:
BUG();
return;
}
 
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
val &= ~VIDEO_DIP_ENABLE;
481,6 → 475,7
}
 
static void ibx_set_infoframes(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
489,7 → 484,7
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port;
u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
 
assert_hdmi_port_disabled(intel_hdmi);
 
496,7 → 491,7
/* See the big comment in g4x_set_infoframes() */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
 
if (!intel_hdmi->has_hdmi_sink) {
if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
505,21 → 500,6
return;
}
 
switch (intel_dig_port->port) {
case PORT_B:
port = VIDEO_DIP_PORT_B;
break;
case PORT_C:
port = VIDEO_DIP_PORT_C;
break;
case PORT_D:
port = VIDEO_DIP_PORT_D;
break;
default:
BUG();
return;
}
 
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
val &= ~VIDEO_DIP_ENABLE;
543,6 → 523,7
}
 
static void cpt_set_infoframes(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
556,7 → 537,7
/* See the big comment in g4x_set_infoframes() */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
 
if (!intel_hdmi->has_hdmi_sink) {
if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
579,13 → 560,16
}
 
static void vlv_set_infoframes(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
 
assert_hdmi_port_disabled(intel_hdmi);
 
592,7 → 576,7
/* See the big comment in g4x_set_infoframes() */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
 
if (!intel_hdmi->has_hdmi_sink) {
if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
601,9 → 585,19
return;
}
 
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
val &= ~VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
}
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
 
val |= VIDEO_DIP_ENABLE;
val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_GCP);
val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR |
VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_GCP);
 
I915_WRITE(reg, val);
POSTING_READ(reg);
614,6 → 608,7
}
 
static void hsw_set_infoframes(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
624,7 → 619,7
 
assert_hdmi_port_disabled(intel_hdmi);
 
if (!intel_hdmi->has_hdmi_sink) {
if (!enable) {
I915_WRITE(reg, 0);
POSTING_READ(reg);
return;
641,7 → 636,7
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
 
static void intel_hdmi_mode_set(struct intel_encoder *encoder)
static void intel_hdmi_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
663,27 → 658,26
else
hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
 
/* Required on CPT */
if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
if (crtc->config.has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
 
if (intel_hdmi->has_audio) {
if (crtc->config.has_audio) {
WARN_ON(!crtc->config.has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(crtc->pipe));
hdmi_val |= SDVO_AUDIO_ENABLE;
hdmi_val |= HDMI_MODE_SELECT_HDMI;
intel_write_eld(&encoder->base, adjusted_mode);
}
 
if (HAS_PCH_CPT(dev))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else if (IS_CHERRYVIEW(dev))
hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
else
hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
 
I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
POSTING_READ(intel_hdmi->hdmi_reg);
 
intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
}
 
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
692,8 → 686,13
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;
 
tmp = I915_READ(intel_hdmi->hdmi_reg);
 
if (!(tmp & SDVO_ENABLE))
701,6 → 700,8
 
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
else if (IS_CHERRYVIEW(dev))
*pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
 
727,6 → 728,12
else
flags |= DRM_MODE_FLAG_NVSYNC;
 
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
 
if (tmp & SDVO_AUDIO_ENABLE)
pipe_config->has_audio = true;
 
pipe_config->adjusted_mode.flags |= flags;
 
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
749,7 → 756,7
u32 temp;
u32 enable_bits = SDVO_ENABLE;
 
if (intel_hdmi->has_audio)
if (intel_crtc->config.has_audio)
enable_bits |= SDVO_AUDIO_ENABLE;
 
temp = I915_READ(intel_hdmi->hdmi_reg);
841,11 → 848,11
}
}
 
static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
if (IS_G4X(dev))
if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
return 165000;
else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
return 300000;
857,7 → 864,8
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
true))
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
return MODE_CLOCK_LOW;
868,6 → 876,30
return MODE_OK;
}
 
static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
int count = 0, count_hdmi = 0;
 
if (HAS_GMCH_DISPLAY(dev))
return false;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
if (encoder->new_crtc != crtc)
continue;
 
count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
count++;
}
 
/*
* HDMI 12bpc affects the clocks, so it's only possible
* when not cloning with other encoder types.
*/
return count_hdmi > 0 && count_hdmi == count;
}
 
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
875,12 → 907,14
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
int portclock_limit = hdmi_portclock_limit(intel_hdmi);
int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
int desired_bpp;
 
pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
 
if (intel_hdmi->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
if (intel_hdmi->has_hdmi_sink &&
if (pipe_config->has_hdmi_sink &&
drm_match_cea_mode(adjusted_mode) > 1)
intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
else
893,6 → 927,9
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
pipe_config->has_pch_encoder = true;
 
if (pipe_config->has_hdmi_sink && intel_hdmi->has_audio)
pipe_config->has_audio = true;
 
/*
* HDMI is either 12 or 8, so if the display lets 10bpc sneak
* through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
899,8 → 936,9
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
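/*
 * Illustrative numbers (not part of this change): a 1080p mode with a
 * 148500 kHz pixel clock needs 148500 * 3 / 2 = 222750 kHz at the port for
 * 12bpc, which fits under the 300000 kHz HSW+ limit returned by
 * hdmi_portclock_limit() but not under the 165000 kHz G4X limit.
 */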
if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
&& HAS_PCH_SPLIT(dev)) {
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
clock_12bpc <= portclock_limit &&
hdmi_12bpc_possible(encoder->new_crtc)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
 
934,11 → 972,15
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = dev->dev_private;
struct edid *edid;
enum intel_display_power_domain power_domain;
enum drm_connector_status status = connector_status_disconnected;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
connector->base.id, connector->name);
 
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
966,31 → 1008,48
intel_encoder->type = INTEL_OUTPUT_HDMI;
}
 
intel_display_power_put(dev_priv, power_domain);
 
return status;
}
 
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
enum intel_display_power_domain power_domain;
int ret;
 
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
 
return intel_ddc_get_modes(connector,
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
ret = intel_ddc_get_modes(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
 
intel_display_power_put(dev_priv, power_domain);
 
return ret;
}
 
static bool
intel_hdmi_detect_audio(struct drm_connector *connector)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
enum intel_display_power_domain power_domain;
struct edid *edid;
bool has_audio = false;
 
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
1000,6 → 1059,8
kfree(edid);
}
 
intel_display_power_put(dev_priv, power_domain);
 
return has_audio;
}
 
1066,8 → 1127,25
goto done;
}
 
if (property == connector->dev->mode_config.aspect_ratio_property) {
switch (val) {
case DRM_MODE_PICTURE_ASPECT_NONE:
intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
break;
case DRM_MODE_PICTURE_ASPECT_4_3:
intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
break;
case DRM_MODE_PICTURE_ASPECT_16_9:
intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
break;
default:
return -EINVAL;
}
goto done;
}
 
return -EINVAL;
 
done:
if (intel_dig_port->base.base.crtc)
intel_crtc_restore_mode(intel_dig_port->base.base.crtc);
1075,20 → 1153,34
return 0;
}
 
static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
 
intel_hdmi_prepare(encoder);
 
intel_hdmi->set_infoframes(&encoder->base,
intel_crtc->config.has_hdmi_sink,
adjusted_mode);
}
 
static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
 
if (!IS_VALLEYVIEW(dev))
return;
 
/* Enable clock channels for this port */
mutex_lock(&dev_priv->dpio_lock);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
1115,6 → 1207,10
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
mutex_unlock(&dev_priv->dpio_lock);
 
intel_hdmi->set_infoframes(&encoder->base,
intel_crtc->config.has_hdmi_sink,
adjusted_mode);
 
intel_enable_hdmi(encoder);
 
vlv_wait_port_ready(dev_priv, dport);
1130,8 → 1226,7
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
 
if (!IS_VALLEYVIEW(dev))
return;
intel_hdmi_prepare(encoder);
 
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
1154,6 → 1249,70
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
u32 val;
 
mutex_lock(&dev_priv->dpio_lock);
 
/* program left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA1_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA1_FORCE;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
} else {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA2_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA2_FORCE;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}
 
/* program clock channel usage */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
 
/*
* This is a bit weird since generally CL
* matches the pipe, but here we need to
* pick the CL based on the port.
*/
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
if (pipe != PIPE_B)
val &= ~CHV_CMN_USEDCLKCHANNEL;
else
val |= CHV_CMN_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1170,6 → 1329,152
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void chv_hdmi_post_disable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
u32 val;
 
mutex_lock(&dev_priv->dpio_lock);
 
/* Propagate soft reset to data lane reset */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
int data, i;
u32 val;
 
mutex_lock(&dev_priv->dpio_lock);
 
/* Deassert soft data lane reset */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
 
/* Program Tx latency optimal setting */
for (i = 0; i < 4; i++) {
/* Set the latency optimal bit */
data = (i == 1) ? 0x0 : 0x6;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
data << DPIO_FRC_LATENCY_SHFIT);
 
/* Set the upar bit */
data = (i == 1) ? 0x0 : 0x1;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
data << DPIO_UPAR_SHIFT);
}
 
/* Data lane stagger programming */
/* FIXME: Fix up value only after power analysis */
 
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
 
/* FIXME: Program the support xxx V-dB */
/* Use 800mV-0dB */
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
}
 
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN_MASK;
val |= 102 << DPIO_SWING_MARGIN_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
 
/* Disable unique transition scale */
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
}
 
/* Additional steps for 1200mV-0dB */
#if 0
val = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW3(ch));
if (ch)
val |= DPIO_TX_UNIQ_TRANS_SCALE_CH1;
else
val |= DPIO_TX_UNIQ_TRANS_SCALE_CH0;
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(ch), val);
 
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(ch),
vlv_dpio_read(dev_priv, pipe, VLV_TX_DW2(ch)) |
(0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT));
#endif
/* Start swing calculation */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
 
/* LRC Bypass */
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
val |= DPIO_LRC_BYPASS;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
 
mutex_unlock(&dev_priv->dpio_lock);
 
intel_enable_hdmi(encoder);
 
vlv_wait_port_ready(dev_priv, dport);
}
 
static void intel_hdmi_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
1195,11 → 1500,22
};
 
static void
intel_attach_aspect_ratio_property(struct drm_connector *connector)
{
if (!drm_mode_create_aspect_ratio_property(connector->dev))
drm_object_attach_property(&connector->base,
connector->dev->mode_config.aspect_ratio_property,
DRM_MODE_PICTURE_ASPECT_NONE);
}
 
static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_hdmi->color_range_auto = true;
intel_attach_aspect_ratio_property(connector);
intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
 
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1230,6 → 1546,9
intel_encoder->hpd_pin = HPD_PORT_C;
break;
case PORT_D:
if (IS_CHERRYVIEW(dev))
intel_hdmi->ddc_bus = GMBUS_PORT_DPD_CHV;
else
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
intel_encoder->hpd_pin = HPD_PORT_D;
break;
1243,7 → 1562,7
if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
} else if (!HAS_PCH_SPLIT(dev)) {
} else if (IS_G4X(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
} else if (HAS_DDI(dev)) {
1261,11 → 1580,12
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;
 
intel_hdmi_add_properties(intel_hdmi, connector);
 
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
drm_connector_register(connector);
 
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
1299,22 → 1619,41
DRM_MODE_ENCODER_TMDS);
 
intel_encoder->compute_config = intel_hdmi_compute_config;
intel_encoder->mode_set = intel_hdmi_mode_set;
intel_encoder->disable = intel_disable_hdmi;
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_VALLEYVIEW(dev)) {
if (IS_CHERRYVIEW(dev)) {
intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = chv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = chv_hdmi_post_disable;
} else if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = vlv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->pre_enable = intel_hdmi_pre_enable;
intel_encoder->enable = intel_enable_hdmi;
}
 
intel_encoder->type = INTEL_OUTPUT_HDMI;
if (IS_CHERRYVIEW(dev)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
else
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
} else {
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = false;
}
intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
/*
* BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
* to work on real hardware. And since g4x can send infoframes to
* only one port anyway, nothing is lost by allowing it.
*/
if (IS_G4X(dev))
intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
 
intel_dig_port->port = port;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
/drivers/video/drm/i915/intel_i2c.c
34,11 → 34,6
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
enum disp_clk {
CDCLK,
CZCLK
};
 
struct gmbus_port {
const char *name;
int reg;
63,60 → 58,11
return container_of(i2c, struct intel_gmbus, adapter);
}
 
static int get_disp_clk_div(struct drm_i915_private *dev_priv,
enum disp_clk clk)
{
u32 reg_val;
int clk_ratio;
 
reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
 
if (clk == CDCLK)
clk_ratio =
((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
else
clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
 
return clk_ratio;
}
 
static void gmbus_set_freq(struct drm_i915_private *dev_priv)
{
int vco, gmbus_freq = 0, cdclk_div;
 
BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
 
vco = valleyview_get_vco(dev_priv);
 
/* Get the CDCLK divide ratio */
cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
 
/*
* Program the gmbus_freq based on the cdclk frequency.
* BSpec erroneously claims we should aim for 4MHz, but
* in fact 1MHz is the correct frequency.
*/
if (cdclk_div)
gmbus_freq = (vco << 1) / cdclk_div;
 
if (WARN_ON(gmbus_freq == 0))
return;
 
I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
}
 
void
intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/*
* In BIOS-less system, program the correct gmbus frequency
* before reading edid.
*/
if (IS_VALLEYVIEW(dev))
gmbus_set_freq(dev_priv);
 
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
}
258,13 → 204,6
algo->data = bus;
}
 
/*
* gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
* mode. This results in spurious interrupt warnings if the legacy irq no. is
* shared with another device. The kernel then disables that interrupt source
* and so prevents the other device from working properly.
*/
#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
static int
gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
u32 gmbus2_status,
/drivers/video/drm/i915/intel_lvds.c
51,6 → 51,7
 
bool is_dual_link;
u32 reg;
u32 a3_power;
 
struct intel_lvds_connector *attached_connector;
};
71,8 → 72,13
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;
 
tmp = I915_READ(lvds_encoder->reg);
 
if (!(tmp & LVDS_PORT_EN))
126,10 → 132,6
pipe_config->adjusted_mode.crtc_clock = dotclock;
}
 
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
176,8 → 178,11
 
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
* panels behave in the two modes. For now, let's just maintain the
* value we got from the BIOS.
*/
temp &= ~LVDS_A3_POWER_MASK;
temp |= lvds_encoder->a3_power;
 
/* Set the dithering flag on LVDS as needed, note that there is no
* special lvds dither control bit on pch-split platforms, dithering is
275,7 → 280,6
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
290,8 → 294,7
return false;
}
 
if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
LVDS_A3_POWER_UP)
if (lvds_encoder->a3_power == LVDS_A3_POWER_UP)
lvds_bpp = 8*3;
else
lvds_bpp = 6*3;
331,15 → 334,6
return true;
}
 
static void intel_lvds_mode_set(struct intel_encoder *encoder)
{
/*
* We don't do anything here, the LVDS port is fully set up in the pre
* enable hook - the ordering constraints for enabling the lvds port vs.
* enabling the display pll are too strict.
*/
}
 
/**
* Detect the LVDS connection.
*
354,7 → 348,7
enum drm_connector_status status;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
connector->base.id, connector->name);
 
status = intel_panel_detect(dev);
if (status != connector_status_unknown)
848,8 → 842,8
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* use the module option value if specified */
if (i915_lvds_channel_mode > 0)
return i915_lvds_channel_mode == 2;
if (i915.lvds_channel_mode > 0)
return i915.lvds_channel_mode == 2;
 
// if (dmi_check_system(intel_dual_link_lvds))
// return true;
899,6 → 893,7
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
struct drm_crtc *crtc;
u32 lvds;
952,16 → 947,16
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
intel_encoder->compute_config = intel_lvds_compute_config;
intel_encoder->mode_set = intel_lvds_mode_set;
intel_encoder->disable = intel_disable_lvds;
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;
 
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS;
 
intel_encoder->cloneable = false;
intel_encoder->cloneable = 0;
if (HAS_PCH_SPLIT(dev))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
else if (IS_GEN4(dev))
1000,6 → 995,7
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
mutex_lock(&dev->mode_config.mutex);
edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
1032,15 → 1028,14
 
fixed_mode = drm_mode_duplicate(dev, scan);
if (fixed_mode) {
intel_connector->panel.downclock_mode =
downclock_mode =
intel_find_panel_downclock(dev,
fixed_mode, connector);
if (intel_connector->panel.downclock_mode !=
NULL && i915_lvds_downclock) {
if (downclock_mode != NULL &&
i915.lvds_downclock) {
/* We found the downclock for LVDS. */
dev_priv->lvds_downclock_avail = true;
dev_priv->lvds_downclock =
intel_connector->panel.
downclock_mode->clock;
DRM_DEBUG_KMS("LVDS downclock is found"
" in EDID. Normal clock %dKhz, "
1094,10 → 1089,15
goto failed;
 
out:
mutex_unlock(&dev->mode_config.mutex);
 
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
 
lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
LVDS_A3_POWER_MASK;
 
/*
* Unlock registers and just
* leave them unlocked
1109,24 → 1109,19
I915_WRITE(PP_CONTROL,
I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
// dev_priv->lid_notifier.notifier_call = intel_lid_notify;
// if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
// DRM_DEBUG_KMS("lid notifier registration failed\n");
// dev_priv->lid_notifier.notifier_call = NULL;
// }
drm_sysfs_connector_add(connector);
drm_connector_register(connector);
 
intel_panel_init(&intel_connector->panel, fixed_mode);
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector);
 
return;
 
failed:
mutex_unlock(&dev->mode_config.mutex);
 
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
if (fixed_mode)
drm_mode_destroy(dev, fixed_mode);
kfree(lvds_encoder);
kfree(lvds_connector);
return;
/drivers/video/drm/i915/intel_opregion.c
27,12 → 27,9
 
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
//#include <linux/acpi.h>
//#include <linux/acpi_io.h>
//#include <acpi/video.h>
#include <linux/errno.h>
#include <linux/acpi.h>
#include <acpi/video.h>
 
 
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
229,6 → 226,8
#define ACPI_DIGITAL_OUTPUT (3<<8)
#define ACPI_LVDS_OUTPUT (4<<8)
 
#define MAX_DSLP 1500
 
#ifdef CONFIG_ACPI
static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
{
263,10 → 262,11
/* The spec says 2ms should be the default, but it's too small
* for some machines. */
dslp = 50;
} else if (dslp > 500) {
} else if (dslp > MAX_DSLP) {
/* Hey bios, trust must be earned. */
WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp);
dslp = 500;
DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
"using %u ms instead\n", dslp, MAX_DSLP);
dslp = MAX_DSLP;
}
 
/* The spec tells us to do this, but we are the only user... */
352,6 → 352,7
case INTEL_OUTPUT_UNKNOWN:
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_DP_MST:
type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
break;
case INTEL_OUTPUT_EDP:
403,6 → 404,15
 
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
/*
* If the acpi_video interface is not supposed to be used, don't
* bother processing backlight level change requests from firmware.
*/
if (!acpi_video_verify_backlight_support()) {
DRM_DEBUG_KMS("opregion backlight request ignored\n");
return 0;
}
 
if (!(bclp & ASLE_BCLP_VALID))
return ASLC_BACKLIGHT_FAILED;
 
410,7 → 420,7
if (bclp > 255)
return ASLC_BACKLIGHT_FAILED;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 
/*
* Update backlight on all connectors that support backlight (usually
418,10 → 428,10
*/
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
intel_panel_set_backlight(intel_connector, bclp, 255);
intel_panel_set_backlight_acpi(intel_connector, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
 
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
 
 
return 0;
855,11 → 865,15
return -ENOTSUPP;
}
 
base = ioremap(asls, OPREGION_SIZE);
#ifdef CONFIG_ACPI
INIT_WORK(&opregion->asle_work, asle_work);
#endif
 
base = acpi_os_ioremap(asls, OPREGION_SIZE);
if (!base)
return -ENOMEM;
 
memcpy(buf, base, sizeof(buf));
memcpy_fromio(buf, base, sizeof(buf));
 
if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
DRM_DEBUG_DRIVER("opregion signature mismatch\n");
/drivers/video/drm/i915/intel_panel.c
33,8 → 33,6
#include <linux/moduleparam.h>
#include "intel_drv.h"
 
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
 
void
intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
44,6 → 42,59
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
 
/**
* intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
* @dev: drm device
* @fixed_mode : panel native mode
* @connector: LVDS/eDP connector
*
* Return downclock_avail
* Find the reduced downclock for LVDS/eDP in EDID.
*/
struct drm_display_mode *
intel_find_panel_downclock(struct drm_device *dev,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector)
{
struct drm_display_mode *scan, *tmp_mode;
int temp_downclock;
 
temp_downclock = fixed_mode->clock;
tmp_mode = NULL;
 
list_for_each_entry(scan, &connector->probed_modes, head) {
/*
* If a mode has the same resolution as the fixed panel mode but a
* different refresh rate, a reduced downclock has been found. In that
* case we can program different FPx0/1 values to dynamically select
* between the low and high frequency.
*/
if (scan->hdisplay == fixed_mode->hdisplay &&
scan->hsync_start == fixed_mode->hsync_start &&
scan->hsync_end == fixed_mode->hsync_end &&
scan->htotal == fixed_mode->htotal &&
scan->vdisplay == fixed_mode->vdisplay &&
scan->vsync_start == fixed_mode->vsync_start &&
scan->vsync_end == fixed_mode->vsync_end &&
scan->vtotal == fixed_mode->vtotal) {
if (scan->clock < temp_downclock) {
/*
* A downclock has already been found, but keep
* looking for an even lower one.
*/
temp_downclock = scan->clock;
tmp_mode = scan;
}
}
}
 
if (temp_downclock < fixed_mode->clock)
return drm_mode_duplicate(dev, tmp_mode);
else
return NULL;
}
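/*
 * Illustrative example (hypothetical EDID): a panel whose fixed mode is
 * 1920x1080 at 138500 kHz and which also reports an otherwise
 * timing-identical 1920x1080 mode at 95000 kHz gets the 95000 kHz variant
 * duplicated and returned as its downclock mode.
 */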
 
/* adjusted_mode has been preset to be the panel's fixed mode */
void
intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
325,13 → 376,91
pipe_config->gmch_pfit.lvds_border_bits = border;
}
 
static int i915_panel_invert_brightness;
MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
"(-1 force normal, 0 machine defaults, 1 force inversion), please "
"report PCI device ID, subsystem vendor and subsystem device ID "
"to dri-devel@lists.freedesktop.org, if your machine needs it. "
"It will then be included in an upcoming module version.");
module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
enum drm_connector_status
intel_panel_detect(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Assume that the BIOS does not lie through the OpRegion... */
if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
connector_status_connected :
connector_status_disconnected;
}
 
switch (i915.panel_ignore_lid) {
case -2:
return connector_status_connected;
case -1:
return connector_status_disconnected;
default:
return connector_status_unknown;
}
}
 
/**
* scale - scale values from one range to another
*
* @source_val: value in range [@source_min..@source_max]
*
* Return @source_val in range [@source_min..@source_max] scaled to range
* [@target_min..@target_max].
*/
static uint32_t scale(uint32_t source_val,
uint32_t source_min, uint32_t source_max,
uint32_t target_min, uint32_t target_max)
{
uint64_t target_val;
 
WARN_ON(source_min > source_max);
WARN_ON(target_min > target_max);
 
/* defensive */
source_val = clamp(source_val, source_min, source_max);
 
/* avoid overflows */
target_val = (uint64_t)(source_val - source_min) *
(target_max - target_min);
do_div(target_val, source_max - source_min);
target_val += target_min;
 
return target_val;
}
 
/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
static inline u32 scale_user_to_hw(struct intel_connector *connector,
u32 user_level, u32 user_max)
{
struct intel_panel *panel = &connector->panel;
 
return scale(user_level, 0, user_max,
panel->backlight.min, panel->backlight.max);
}
 
/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
* to [hw_min..hw_max]. */
static inline u32 clamp_user_to_hw(struct intel_connector *connector,
u32 user_level, u32 user_max)
{
struct intel_panel *panel = &connector->panel;
u32 hw_level;
 
hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max);
hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max);
 
return hw_level;
}
 
/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
static inline u32 scale_hw_to_user(struct intel_connector *connector,
u32 hw_level, u32 user_max)
{
struct intel_panel *panel = &connector->panel;
 
return scale(hw_level, panel->backlight.min, panel->backlight.max,
0, user_max);
}
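/*
 * Worked example with hypothetical hardware values (not taken from any real
 * VBT): with panel->backlight.min = 20 and panel->backlight.max = 7812, a
 * userspace level of 128 on a 0..255 scale maps through scale_user_to_hw()
 * to 20 + 128 * (7812 - 20) / 255 = 3931, and scale_hw_to_user() maps 3931
 * back to 127 after integer truncation.
 */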
 
static u32 intel_panel_compute_brightness(struct intel_connector *connector,
u32 val)
{
341,10 → 470,10
 
WARN_ON(panel->backlight.max == 0);
 
if (i915_panel_invert_brightness < 0)
if (i915.invert_brightness < 0)
return val;
 
if (i915_panel_invert_brightness > 0 ||
if (i915.invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
return panel->backlight.max - val;
}
491,15 → 620,15
dev_priv->display.set_backlight(connector, level);
}
 
/* set backlight brightness to level in range [0..max] */
void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
u32 max)
/* set backlight brightness to level in range [0..max], scaling wrt hw min */
static void intel_panel_set_backlight(struct intel_connector *connector,
u32 user_level, u32 user_max)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 freq;
u32 hw_level;
unsigned long flags;
 
if (!panel->backlight.present || pipe == INVALID_PIPE)
509,19 → 638,41
 
WARN_ON(panel->backlight.max == 0);
 
/* scale to hardware max, but be careful to not overflow */
freq = panel->backlight.max;
if (freq < max)
level = level * freq / max;
else
level = freq / max * level;
hw_level = scale_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
 
panel->backlight.level = level;
// if (panel->backlight.device)
// panel->backlight.device->props.brightness = level;
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(connector, hw_level);
 
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
}
 
/* set backlight brightness to level in range [0..max], assuming hw min is
* respected.
*/
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
u32 user_level, u32 user_max)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 hw_level;
unsigned long flags;
 
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
 
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 
WARN_ON(panel->backlight.max == 0);
 
hw_level = clamp_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
 
 
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(connector, level);
intel_panel_actually_set_backlight(connector, hw_level);
 
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
}
698,7 → 849,7
freq /= 0xff;
 
ctl = freq << 17;
if (IS_GEN2(dev) && panel->backlight.combination_mode)
if (panel->backlight.combination_mode)
ctl |= BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
ctl |= BLM_POLARITY_PNV;
732,9 → 883,6
ctl = freq << 16;
I915_WRITE(BLC_PWM_CTL, ctl);
 
/* XXX: combine this into above write? */
intel_panel_actually_set_backlight(connector, panel->backlight.level);
 
ctl2 = BLM_PIPE(pipe);
if (panel->backlight.combination_mode)
ctl2 |= BLM_COMBINATION_MODE;
743,6 → 891,8
I915_WRITE(BLC_PWM_CTL2, ctl2);
POSTING_READ(BLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
 
intel_panel_actually_set_backlight(connector, panel->backlight.level);
}
 
static void vlv_enable_backlight(struct intel_connector *connector)
793,9 → 943,6
 
if (panel->backlight.level == 0) {
panel->backlight.level = panel->backlight.max;
// if (panel->backlight.device)
// panel->backlight.device->props.brightness =
// panel->backlight.level;
}
 
dev_priv->display.enable_backlight(connector);
804,28 → 951,6
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
}
 
enum drm_connector_status
intel_panel_detect(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Assume that the BIOS does not lie through the OpRegion... */
if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
connector_status_connected :
connector_status_disconnected;
}
 
switch (i915_panel_ignore_lid) {
case -2:
return connector_status_connected;
case -1:
return connector_status_disconnected;
default:
return connector_status_unknown;
}
}
 
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
static int intel_backlight_device_update_status(struct backlight_device *bd)
{
832,12 → 957,12
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
bd->props.brightness, bd->props.max_brightness);
intel_panel_set_backlight(connector, bd->props.brightness,
bd->props.max_brightness);
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
return 0;
}
 
846,12 → 971,16
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hw_level;
int ret;
 
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev->mode_config.mutex);
ret = intel_panel_get_backlight(connector);
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 
hw_level = intel_panel_get_backlight(connector);
ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness);
 
drm_modeset_unlock(&dev->mode_config.connection_mutex);
intel_runtime_pm_put(dev_priv);
 
return ret;
870,12 → 999,19
if (WARN_ON(panel->backlight.device))
return -ENODEV;
 
BUG_ON(panel->backlight.max == 0);
WARN_ON(panel->backlight.max == 0);
 
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.brightness = panel->backlight.level;
 
/*
* Note: Everything should work even if the backlight device max
* presented to the userspace is arbitrarily chosen.
*/
props.max_brightness = panel->backlight.max;
props.brightness = scale_hw_to_user(connector,
panel->backlight.level,
props.max_brightness);
 
/*
* Note: using the same name independent of the connector prevents
921,6 → 1057,19
* XXX: Query mode clock or hardware clock and program PWM modulation frequency
* appropriately when it's 0. Use VBT and/or sane defaults.
*/
static u32 get_backlight_min_vbt(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
 
WARN_ON(panel->backlight.max == 0);
 
/* vbt value is a coefficient in range [0..255] */
return scale(dev_priv->vbt.backlight.min_brightness, 0, 255,
0, panel->backlight.max);
}
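/*
 * Example with hypothetical values: a VBT min_brightness of 26 (roughly 10%
 * of 255) combined with panel->backlight.max = 7812 yields a floor of
 * scale(26, 0, 255, 0, 7812) = 796.
 */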
 
static int bdw_setup_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
936,6 → 1085,8
if (!panel->backlight.max)
return -ENODEV;
 
panel->backlight.min = get_backlight_min_vbt(connector);
 
val = bdw_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
960,6 → 1111,8
if (!panel->backlight.max)
return -ENODEV;
 
panel->backlight.min = get_backlight_min_vbt(connector);
 
val = pch_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
979,7 → 1132,7
 
ctl = I915_READ(BLC_PWM_CTL);
 
if (IS_GEN2(dev))
if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
 
if (IS_PINEVIEW(dev))
992,6 → 1145,8
if (!panel->backlight.max)
return -ENODEV;
 
panel->backlight.min = get_backlight_min_vbt(connector);
 
val = i9xx_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
1019,6 → 1174,8
if (!panel->backlight.max)
return -ENODEV;
 
panel->backlight.min = get_backlight_min_vbt(connector);
 
val = i9xx_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
1056,6 → 1213,8
if (!panel->backlight.max)
return -ENODEV;
 
panel->backlight.min = get_backlight_min_vbt(connector);
 
val = _vlv_get_backlight(dev, PIPE_A);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
1074,6 → 1233,15
unsigned long flags;
int ret;
 
if (!dev_priv->vbt.backlight.present) {
if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n");
} else {
DRM_DEBUG_KMS("no backlight present per VBT\n");
return 0;
}
}
 
/* set level and max in panel struct */
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
ret = dev_priv->display.setup_backlight(intel_connector);
1081,7 → 1249,7
 
if (ret) {
DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
drm_get_connector_name(connector));
connector->name);
return ret;
}
 
1107,59 → 1275,6
intel_backlight_device_unregister(intel_connector);
}
 
/**
* intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
* @dev: drm device
* @fixed_mode : panel native mode
* @connector: LVDS/eDP connector
*
* Return downclock_avail
* Find the reduced downclock for LVDS/eDP in EDID.
*/
struct drm_display_mode *
intel_find_panel_downclock(struct drm_device *dev,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector)
{
struct drm_display_mode *scan, *tmp_mode;
int temp_downclock;
 
temp_downclock = fixed_mode->clock;
tmp_mode = NULL;
 
list_for_each_entry(scan, &connector->probed_modes, head) {
/*
* If one mode has the same resolution as the fixed_panel
* mode but a different refresh rate, the reduced downclock
* has been found. In that case we can program different
* FPx0/1 values to dynamically select between the low and
* high frequencies.
*/
if (scan->hdisplay == fixed_mode->hdisplay &&
scan->hsync_start == fixed_mode->hsync_start &&
scan->hsync_end == fixed_mode->hsync_end &&
scan->htotal == fixed_mode->htotal &&
scan->vdisplay == fixed_mode->vdisplay &&
scan->vsync_start == fixed_mode->vsync_start &&
scan->vsync_end == fixed_mode->vsync_end &&
scan->vtotal == fixed_mode->vtotal) {
if (scan->clock < temp_downclock) {
/*
* The downclock is already found. But we
* expect to find the lower downclock.
*/
temp_downclock = scan->clock;
tmp_mode = scan;
}
}
}
 
if (temp_downclock < fixed_mode->clock)
return drm_mode_duplicate(dev, tmp_mode);
else
return NULL;
}
 
/* Set up chip specific backlight functions */
void intel_panel_init_backlight_funcs(struct drm_device *dev)
{
1199,9 → 1314,11
}
 
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode)
struct drm_display_mode *fixed_mode,
struct drm_display_mode *downclock_mode)
{
panel->fixed_mode = fixed_mode;
panel->downclock_mode = downclock_mode;
 
return 0;
}
/drivers/video/drm/i915/intel_pm.c
51,7 → 51,18
return v;
}
 
union ktime {
s64 tv64;
};
 
typedef union ktime ktime_t; /* Kill this */
 
#define ktime_to_ns(kt) ((kt).tv64)
 
static inline u64 ktime_get_raw_ns(void)
{
return 0; //ktime_to_ns(ktime_get_raw());
}
/**
* RC6 is a special power stage which allows the GPU to enter an very
* low-voltage mode when idle, using down to 0V while at this stage. This
110,12 → 121,11
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
int plane, i;
int i;
u32 fbc_ctl;
 
cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
127,7 → 137,6
cfb_pitch = (cfb_pitch / 32) - 1;
else
cfb_pitch = (cfb_pitch / 64) - 1;
plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
 
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
138,7 → 147,7
 
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
fbc_ctl2 |= plane;
fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
}
153,7 → 162,7
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
 
DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c, ",
DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
 
168,21 → 177,22
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
u32 dpfc_ctl;
 
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
 
I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
 
/* enable it... */
I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
238,22 → 248,30
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
u32 dpfc_ctl;
 
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
dpfc_ctl &= DPFC_RESERVED;
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
/* Set persistent mode for front-buffer rendering, ala X. */
dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
 
switch (dev_priv->fbc.threshold) {
case 4:
case 3:
dpfc_ctl |= DPFC_CTL_LIMIT_4X;
break;
case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
break;
case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
break;
}
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev))
dpfc_ctl |= obj->fence_reg;
I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
 
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
296,24 → 314,42
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
 
I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
 
I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
IVB_DPFC_CTL_FENCE_EN |
intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
switch (dev_priv->fbc.threshold) {
case 4:
case 3:
dpfc_ctl |= DPFC_CTL_LIMIT_4X;
break;
case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
break;
case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
break;
}
 
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
 
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
if (IS_IVYBRIDGE(dev)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
} else {
/* WaFbcAsynchFlipDisableFbcQueue:hsw */
I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
HSW_BYPASS_FBC_QUEUE);
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
HSW_FBCQ_DIS);
}
 
I915_WRITE(SNB_DPFC_CTL_SA,
348,11 → 384,11
/* Double check that we haven't switched fb without cancelling
* the prior work.
*/
if (work->crtc->fb == work->fb) {
if (work->crtc->primary->fb == work->fb) {
dev_priv->display.enable_fbc(work->crtc);
 
dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
dev_priv->fbc.fb_id = work->crtc->fb->base.id;
dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
dev_priv->fbc.y = work->crtc->y;
}
 
405,7 → 441,7
}
 
work->crtc = crtc;
work->fb = crtc->fb;
work->fb = crtc->primary->fb;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
dev_priv->fbc.fbc_work = work;
474,7 → 510,6
struct drm_crtc *crtc = NULL, *tmp_crtc;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;
484,7 → 519,7
return;
}
 
if (!i915_powersave) {
if (!i915.powersave) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
return;
499,7 → 534,7
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
for_each_crtc(dev, tmp_crtc) {
if (intel_crtc_active(tmp_crtc) &&
to_intel_crtc(tmp_crtc)->primary_enabled) {
if (crtc) {
511,7 → 546,7
}
}
 
if (!crtc || crtc->fb == NULL) {
if (!crtc || crtc->primary->fb == NULL) {
if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
DRM_DEBUG_KMS("no output, disabling\n");
goto out_disable;
518,18 → 553,16
}
 
intel_crtc = to_intel_crtc(crtc);
fb = crtc->fb;
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
fb = crtc->primary->fb;
obj = intel_fb_obj(fb);
adjusted_mode = &intel_crtc->config.adjusted_mode;
 
if (i915_enable_fbc < 0 &&
INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
if (i915.enable_fbc < 0) {
if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
DRM_DEBUG_KMS("disabled per chip default\n");
goto out_disable;
}
if (!i915_enable_fbc) {
if (!i915.enable_fbc) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
goto out_disable;
542,8 → 575,11
goto out_disable;
}
 
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
max_width = 4096;
max_height = 4096;
} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
max_width = 4096;
max_height = 2048;
} else {
max_width = 2048;
555,7 → 591,7
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
}
if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) &&
if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
intel_crtc->plane != PLANE_A) {
if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
DRM_DEBUG_KMS("plane not A, disabling compression\n");
576,7 → 612,8
if (in_dbg_master())
goto out_disable;
 
if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
if (i915_gem_stolen_setup_compression(dev, obj->base.size,
drm_format_plane_cpp(fb->pixel_format, 0))) {
if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
goto out_disable;
635,7 → 672,7
 
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
 
tmp = I915_READ(CLKCFG);
674,7 → 711,7
 
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
u16 ddrpll, csipll;
 
ddrpll = I915_READ16(DDRMPLL1);
802,14 → 839,35
return NULL;
}
 
static void pineview_disable_cxsr(struct drm_device *dev)
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_device *dev = dev_priv->dev;
u32 val;
 
/* deactivate cxsr */
I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
if (IS_VALLEYVIEW(dev)) {
I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
} else if (IS_PINEVIEW(dev)) {
val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
I915_WRITE(DSPFW3, val);
} else if (IS_I945G(dev) || IS_I945GM(dev)) {
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
I915_WRITE(FW_BLC_SELF, val);
} else if (IS_I915GM(dev)) {
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
I915_WRITE(INSTPM, val);
} else {
return;
}
 
DRM_DEBUG_KMS("memory self-refresh is %s\n",
enable ? "enabled" : "disabled");
}
 
/*
* Latency for FIFO fetches is dependent on several factors:
* - memory configuration (speed, channels)
877,95 → 935,95
 
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
PINEVIEW_DISPLAY_FIFO,
PINEVIEW_MAX_WM,
PINEVIEW_DFT_WM,
PINEVIEW_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
.fifo_size = PINEVIEW_DISPLAY_FIFO,
.max_wm = PINEVIEW_MAX_WM,
.default_wm = PINEVIEW_DFT_WM,
.guard_size = PINEVIEW_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
PINEVIEW_DISPLAY_FIFO,
PINEVIEW_MAX_WM,
PINEVIEW_DFT_HPLLOFF_WM,
PINEVIEW_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
.fifo_size = PINEVIEW_DISPLAY_FIFO,
.max_wm = PINEVIEW_MAX_WM,
.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
.guard_size = PINEVIEW_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
PINEVIEW_CURSOR_FIFO,
PINEVIEW_CURSOR_MAX_WM,
PINEVIEW_CURSOR_DFT_WM,
PINEVIEW_CURSOR_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE,
.fifo_size = PINEVIEW_CURSOR_FIFO,
.max_wm = PINEVIEW_CURSOR_MAX_WM,
.default_wm = PINEVIEW_CURSOR_DFT_WM,
.guard_size = PINEVIEW_CURSOR_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
PINEVIEW_CURSOR_FIFO,
PINEVIEW_CURSOR_MAX_WM,
PINEVIEW_CURSOR_DFT_WM,
PINEVIEW_CURSOR_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
.fifo_size = PINEVIEW_CURSOR_FIFO,
.max_wm = PINEVIEW_CURSOR_MAX_WM,
.default_wm = PINEVIEW_CURSOR_DFT_WM,
.guard_size = PINEVIEW_CURSOR_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
G4X_FIFO_SIZE,
G4X_MAX_WM,
G4X_MAX_WM,
2,
G4X_FIFO_LINE_SIZE,
.fifo_size = G4X_FIFO_SIZE,
.max_wm = G4X_MAX_WM,
.default_wm = G4X_MAX_WM,
.guard_size = 2,
.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
I965_CURSOR_FIFO,
I965_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
G4X_FIFO_LINE_SIZE,
.fifo_size = I965_CURSOR_FIFO,
.max_wm = I965_CURSOR_MAX_WM,
.default_wm = I965_CURSOR_DFT_WM,
.guard_size = 2,
.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
VALLEYVIEW_FIFO_SIZE,
VALLEYVIEW_MAX_WM,
VALLEYVIEW_MAX_WM,
2,
G4X_FIFO_LINE_SIZE,
.fifo_size = VALLEYVIEW_FIFO_SIZE,
.max_wm = VALLEYVIEW_MAX_WM,
.default_wm = VALLEYVIEW_MAX_WM,
.guard_size = 2,
.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
I965_CURSOR_FIFO,
VALLEYVIEW_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
G4X_FIFO_LINE_SIZE,
.fifo_size = I965_CURSOR_FIFO,
.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
.default_wm = I965_CURSOR_DFT_WM,
.guard_size = 2,
.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
I965_CURSOR_FIFO,
I965_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
I915_FIFO_LINE_SIZE,
.fifo_size = I965_CURSOR_FIFO,
.max_wm = I965_CURSOR_MAX_WM,
.default_wm = I965_CURSOR_DFT_WM,
.guard_size = 2,
.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
I945_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I915_FIFO_LINE_SIZE
.fifo_size = I945_FIFO_SIZE,
.max_wm = I915_MAX_WM,
.default_wm = 1,
.guard_size = 2,
.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
I915_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I915_FIFO_LINE_SIZE
.fifo_size = I915_FIFO_SIZE,
.max_wm = I915_MAX_WM,
.default_wm = 1,
.guard_size = 2,
.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_wm_info = {
I855GM_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I830_FIFO_LINE_SIZE
.fifo_size = I855GM_FIFO_SIZE,
.max_wm = I915_MAX_WM,
.default_wm = 1,
.guard_size = 2,
.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
I830_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I830_FIFO_LINE_SIZE
.fifo_size = I830_FIFO_SIZE,
.max_wm = I915_MAX_WM,
.default_wm = 1,
.guard_size = 2,
.cacheline_size = I830_FIFO_LINE_SIZE,
};
 
/**
1022,7 → 1080,7
{
struct drm_crtc *crtc, *enabled = NULL;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
for_each_crtc(dev, crtc) {
if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
1046,7 → 1104,7
dev_priv->fsb_freq, dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
pineview_disable_cxsr(dev);
intel_set_memory_cxsr(dev_priv, false);
return;
}
 
1053,7 → 1111,7
crtc = single_enabled_crtc(dev);
if (crtc) {
const struct drm_display_mode *adjusted_mode;
int pixel_size = crtc->fb->bits_per_pixel / 8;
int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
int clock;
 
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1097,13 → 1155,9
I915_WRITE(DSPFW3, reg);
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
 
/* activate cxsr */
I915_WRITE(DSPFW3,
I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
DRM_DEBUG_KMS("Self-refresh is enabled\n");
intel_set_memory_cxsr(dev_priv, true);
} else {
pineview_disable_cxsr(dev);
DRM_DEBUG_KMS("Self-refresh is disabled\n");
intel_set_memory_cxsr(dev_priv, false);
}
}
 
1133,7 → 1187,7
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
pixel_size = crtc->primary->fb->bits_per_pixel / 8;
 
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1146,9 → 1200,9
*plane_wm = display->max_wm;
 
/* Use the large buffer method to calculate cursor watermark */
line_time_us = ((htotal * 1000) / clock);
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
entries = line_count * 64 * pixel_size;
entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
1220,9 → 1274,9
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->fb->bits_per_pixel / 8;
pixel_size = crtc->primary->fb->bits_per_pixel / 8;
 
line_time_us = (htotal * 1000) / clock;
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = hdisplay * pixel_size;
 
1234,7 → 1288,7
*display_wm = entries + display->guard_size;
 
/* calculate the self-refresh watermark for display cursor */
entries = line_count * pixel_size * 64;
entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
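
As a rough standalone illustration of the self-refresh calculation above (hypothetical 1080p-like timings and latency, not part of this revision; simplified, since the full driver path is split across hunks here), the display watermark works out as follows:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int clock = 148500, htotal = 2200, hdisplay = 1920;   /* hypothetical timings: kHz, pixels */
	int pixel_size = 4, latency_ns = 12000;               /* 32bpp, 12 usec SR latency */
	int cacheline = 64, guard = 2;                        /* FIFO line size, guard entries */

	int line_time_us = htotal * 1000 / clock;             /* ~14 usec per scanout line */
	if (line_time_us < 1)
		line_time_us = 1;
	int line_count = (latency_ns / line_time_us + 1000) / 1000;
	int line_size = hdisplay * pixel_size;

	int entries = DIV_ROUND_UP(line_count * line_size, cacheline);
	printf("display SR watermark = %d entries\n", entries + guard);
	return 0;
}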
 
1259,18 → 1313,17
return false;
 
clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
 
entries = (clock / 1000) * pixel_size;
*plane_prec_mult = (entries > 256) ?
DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
pixel_size);
*plane_prec_mult = (entries > 128) ?
DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
*plane_dl = (64 * (*plane_prec_mult) * 4) / entries;
 
entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
*cursor_prec_mult = (entries > 256) ?
DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
*cursor_prec_mult = (entries > 128) ?
DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
*cursor_dl = (64 * (*cursor_prec_mult) * 4) / entries;
 
return true;
}
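
For reference, a standalone sketch of the drain-latency arithmetic above (hypothetical clock and pixel size, not part of this revision): entries is the number of bytes fetched per microsecond, the precision multiplier becomes 64 once more than 128 entries are needed, and the drain latency is 64 * precision * 4 divided by entries.

#include <stdio.h>

int main(void)
{
	int clock = 148500;      /* hypothetical pixel clock, kHz */
	int pixel_size = 4;      /* 32bpp plane */

	int entries = (clock / 1000) * pixel_size;     /* bytes per usec */
	int prec_mult = (entries > 128) ? 64 : 32;     /* drain latency precision */
	int drain_latency = (64 * prec_mult * 4) / entries;

	printf("entries=%d precision=%d drain latency=%d\n",
	       entries, prec_mult, drain_latency);
	return 0;
}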
1295,9 → 1348,9
if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
&cursor_prec_mult, &cursora_dl)) {
cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_64;
planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_64;
 
I915_WRITE(VLV_DDL1, cursora_prec |
(cursora_dl << DDL_CURSORA_SHIFT) |
1308,9 → 1361,9
if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
&cursor_prec_mult, &cursorb_dl)) {
cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_64;
planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_64;
 
I915_WRITE(VLV_DDL2, cursorb_prec |
(cursorb_dl << DDL_CURSORB_SHIFT) |
1329,6 → 1382,7
int plane_sr, cursor_sr;
int ignore_plane_sr, ignore_cursor_sr;
unsigned int enabled = 0;
bool cxsr_enabled;
 
vlv_update_drain_latency(dev);
 
1355,10 → 1409,10
&valleyview_wm_info,
&valleyview_cursor_wm_info,
&ignore_plane_sr, &cursor_sr)) {
I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
cxsr_enabled = true;
} else {
I915_WRITE(FW_BLC_SELF_VLV,
I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
cxsr_enabled = false;
intel_set_memory_cxsr(dev_priv, false);
plane_sr = cursor_sr = 0;
}
 
1378,6 → 1432,9
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
}
 
static void g4x_update_wm(struct drm_crtc *crtc)
1388,6 → 1445,7
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
unsigned int enabled = 0;
bool cxsr_enabled;
 
if (g4x_compute_wm0(dev, PIPE_A,
&g4x_wm_info, latency_ns,
1407,10 → 1465,10
&g4x_wm_info,
&g4x_cursor_wm_info,
&plane_sr, &cursor_sr)) {
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
cxsr_enabled = true;
} else {
I915_WRITE(FW_BLC_SELF,
I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
cxsr_enabled = false;
intel_set_memory_cxsr(dev_priv, false);
plane_sr = cursor_sr = 0;
}
 
1431,6 → 1489,9
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
}
 
static void i965_update_wm(struct drm_crtc *unused_crtc)
1440,6 → 1501,7
struct drm_crtc *crtc;
int srwm = 1;
int cursor_sr = 16;
bool cxsr_enabled;
 
/* Calc sr entries for one plane configs */
crtc = single_enabled_crtc(dev);
1451,11 → 1513,11
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
int pixel_size = crtc->fb->bits_per_pixel / 8;
int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
 
line_time_us = ((htotal * 1000) / clock);
line_time_us = max(htotal * 1000 / clock, 1);
 
/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1469,7 → 1531,7
entries, srwm);
 
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * 64;
pixel_size * to_intel_crtc(crtc)->cursor_width;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
1481,13 → 1543,11
DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
 
if (IS_CRESTLINE(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
cxsr_enabled = true;
} else {
cxsr_enabled = false;
/* Turn off self refresh if both pipes are enabled */
if (IS_CRESTLINE(dev))
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
& ~FW_BLC_SELF_EN);
intel_set_memory_cxsr(dev_priv, false);
}
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1499,6 → 1559,9
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
/* update cursor SR watermark */
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
}
 
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1524,7 → 1587,7
crtc = intel_get_crtc_for_plane(dev, 0);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
int cpp = crtc->fb->bits_per_pixel / 8;
int cpp = crtc->primary->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
 
1540,7 → 1603,7
crtc = intel_get_crtc_for_plane(dev, 1);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
int cpp = crtc->fb->bits_per_pixel / 8;
int cpp = crtc->primary->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
 
1557,6 → 1620,16
 
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
 
if (IS_I915GM(dev) && enabled) {
struct drm_i915_gem_object *obj;
 
obj = intel_fb_obj(enabled->primary->fb);
 
/* self-refresh seems busted with untiled */
if (obj->tiling_mode == I915_TILING_NONE)
enabled = NULL;
}
 
/*
* Overlay gets an aggressive default since video jitter is bad.
*/
1563,10 → 1636,7
cwm = 2;
 
/* Play safe and disable self-refresh before adjusting watermarks. */
if (IS_I945G(dev) || IS_I945GM(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
else if (IS_I915GM(dev))
I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
intel_set_memory_cxsr(dev_priv, false);
 
/* Calc sr entries for one plane configs */
if (HAS_FW_BLC(dev) && enabled) {
1577,11 → 1647,11
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
int pixel_size = enabled->fb->bits_per_pixel / 8;
int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
 
line_time_us = (htotal * 1000) / clock;
line_time_us = max(htotal * 1000 / clock, 1);
 
/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1612,18 → 1682,9
I915_WRITE(FW_BLC, fwater_lo);
I915_WRITE(FW_BLC2, fwater_hi);
 
if (HAS_FW_BLC(dev)) {
if (enabled) {
if (IS_I945G(dev) || IS_I945GM(dev))
I915_WRITE(FW_BLC_SELF,
FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
else if (IS_I915GM(dev))
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
DRM_DEBUG_KMS("memory self refresh enabled\n");
} else
DRM_DEBUG_KMS("memory self refresh disabled\n");
if (enabled)
intel_set_memory_cxsr(dev_priv, true);
}
}
 
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
1833,6 → 1894,40
return 512;
}
 
static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
int level, bool is_sprite)
{
if (INTEL_INFO(dev)->gen >= 8)
/* BDW primary/sprite plane watermarks */
return level == 0 ? 255 : 2047;
else if (INTEL_INFO(dev)->gen >= 7)
/* IVB/HSW primary/sprite plane watermarks */
return level == 0 ? 127 : 1023;
else if (!is_sprite)
/* ILK/SNB primary plane watermarks */
return level == 0 ? 127 : 511;
else
/* ILK/SNB sprite plane watermarks */
return level == 0 ? 63 : 255;
}
 
static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
int level)
{
if (INTEL_INFO(dev)->gen >= 7)
return level == 0 ? 63 : 255;
else
return level == 0 ? 31 : 63;
}
 
static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen >= 8)
return 31;
else
return 15;
}
 
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
int level,
1841,7 → 1936,6
bool is_sprite)
{
unsigned int fifo_size = ilk_display_fifo_size(dev);
unsigned int max;
 
/* if sprites aren't enabled, sprites get nothing */
if (is_sprite && !config->sprites_enabled)
1872,19 → 1966,7
}
 
/* clamp to max that the registers can hold */
if (INTEL_INFO(dev)->gen >= 8)
max = level == 0 ? 255 : 2047;
else if (INTEL_INFO(dev)->gen >= 7)
/* IVB/HSW primary/sprite plane watermarks */
max = level == 0 ? 127 : 1023;
else if (!is_sprite)
/* ILK/SNB primary plane watermarks */
max = level == 0 ? 127 : 511;
else
/* ILK/SNB sprite plane watermarks */
max = level == 0 ? 63 : 255;
 
return min(fifo_size, max);
return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
 
/* Calculate the maximum cursor plane watermark */
1897,23 → 1979,10
return 64;
 
/* otherwise just report max that registers can hold */
if (INTEL_INFO(dev)->gen >= 7)
return level == 0 ? 63 : 255;
else
return level == 0 ? 31 : 63;
return ilk_cursor_wm_reg_max(dev, level);
}
 
/* Calculate the maximum FBC watermark */
static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
{
/* max that registers can hold */
if (INTEL_INFO(dev)->gen >= 8)
return 31;
else
return 15;
}
 
static void ilk_compute_wm_maximums(struct drm_device *dev,
static void ilk_compute_wm_maximums(const struct drm_device *dev,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
1922,9 → 1991,19
max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
max->cur = ilk_cursor_wm_max(dev, level, config);
max->fbc = ilk_fbc_wm_max(dev);
max->fbc = ilk_fbc_wm_reg_max(dev);
}
 
static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
int level,
struct ilk_wm_maximums *max)
{
max->pri = ilk_plane_wm_reg_max(dev, level, false);
max->spr = ilk_plane_wm_reg_max(dev, level, true);
max->cur = ilk_cursor_wm_reg_max(dev, level);
max->fbc = ilk_fbc_wm_reg_max(dev);
}
 
static bool ilk_validate_wm_level(int level,
const struct ilk_wm_maximums *max,
struct intel_wm_level *result)
1966,7 → 2045,7
return ret;
}
 
static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
int level,
const struct ilk_pipe_wm_parameters *p,
struct intel_wm_level *result)
2061,7 → 2140,7
wm[3] *= 2;
}
 
static int ilk_wm_max_level(const struct drm_device *dev)
int ilk_wm_max_level(const struct drm_device *dev)
{
/* how many WM levels are we expecting */
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2097,10 → 2176,47
}
}
 
static void intel_setup_wm_latency(struct drm_device *dev)
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
uint16_t wm[5], uint16_t min)
{
int level, max_level = ilk_wm_max_level(dev_priv->dev);
 
if (wm[0] >= min)
return false;
 
wm[0] = max(wm[0], min);
for (level = 1; level <= max_level; level++)
wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
 
return true;
}
 
static void snb_wm_latency_quirk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool changed;
 
/*
* The BIOS provided WM memory latency values are often
* inadequate for high resolution displays. Adjust them.
*/
changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
 
if (!changed)
return;
 
DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}
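
Concretely, ilk_increase_wm_latency() above bumps WM0 to at least 12 usec and raises any higher level below DIV_ROUND_UP(12, 5) = 3 usec. A standalone sketch with made-up BIOS values (assuming five WM levels; not part of this revision):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned short wm[5] = { 2, 2, 8, 16, 32 };   /* hypothetical BIOS latencies, usec */
	const unsigned short min = 12;
	int level;

	if (wm[0] < min) {
		wm[0] = min;
		for (level = 1; level < 5; level++)
			if (wm[level] < DIV_ROUND_UP(min, 5))
				wm[level] = DIV_ROUND_UP(min, 5);
	}

	for (level = 0; level < 5; level++)
		printf("wm[%d] = %u usec\n", level, wm[level]);   /* -> 12, 3, 8, 16, 32 */
	return 0;
}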
 
static void ilk_setup_wm_latency(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
 
memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2114,11 → 2230,13
intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
 
if (IS_GEN6(dev))
snb_wm_latency_quirk(dev);
}
 
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
struct ilk_pipe_wm_parameters *p,
struct intel_wm_config *config)
struct ilk_pipe_wm_parameters *p)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2125,30 → 2243,45
enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
 
p->active = intel_crtc_active(crtc);
if (p->active) {
if (!intel_crtc_active(crtc))
return;
 
p->active = true;
p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
p->cur.bytes_per_pixel = 4;
p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
p->cur.horiz_pixels = 64;
p->cur.horiz_pixels = intel_crtc->cursor_width;
/* TODO: for now, assume primary and cursor planes are always enabled. */
p->pri.enabled = true;
p->cur.enabled = true;
}
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
config->num_pipes_active += intel_crtc_active(crtc);
 
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
struct intel_plane *intel_plane = to_intel_plane(plane);
 
if (intel_plane->pipe == pipe)
if (intel_plane->pipe == pipe) {
p->spr = intel_plane->wm;
break;
}
}
}
 
config->sprites_enabled |= intel_plane->wm.enabled;
config->sprites_scaled |= intel_plane->wm.scaled;
static void ilk_compute_wm_config(struct drm_device *dev,
struct intel_wm_config *config)
{
struct intel_crtc *intel_crtc;
 
/* Compute the currently _active_ config */
for_each_intel_crtc(dev, intel_crtc) {
const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
 
if (!wm->pipe_enabled)
continue;
 
config->sprites_enabled |= wm->sprites_enabled;
config->sprites_scaled |= wm->sprites_scaled;
config->num_pipes_active++;
}
}
 
2158,7 → 2291,7
struct intel_pipe_wm *pipe_wm)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const struct drm_i915_private *dev_priv = dev->dev_private;
int level, max_level = ilk_wm_max_level(dev);
/* LP0 watermark maximums depend on this pipe alone */
struct intel_wm_config config = {
2168,8 → 2301,9
};
struct ilk_wm_maximums max;
 
/* LP0 watermarks always use 1/2 DDB partitioning */
ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
pipe_wm->pipe_enabled = params->active;
pipe_wm->sprites_enabled = params->spr.enabled;
pipe_wm->sprites_scaled = params->spr.scaled;
 
/* ILK/SNB: LP2+ watermarks only w/o sprites */
if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2179,17 → 2313,39
if (params->spr.scaled)
max_level = 0;
 
for (level = 0; level <= max_level; level++)
ilk_compute_wm_level(dev_priv, level, params,
&pipe_wm->wm[level]);
ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
 
/* LP0 watermarks always use 1/2 DDB partitioning */
ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
 
/* At least LP0 must be valid */
return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
return false;
 
ilk_compute_wm_reg_maximums(dev, 1, &max);
 
for (level = 1; level <= max_level; level++) {
struct intel_wm_level wm = {};
 
ilk_compute_wm_level(dev_priv, level, params, &wm);
 
/*
* Disable any watermark level that exceeds the
* register maximums since such watermarks are
* always invalid.
*/
if (!ilk_validate_wm_level(level, &max, &wm))
break;
 
pipe_wm->wm[level] = wm;
}
 
return true;
}
 
/*
* Merge the watermarks from all active pipes for a specific level.
*/
2199,12 → 2355,22
{
const struct intel_crtc *intel_crtc;
 
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
const struct intel_wm_level *wm =
&intel_crtc->wm.active.wm[level];
ret_wm->enable = true;
 
for_each_intel_crtc(dev, intel_crtc) {
const struct intel_pipe_wm *active = &intel_crtc->wm.active;
const struct intel_wm_level *wm = &active->wm[level];
 
if (!active->pipe_enabled)
continue;
 
/*
* The watermark values may have been used in the past,
* so we must maintain them in the registers for some
* time even if the level is now disabled.
*/
if (!wm->enable)
return;
ret_wm->enable = false;
 
ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2211,8 → 2377,6
ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
}
 
ret_wm->enable = true;
}
 
/*
2224,6 → 2388,7
struct intel_pipe_wm *merged)
{
int level, max_level = ilk_wm_max_level(dev);
int last_enabled_level = max_level;
 
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2239,8 → 2404,11
 
ilk_merge_wm_level(dev, level, wm);
 
if (!ilk_validate_wm_level(level, max, wm))
break;
if (level > last_enabled_level)
wm->enable = false;
else if (!ilk_validate_wm_level(level, max, wm))
/* make sure all following levels get disabled */
last_enabled_level = level - 1;
 
/*
* The spec says it is preferred to disable
2247,6 → 2415,7
* FBC WMs instead of disabling a WM level.
*/
if (wm->fbc_val > max->fbc) {
if (wm->enable)
merged->fbc_wm_enabled = false;
wm->fbc_val = 0;
}
2302,14 → 2471,19
level = ilk_wm_lp_to_level(wm_lp, merged);
 
r = &merged->wm[level];
if (!r->enable)
break;
 
results->wm_lp[wm_lp - 1] = WM3_LP_EN |
/*
* Maintain the watermark values even if the level is
* disabled. Doing otherwise could cause underruns.
*/
results->wm_lp[wm_lp - 1] =
(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
(r->pri_val << WM1_LP_SR_SHIFT) |
r->cur_val;
 
if (r->enable)
results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
 
if (INTEL_INFO(dev)->gen >= 8)
results->wm_lp[wm_lp - 1] |=
r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2317,6 → 2491,10
results->wm_lp[wm_lp - 1] |=
r->fbc_val << WM1_LP_FBC_SHIFT;
 
/*
* Always set WM1S_LP_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
WARN_ON(wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2325,7 → 2503,7
}
 
/* LP0 register values */
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
for_each_intel_crtc(dev, intel_crtc) {
enum pipe pipe = intel_crtc->pipe;
const struct intel_wm_level *r =
&intel_crtc->wm.active.wm[0];
2560,7 → 2738,7
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct intel_wm_config config = {};
 
ilk_compute_wm_parameters(crtc, &params, &config);
ilk_compute_wm_parameters(crtc, &params);
 
intel_compute_pipe_wm(crtc, &params, &pipe_wm);
 
2569,6 → 2747,8
 
intel_crtc->wm.active = pipe_wm;
 
ilk_compute_wm_config(dev, &config);
 
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
 
2591,10 → 2771,11
ilk_write_wm_values(dev_priv, &results);
}
 
static void ilk_update_sprite_wm(struct drm_plane *plane,
static void
ilk_update_sprite_wm(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled)
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enabled, bool scaled)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
2602,6 → 2783,7
intel_plane->wm.enabled = enabled;
intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.vert_pixels = sprite_width;
intel_plane->wm.bytes_per_pixel = pixel_size;
 
/*
2635,7 → 2817,9
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
 
if (intel_crtc_active(crtc)) {
active->pipe_enabled = intel_crtc_active(crtc);
 
if (active->pipe_enabled) {
u32 tmp = hw->wm_pipe[pipe];
 
/*
2668,7 → 2852,7
struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct drm_crtc *crtc;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
for_each_crtc(dev, crtc)
ilk_pipe_wm_get_hw_state(crtc);
 
hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
2676,8 → 2860,10
hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
 
hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
if (INTEL_INFO(dev)->gen >= 7) {
hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
}
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2732,13 → 2918,16
 
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
uint32_t sprite_width,
uint32_t sprite_height,
int pixel_size,
bool enabled, bool scaled)
{
struct drm_i915_private *dev_priv = plane->dev->dev_private;
 
if (dev_priv->display.update_sprite_wm)
dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
dev_priv->display.update_sprite_wm(plane, crtc,
sprite_width, sprite_height,
pixel_size, enabled, scaled);
}
 
2756,7 → 2945,7
return NULL;
}
 
ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
2771,7 → 2960,7
return ctx;
 
err_unpin:
i915_gem_object_unpin(ctx);
i915_gem_object_ggtt_unpin(ctx);
err_unref:
drm_gem_object_unreference(&ctx->base);
return NULL;
2871,9 → 3060,9
 
dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
I915_READ(0x112e0);
dev_priv->ips.last_time1 = jiffies_to_msecs(GetTimerTicks());
dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
dev_priv->ips.last_count2 = I915_READ(0x112f4);
getrawmonotonic(&dev_priv->ips.last_time2);
dev_priv->ips.last_time2 = ktime_get_raw_ns();
 
spin_unlock_irq(&mchdev_lock);
}
2919,9 → 3108,9
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
limits = dev_priv->rps.max_delay << 24;
if (val <= dev_priv->rps.min_delay)
limits |= dev_priv->rps.min_delay << 16;
limits = dev_priv->rps.max_freq_softlimit << 24;
if (val <= dev_priv->rps.min_freq_softlimit)
limits |= dev_priv->rps.min_freq_softlimit << 16;
 
return limits;
}
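
The returned value packs the softlimits for GEN6_RP_INTERRUPT_LIMITS: the maximum frequency goes into bits 31:24 and, when the request is already at the floor, the minimum frequency into bits 23:16 (both in 50 MHz units). A standalone sketch with hypothetical softlimits (not part of this revision):

#include <stdio.h>

int main(void)
{
	unsigned int max_soft = 22, min_soft = 11;   /* hypothetical: 1100/550 MHz in 50 MHz units */
	unsigned int val = 11;                       /* requested frequency == floor */
	unsigned int limits;

	limits = max_soft << 24;
	if (val <= min_soft)
		limits |= min_soft << 16;

	printf("GEN6_RP_INTERRUPT_LIMITS = 0x%08x\n", limits);   /* -> 0x160b0000 */
	return 0;
}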
2933,26 → 3122,26
new_power = dev_priv->rps.power;
switch (dev_priv->rps.power) {
case LOW_POWER:
if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
new_power = BETWEEN;
break;
 
case BETWEEN:
if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
new_power = LOW_POWER;
else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
new_power = HIGH_POWER;
break;
 
case HIGH_POWER:
if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
new_power = BETWEEN;
break;
}
/* Max/min bins are special */
if (val == dev_priv->rps.min_delay)
if (val == dev_priv->rps.min_freq_softlimit)
new_power = LOW_POWER;
if (val == dev_priv->rps.max_delay)
if (val == dev_priv->rps.max_freq_softlimit)
new_power = HIGH_POWER;
if (new_power == dev_priv->rps.power)
return;
3018,20 → 3207,48
dev_priv->rps.last_adj = 0;
}
 
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
u32 mask = 0;
 
if (val > dev_priv->rps.min_freq_softlimit)
mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
if (val < dev_priv->rps.max_freq_softlimit)
mask |= GEN6_PM_RP_UP_THRESHOLD;
 
mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
mask &= dev_priv->pm_rps_events;
 
/* IVB and SNB hard hang on a looping batchbuffer
* if GEN6_PM_UP_EI_EXPIRED is masked.
*/
if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
mask |= GEN6_PM_RP_UP_EI_EXPIRED;
 
if (IS_GEN8(dev_priv->dev))
mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
 
return ~mask;
}
 
/* gen6_set_rps is called to update the frequency request, but should also be
* called when the range (min_delay and max_delay) is modified so that we can
* update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_delay);
WARN_ON(val < dev_priv->rps.min_delay);
WARN_ON(val > dev_priv->rps.max_freq_softlimit);
WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
if (val == dev_priv->rps.cur_delay)
return;
 
/* min/max delay may still have been modified so be sure to
* write the limits value.
*/
if (val != dev_priv->rps.cur_freq) {
gen6_set_rps_thresholds(dev_priv, val);
 
if (IS_HASWELL(dev))
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(val));
else
3039,20 → 3256,66
GEN6_FREQUENCY(val) |
GEN6_OFFSET(0) |
GEN6_AGGRESSIVE_TURBO);
}
 
/* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
*/
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
gen6_rps_limits(dev_priv, val));
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
POSTING_READ(GEN6_RPNSWREQ);
 
dev_priv->rps.cur_delay = val;
 
dev_priv->rps.cur_freq = val;
trace_intel_gpu_freq_change(val * 50);
}
 
/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
*
* * If Gfx is Idle, then
* 1. Mask Turbo interrupts
* 2. Bring up Gfx clock
* 3. Change the freq to Rpn and wait till P-Unit updates freq
* 4. Clear the Force GFX CLK ON bit so that Gfx can clock down
* 5. Unmask Turbo interrupts
*/
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
/* Latest VLV doesn't need to force the gfx clock */
if (dev->pdev->revision >= 0xd) {
valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
return;
}
 
/*
* When we are idle, drop to the minimum voltage state.
*/
 
if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
return;
 
/* Mask turbo interrupts so that they do not come in between */
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
 
vlv_force_gfx_clock(dev_priv, true);
 
dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
 
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
dev_priv->rps.min_freq_softlimit);
 
if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
& GENFREQSTATUS) == 0, 5))
DRM_ERROR("timed out waiting for Punit\n");
 
vlv_force_gfx_clock(dev_priv, false);
 
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
}
 
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
3059,10 → 3322,12
 
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
if (IS_CHERRYVIEW(dev))
valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
else if (IS_VALLEYVIEW(dev))
vlv_set_rps_idle(dev_priv);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
dev_priv->rps.last_adj = 0;
}
mutex_unlock(&dev_priv->rps.hw_lock);
3075,9 → 3340,9
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
dev_priv->rps.last_adj = 0;
}
mutex_unlock(&dev_priv->rps.hw_lock);
3088,30 → 3353,50
struct drm_i915_private *dev_priv = dev->dev_private;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_delay);
WARN_ON(val < dev_priv->rps.min_delay);
WARN_ON(val > dev_priv->rps.max_freq_softlimit);
WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
dev_priv->rps.cur_delay,
vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
dev_priv->rps.cur_freq,
vlv_gpu_freq(dev_priv, val), val);
 
if (val == dev_priv->rps.cur_delay)
return;
 
if (val != dev_priv->rps.cur_freq)
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
 
dev_priv->rps.cur_delay = val;
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
dev_priv->rps.cur_freq = val;
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}
 
static void gen8_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
~dev_priv->pm_rps_events);
/* Complete PM interrupt masking here doesn't race with the rps work
* item again unmasking PM interrupts because that is using a different
* register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
* leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
* gen8_enable_rps will clean up. */
 
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
 
I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
}
 
static void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
~dev_priv->pm_rps_events);
/* Complete PM interrupt masking here doesn't race with the rps work
* item again unmasking PM interrupts because that is using a different
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
3121,7 → 3406,7
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
 
I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
}
 
static void gen6_disable_rps(struct drm_device *dev)
3131,9 → 3416,21
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
 
if (IS_BROADWELL(dev))
gen8_disable_rps_interrupts(dev);
else
gen6_disable_rps_interrupts(dev);
}
 
static void cherryview_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(GEN6_RC_CONTROL, 0);
 
gen8_disable_rps_interrupts(dev);
}
 
static void valleyview_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
3141,78 → 3438,111
I915_WRITE(GEN6_RC_CONTROL, 0);
 
gen6_disable_rps_interrupts(dev);
 
if (dev_priv->vlv_pctx) {
drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
dev_priv->vlv_pctx = NULL;
}
}
 
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
{
if (IS_GEN6(dev))
DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
 
if (IS_HASWELL(dev))
DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
 
DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
if (IS_VALLEYVIEW(dev)) {
if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
mode = GEN6_RC_CTL_RC6_ENABLE;
else
mode = 0;
}
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
(mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
(mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
}
 
int intel_enable_rc6(const struct drm_device *dev)
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
/* No RC6 before Ironlake */
if (INTEL_INFO(dev)->gen < 5)
return 0;
 
/* RC6 is only on Ironlake mobile, not on desktop */
if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
return 0;
 
/* Respect the kernel parameter if it is set */
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
if (enable_rc6 >= 0) {
int mask;
 
if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
INTEL_RC6pp_ENABLE;
else
mask = INTEL_RC6_ENABLE;
 
if ((enable_rc6 & mask) != enable_rc6)
DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
enable_rc6 & mask, enable_rc6, mask);
 
return enable_rc6 & mask;
}
 
/* Disable RC6 on Ironlake */
if (INTEL_INFO(dev)->gen == 5)
return 0;
 
if (IS_HASWELL(dev))
return INTEL_RC6_ENABLE;
if (IS_IVYBRIDGE(dev))
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 
/* snb/ivb have more than one rc6 state. */
if (INTEL_INFO(dev)->gen == 6)
return INTEL_RC6_ENABLE;
}
 
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
int intel_enable_rc6(const struct drm_device *dev)
{
return i915.enable_rc6;
}
 
static void gen8_enable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
 
static void gen6_enable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 enabled_intrs;
 
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
 
/* only unmask PM interrupts we need. Mask all others. */
enabled_intrs = GEN6_PM_RPS_EVENTS;
static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
{
/* All of these values are in units of 50MHz */
dev_priv->rps.cur_freq = 0;
/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
/* XXX: only BYT has a special efficient freq */
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
/* hw_max = RP0 until we check for overclocking */
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
 
/* IVB and SNB hard hang on a looping batchbuffer
* if GEN6_PM_UP_EI_EXPIRED is masked.
*/
if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
 
I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
if (dev_priv->rps.min_freq_softlimit == 0)
dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
}
 
static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
uint32_t rc6_mask = 0, rp_state_cap;
int unused;
 
3227,6 → 3557,7
I915_WRITE(GEN6_RC_CONTROL, 0);
 
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
parse_rp_state_cap(dev_priv, rp_state_cap);
 
/* 2b: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3235,26 → 3566,36
for_each_ring(ring, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
if (IS_BROADWELL(dev))
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
else
I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
 
/* 3: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
intel_print_rc6_info(dev, rc6_mask);
if (IS_BROADWELL(dev))
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
rc6_mask);
else
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN6_RC_CTL_EI_MODE(1) |
rc6_mask);
 
/* 4 Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(dev_priv->rps.rp1_freq));
I915_WRITE(GEN6_RC_VIDEO_FREQ,
HSW_FREQUENCY(dev_priv->rps.rp1_freq));
/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
 
/* Docs recommend 900MHz, and 300 MHz respectively */
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
dev_priv->rps.max_delay << 24 |
dev_priv->rps.min_delay << 16);
dev_priv->rps.max_freq_softlimit << 24 |
dev_priv->rps.min_freq_softlimit << 16);
 
I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
3276,7 → 3617,7
 
gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
 
gen6_enable_rps_interrupts(dev);
gen8_enable_rps_interrupts(dev);
 
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
3284,10 → 3625,10
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
u32 rp_state_cap;
u32 gt_perf_status;
u32 rc6vids, pcu_mbox, rc6_mask = 0;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
int i, ret;
3313,13 → 3654,7
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
/* In units of 50MHz */
dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
dev_priv->rps.cur_delay = 0;
parse_rp_state_cap(dev_priv, rp_state_cap);
 
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
3368,21 → 3703,19
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
if (!ret) {
pcu_mbox = 0;
if (ret)
DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
 
ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
(dev_priv->rps.max_delay & 0xff) * 50,
(dev_priv->rps.max_freq_softlimit & 0xff) * 50,
(pcu_mbox & 0xff) * 50);
dev_priv->rps.hw_max = pcu_mbox & 0xff;
dev_priv->rps.max_freq = pcu_mbox & 0xff;
}
} else {
DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
}
 
dev_priv->rps.power = HIGH_POWER; /* force a reset */
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 
gen6_enable_rps_interrupts(dev);
 
3403,7 → 3736,7
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
 
void gen6_update_ring_freq(struct drm_device *dev)
static void __gen6_update_ring_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
3416,8 → 3749,8
 
max_ia_freq = cpufreq_quick_get_max(0);
/*
* Default to measured freq if none found, PCU will ensure we don't go
* over
* Default to measured freq if none found, PCU will ensure we
* don't go over
*/
max_ia_freq = tsc_khz;
 
3433,9 → 3766,9
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
gpu_freq--) {
int diff = dev_priv->rps.max_delay - gpu_freq;
int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
 
if (INTEL_INFO(dev)->gen >= 8) {
3468,12 → 3801,74
}
}
 
int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
void gen6_update_ring_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
return;
 
mutex_lock(&dev_priv->rps.hw_lock);
__gen6_update_ring_freq(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
}
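
gen6_update_ring_freq() is now a thin wrapper that takes rps.hw_lock around __gen6_update_ring_freq(), so callers that already hold the lock (such as the powersave work further down) use the double-underscore helper directly. A generic userspace sketch of that locked-wrapper/unlocked-helper convention, with a pthread mutex standing in for the kernel mutex and illustrative names:

#include <pthread.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must already hold hw_lock (kernel-style "__" helper). */
static void __update_ring_freq(void)
{
	/* ... program the frequency table ... */
}

/* Public entry point: takes the lock, then calls the unlocked helper. */
static void update_ring_freq(void)
{
	pthread_mutex_lock(&hw_lock);
	__update_ring_freq();
	pthread_mutex_unlock(&hw_lock);
}

static void powersave_work(void)
{
	pthread_mutex_lock(&hw_lock);
	/* already under the lock, so call the "__" variant directly */
	__update_ring_freq();
	pthread_mutex_unlock(&hw_lock);
}

int main(void)
{
	update_ring_freq();	/* caller that does not hold the lock */
	powersave_work();	/* caller that manages the lock itself */
	return 0;
}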
 
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp0;
 
val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
 
return rp0;
}
 
static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
u32 val, rpe;
 
val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
 
return rpe;
}
 
static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp1;
 
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
 
return rp1;
}
 
static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
{
u32 val, rpn;
 
val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
return rpn;
}
 
static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp1;
 
val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
 
rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
 
return rp1;
}
 
static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp0;
 
val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
 
rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
/* Clamp to max */
rp0 = min_t(u32, rp0, 0xea);
3493,11 → 3888,49
return rpe;
}
 
int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
}
 
/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
 
WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
dev_priv->vlv_pctx->stolen->start);
}
 
 
/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
 
WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}
 
static void cherryview_setup_pctx(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long pctx_paddr, paddr;
struct i915_gtt *gtt = &dev_priv->gtt;
u32 pcbr;
int pctx_size = 32*1024;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
pcbr = I915_READ(VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
paddr = (dev_priv->mm.stolen_base +
(gtt->stolen_size - pctx_size));
 
pctx_paddr = (paddr & (~4095));
I915_WRITE(VLV_PCBR, pctx_paddr);
}
}
 
static void valleyview_setup_pctx(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
3506,6 → 3939,8
u32 pcbr;
int pctx_size = 24*1024;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
/* BIOS set it up already, grab the pre-alloc'd space */
3540,15 → 3975,203
dev_priv->vlv_pctx = pctx;
}
 
static void valleyview_cleanup_pctx(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (WARN_ON(!dev_priv->vlv_pctx))
return;
 
drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
dev_priv->vlv_pctx = NULL;
}
 
static void valleyview_init_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
valleyview_setup_pctx(dev);
 
mutex_lock(&dev_priv->rps.hw_lock);
 
dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
dev_priv->rps.max_freq);
 
dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
 
dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
dev_priv->rps.rp1_freq);
 
dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
 
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
 
if (dev_priv->rps.min_freq_softlimit == 0)
dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
 
mutex_unlock(&dev_priv->rps.hw_lock);
}
 
static void cherryview_init_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
cherryview_setup_pctx(dev);
 
mutex_lock(&dev_priv->rps.hw_lock);
 
dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
dev_priv->rps.max_freq);
 
dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
 
dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
dev_priv->rps.rp1_freq);
 
dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
 
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
 
if (dev_priv->rps.min_freq_softlimit == 0)
dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
 
mutex_unlock(&dev_priv->rps.hw_lock);
}
 
static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
{
valleyview_cleanup_pctx(dev);
}
 
static void cherryview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
u32 gtfifodbg, val, rc6_mode = 0, pcbr;
int i;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
gtfifodbg = I915_READ(GTFIFODBG);
if (gtfifodbg) {
DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
}
 
cherryview_check_pctx(dev_priv);
 
/* 1a & 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have. */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
/* 2a: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
 
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
 
I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
 
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
_MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
 
/* For now we assume BIOS is allocating and populating the PCBR */
pcbr = I915_READ(VLV_PCBR);
 
DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
 
/* 3: Enable RC6 */
if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
(pcbr >> VLV_PCBR_ADDR_SHIFT))
rc6_mode = GEN6_RC_CTL_EI_MODE(1);
 
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
/* 4 Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
/* WaDisablePwrmtrEvent:chv (pre-production hw) */
I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
 
/* 5: Enable RPS */
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
 
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
dev_priv->rps.cur_freq);
 
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
 
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
 
gen8_enable_rps_interrupts(dev);
 
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
 
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
u32 gtfifodbg, val, rc6_mode = 0;
int i;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
valleyview_check_pctx(dev_priv);
 
if ((gtfifodbg = I915_READ(GTFIFODBG))) {
DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
gtfifodbg);
3555,8 → 4178,6
I915_WRITE(GTFIFODBG, gtfifodbg);
}
 
valleyview_setup_pctx(dev);
 
/* If VLV, Forcewake all wells, else re-direct to regular path */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
3566,6 → 4187,7
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
 
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
3586,9 → 4208,11
 
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
_MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
_MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
VLV_RENDER_RC0_COUNT_EN |
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
 
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
 
3601,32 → 4225,16
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
dev_priv->rps.cur_delay = (val >> 8) & 0xff;
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
dev_priv->rps.cur_delay);
vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
dev_priv->rps.cur_freq);
 
dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.hw_max = dev_priv->rps.max_delay;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay),
dev_priv->rps.max_delay);
 
dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
dev_priv->rps.rpe_delay);
 
dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay),
dev_priv->rps.min_delay);
 
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
dev_priv->rps.rpe_delay);
vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
 
valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
 
gen6_enable_rps_interrupts(dev);
 
3638,13 → 4246,13
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->ips.renderctx) {
i915_gem_object_unpin(dev_priv->ips.renderctx);
i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
dev_priv->ips.renderctx = NULL;
}
 
if (dev_priv->ips.pwrctx) {
i915_gem_object_unpin(dev_priv->ips.pwrctx);
i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
dev_priv->ips.pwrctx = NULL;
}
3690,7 → 4298,7
static void ironlake_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
bool was_interruptible;
int ret;
 
3748,7 → 4356,7
I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 
intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
}
 
static unsigned long intel_pxfreq(u32 vidfreq)
3784,7 → 4392,7
{
u64 total_count, diff, ret;
u32 count1, count2, count3, m = 0, c = 0;
unsigned long now = jiffies_to_msecs(GetTimerTicks()), diff1;
unsigned long now = jiffies_to_msecs(jiffies), diff1;
int i;
 
assert_spin_locked(&mchdev_lock);
3836,9 → 4444,10
 
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
unsigned long val;
 
if (dev_priv->info->gen != 5)
if (INTEL_INFO(dev)->gen != 5)
return 0;
 
spin_lock_irq(&mchdev_lock);
3867,6 → 4476,7
 
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
struct drm_device *dev = dev_priv->dev;
static const struct v_table {
u16 vd; /* in .1 mil */
u16 vm; /* in .1 mil */
4000,7 → 4610,7
{ 16000, 14875, },
{ 16125, 15000, },
};
if (dev_priv->info->is_mobile)
if (INTEL_INFO(dev)->is_mobile)
return v_table[pxvid].vm;
else
return v_table[pxvid].vd;
4008,18 → 4618,16
 
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
struct timespec now, diff1;
u64 diff;
unsigned long diffms;
u64 now, diff, diffms;
u32 count;
 
assert_spin_locked(&mchdev_lock);
 
getrawmonotonic(&now);
diff1 = timespec_sub(now, dev_priv->ips.last_time2);
now = ktime_get_raw_ns();
diffms = now - dev_priv->ips.last_time2;
do_div(diffms, NSEC_PER_MSEC);
 
/* Don't divide by 0 */
diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
if (!diffms)
return;
 
4043,7 → 4651,9
 
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
if (dev_priv->info->gen != 5)
struct drm_device *dev = dev_priv->dev;
 
if (INTEL_INFO(dev)->gen != 5)
return;
 
spin_lock_irq(&mchdev_lock);
4060,7 → 4670,7
 
assert_spin_locked(&mchdev_lock);
 
pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(dev_priv, pxvid);
 
4092,9 → 4702,10
 
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
unsigned long val;
 
if (dev_priv->info->gen != 5)
if (INTEL_INFO(dev)->gen != 5)
return 0;
 
spin_lock_irq(&mchdev_lock);
4197,7 → 4808,7
bool i915_gpu_busy(void)
{
struct drm_i915_private *dev_priv;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
bool ret = false;
int i;
 
4283,6 → 4894,7
i915_mch_dev = NULL;
spin_unlock_irq(&mchdev_lock);
}
 
static void intel_init_emon(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
4354,21 → 4966,64
dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
 
void intel_init_gt_powersave(struct drm_device *dev)
{
i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
 
if (IS_CHERRYVIEW(dev))
cherryview_init_gt_powersave(dev);
else if (IS_VALLEYVIEW(dev))
valleyview_init_gt_powersave(dev);
}
 
void intel_cleanup_gt_powersave(struct drm_device *dev)
{
if (IS_CHERRYVIEW(dev))
return;
else if (IS_VALLEYVIEW(dev))
valleyview_cleanup_gt_powersave(dev);
}
 
/**
* intel_suspend_gt_powersave - suspend PM work and helper threads
* @dev: drm device
*
* We don't want to disable RC6 or other features here, we just want
* to make sure any work we've queued has finished and won't bother
* us while we're suspended.
*/
void intel_suspend_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Interrupts should be disabled already to avoid re-arming. */
WARN_ON(intel_irqs_enabled(dev_priv));
 
// flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
cancel_work_sync(&dev_priv->rps.work);
 
/* Force GPU to min freq during suspend */
gen6_rps_idle(dev_priv);
}
 
void intel_disable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Interrupts should be disabled already to avoid re-arming. */
WARN_ON(dev->irq_enabled);
WARN_ON(intel_irqs_enabled(dev_priv));
 
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6) {
cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
cancel_work_sync(&dev_priv->rps.work);
intel_suspend_gt_powersave(dev);
 
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev))
if (IS_CHERRYVIEW(dev))
cherryview_disable_rps(dev);
else if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
else
gen6_disable_rps(dev);
4386,17 → 5041,21
 
mutex_lock(&dev_priv->rps.hw_lock);
 
if (IS_VALLEYVIEW(dev)) {
if (IS_CHERRYVIEW(dev)) {
cherryview_enable_rps(dev);
} else if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
gen6_update_ring_freq(dev);
__gen6_update_ring_freq(dev);
} else {
gen6_enable_rps(dev);
gen6_update_ring_freq(dev);
__gen6_update_ring_freq(dev);
}
dev_priv->rps.enabled = true;
mutex_unlock(&dev_priv->rps.hw_lock);
 
intel_runtime_pm_put(dev_priv);
}
 
void intel_enable_gt_powersave(struct drm_device *dev)
4404,20 → 5063,38
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_IRONLAKE_M(dev)) {
mutex_lock(&dev->struct_mutex);
ironlake_enable_drps(dev);
ironlake_enable_rc6(dev);
intel_init_emon(dev);
} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
mutex_unlock(&dev->struct_mutex);
} else if (INTEL_INFO(dev)->gen >= 6) {
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
* to make resume and init faster.
*
* We depend on the HW RC6 power context save/restore
* mechanism when entering D3 through runtime PM suspend. So
* disable RPM until RPS/RC6 is properly setup. We can only
* get here via the driver load/system resume/runtime resume
* paths, so the _noresume version is enough (and in case of
* runtime resume it's necessary).
*/
schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
round_jiffies_up_relative(HZ));
if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
round_jiffies_up_relative(HZ)))
intel_runtime_pm_get_noresume(dev_priv);
}
}
 
void intel_reset_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->rps.enabled = false;
intel_enable_gt_powersave(dev);
}
 
static void ibx_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
4523,6 → 5200,9
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 
/* WaDisable_RenderCache_OperationalFlush:ilk */
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
g4x_disable_trickle_feed(dev);
 
ibx_init_clock_gating(dev);
4571,12 → 5251,10
uint32_t tmp;
 
tmp = I915_READ(MCH_SSKPD);
if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
DRM_INFO("This can cause pipe underruns and display issues.\n");
DRM_INFO("Please upgrade your BIOS to fix this.\n");
if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
tmp);
}
}
 
static void gen6_init_clock_gating(struct drm_device *dev)
{
4598,6 → 5276,20
I915_WRITE(GEN6_GT_MODE,
_MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
 
/* WaDisable_RenderCache_OperationalFlush:snb */
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
*
* Note that PS/WM thread counts depend on the WIZ hashing
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN6_GT_MODE,
GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 
ilk_init_lp_watermarks(dev);
 
I915_WRITE(CACHE_MODE_0,
4618,19 → 5310,26
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we didn't debug actual testcases to find it out.
*
* Also apply WaDisableVDSUnitClockGating:snb and
* WaDisableRCPBUnitClockGating:snb.
* WaDisableRCCUnitClockGating:snb
* WaDisableRCPBUnitClockGating:snb
*/
I915_WRITE(GEN6_UCGCTL2,
GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 
/* Bspec says we need to always set all mask bits. */
I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
/* WaStripsFansDisableFastClipPerformanceFix:snb */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
 
/*
* Bspec says:
* "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
* 3DSTATE_SF number of SF output attributes is more than 16."
*/
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
 
/*
* According to the spec the following bits should be
* set in order to enable memory self-refresh and fbc:
* The bit21 and bit22 of 0x42000
4654,11 → 5353,6
 
g4x_disable_trickle_feed(dev);
 
/* The default value should be 0x200 according to docs, but the two
* platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
 
cpt_init_clock_gating(dev);
 
gen6_check_mch_setup(dev);
4668,14 → 5362,17
{
uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
 
/*
* WaVSThreadDispatchOverride:ivb,vlv
*
* This actually overrides the dispatch
* mode for all thread types.
*/
reg &= ~GEN7_FF_SCHED_MASK;
reg |= GEN7_FF_TS_SCHED_HW;
reg |= GEN7_FF_VS_SCHED_HW;
reg |= GEN7_FF_DS_SCHED_HW;
 
if (IS_HASWELL(dev_priv->dev))
reg &= ~GEN7_FF_VS_REF_CNT_FFME;
 
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
 
4713,7 → 5410,7
static void gen8_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe i;
enum pipe pipe;
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
4722,8 → 5419,19
/* FIXME(BDW): Check all the w/a, some might only apply to
* pre-production hw. */
 
WARN(!i915_preliminary_hw_support,
"GEN8_CENTROID_PIXEL_OPT_DIS not be needed for production\n");
/* WaDisablePartialInstShootdown:bdw */
I915_WRITE(GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
 
/* WaDisableThreadStallDopClockGating:bdw */
/* FIXME: Unclear whether we really need this on production bdw. */
I915_WRITE(GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
 
/*
* This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
* pre-production hardware
*/
I915_WRITE(HALF_SLICE_CHICKEN3,
_MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
I915_WRITE(HALF_SLICE_CHICKEN3,
4731,7 → 5439,7
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
 
I915_WRITE(_3D_CHICKEN3,
_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
_MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
 
I915_WRITE(COMMON_SLICE_CHICKEN2,
_MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
4739,6 → 5447,10
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
 
/* WaDisableDopClockGating:bdw May not be needed for production */
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
4747,10 → 5459,10
I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
 
/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
for_each_pipe(i) {
I915_WRITE(CHICKEN_PIPESL_1(i),
I915_READ(CHICKEN_PIPESL_1(i) |
DPRS_MASK_VBLANK_SRD));
for_each_pipe(pipe) {
I915_WRITE(CHICKEN_PIPESL_1(pipe),
I915_READ(CHICKEN_PIPESL_1(pipe)) |
BDW_DPRS_MASK_VBLANK_SRD);
}
 
/* Use Force Non-Coherent whenever executing a 3D context. This is a
4766,6 → 5478,28
I915_WRITE(GEN7_FF_THREAD_MODE,
I915_READ(GEN7_FF_THREAD_MODE) &
~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
 
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
*
* Note that PS/WM thread counts depend on the WIZ hashing
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 
I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
 
/* WaDisableSDEUnitClockGating:bdw */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
/* Wa4x4STCOptimizationDisable:bdw */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
}
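
The clock-gating functions above lean heavily on _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE(). The sketch below reflects my understanding (an assumption, not taken from this diff) that these target "masked" registers, where the upper 16 bits of the written value select which of the lower 16 bits actually change, avoiding a read-modify-write:

#include <stdint.h>
#include <stdio.h>

/* Sketch of masked-register write semantics as assumed above. */
#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a) ((a) << 16)

/* Model of what the hardware does with a masked write: only bits whose
 * mask bit (upper half) is set take the new value from the lower half. */
static uint32_t masked_write(uint32_t reg, uint32_t val)
{
	uint32_t mask = val >> 16;
	return (reg & ~mask) | (val & mask & 0xffff);
}

int main(void)
{
	uint32_t cache_mode = 0x0000;	/* hypothetical current contents */

	cache_mode = masked_write(cache_mode, MASKED_BIT_ENABLE(1 << 5));
	cache_mode = masked_write(cache_mode, MASKED_BIT_DISABLE(1 << 0));
	printf("register now 0x%04x\n", cache_mode);
	return 0;
}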
 
static void haswell_init_clock_gating(struct drm_device *dev)
4774,21 → 5508,6
 
ilk_init_lp_watermarks(dev);
 
/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:hsw workaround.
*/
I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 
/* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 
/* WaApplyL3ControlAndL3ChickenMode:hsw */
I915_WRITE(GEN7_L3CNTLREG1,
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
GEN7_WA_L3_CHICKEN_MODE);
 
/* L3 caching of data atomics doesn't work -- disable it. */
I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
I915_WRITE(HSW_ROW_CHICKEN3,
4800,12 → 5519,31
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
/* WaVSRefCountFullforceMissDisable:hsw */
gen7_setup_fixed_func_scheduler(dev_priv);
I915_WRITE(GEN7_FF_THREAD_MODE,
I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
 
/* WaDisable_RenderCache_OperationalFlush:hsw */
I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
/* enable HiZ Raw Stall Optimization */
I915_WRITE(CACHE_MODE_0_GEN7,
_MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
 
/* WaDisable4x2SubspanOptimization:hsw */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
*
* Note that PS/WM thread counts depend on the WIZ hashing
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
4838,10 → 5576,10
if (IS_IVB_GT1(dev))
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
else
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
/* WaDisable_RenderCache_OperationalFlush:ivb */
I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
4854,31 → 5592,24
if (IS_IVB_GT1(dev))
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
else
else {
/* must write both registers */
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
}
 
 
/* WaForceL3Serialization:ivb */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
* flickering pixels due to Z write ordering failures after
* some amount of runtime in the Mesa "fire" demo, and Unigine
* Sanctuary and Tropics, and apparently anything else with
* alpha test or pixel discard.
*
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we didn't debug actual testcases to find it out.
*
/*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:ivb workaround.
*/
I915_WRITE(GEN6_UCGCTL2,
GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 
/* This is required by WaCatErrorRejectionIssue:ivb */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
4887,13 → 5618,29
 
g4x_disable_trickle_feed(dev);
 
/* WaVSRefCountFullforceMissDisable:ivb */
gen7_setup_fixed_func_scheduler(dev_priv);
 
if (0) { /* causes HiZ corruption on ivb:gt1 */
/* enable HiZ Raw Stall Optimization */
I915_WRITE(CACHE_MODE_0_GEN7,
_MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
}
 
/* WaDisable4x2SubspanOptimization:ivb */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
*
* Note that PS/WM thread counts depend on the WIZ hashing
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= GEN6_MBC_SNPCR_MED;
4915,14 → 5662,12
mutex_unlock(&dev_priv->rps.hw_lock);
switch ((val >> 6) & 3) {
case 0:
case 1:
dev_priv->mem_freq = 800;
break;
case 1:
case 2:
dev_priv->mem_freq = 1066;
break;
case 2:
dev_priv->mem_freq = 1333;
break;
case 3:
dev_priv->mem_freq = 1333;
break;
4940,19 → 5685,15
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
/* WaPsdDispatchEnable:vlv */
/* WaDisablePSDDualDispatchEnable:vlv */
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
/* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
/* WaDisable_RenderCache_OperationalFlush:vlv */
I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
/* WaApplyL3ControlAndL3ChickenMode:vlv */
I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
 
/* WaForceL3Serialization:vlv */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
4966,53 → 5707,126
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
* flickering pixels due to Z write ordering failures after
* some amount of runtime in the Mesa "fire" demo, and Unigine
* Sanctuary and Tropics, and apparently anything else with
* alpha test or pixel discard.
*
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we didn't debug actual testcases to find it out.
*
gen7_setup_fixed_func_scheduler(dev_priv);
 
/*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:vlv workaround.
*
* Also apply WaDisableVDSUnitClockGating:vlv and
* WaDisableRCPBUnitClockGating:vlv.
*/
I915_WRITE(GEN6_UCGCTL2,
GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 
I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
/* WaDisableL3Bank2xClockGate:vlv
* Disabling L3 clock gating- MMIO 940c[25] = 1
* Set bit 25, to disable L3_BANK_2x_CLK_GATING */
I915_WRITE(GEN7_UCGCTL4,
I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
 
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
 
/*
* BSpec says this must be set, even though
* WaDisable4x2SubspanOptimization isn't listed for VLV.
*/
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
/*
* WaIncreaseL3CreditsForVLVB0:vlv
* This is the hardware default actually.
*/
I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
 
/*
* WaDisableVLVClockGating_VBIIssue:vlv
* Disable clock gating on the GCFG unit to prevent a delay
* in the reporting of vblank events.
*/
I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
 
/* Conservative clock gating settings for now */
I915_WRITE(0x9400, 0xffffffff);
I915_WRITE(0x9404, 0xffffffff);
I915_WRITE(0x9408, 0xffffffff);
I915_WRITE(0x940c, 0xffffffff);
I915_WRITE(0x9410, 0xffffffff);
I915_WRITE(0x9414, 0xffffffff);
I915_WRITE(0x9418, 0xffffffff);
static void cherryview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
 
mutex_lock(&dev_priv->rps.hw_lock);
val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
mutex_unlock(&dev_priv->rps.hw_lock);
switch ((val >> 2) & 0x7) {
case 0:
case 1:
dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
dev_priv->mem_freq = 1600;
break;
case 2:
dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
dev_priv->mem_freq = 1600;
break;
case 3:
dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
dev_priv->mem_freq = 2000;
break;
case 4:
dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
dev_priv->mem_freq = 1600;
break;
case 5:
dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
dev_priv->mem_freq = 1600;
break;
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
 
/* WaDisablePartialInstShootdown:chv */
I915_WRITE(GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
 
/* WaDisableThreadStallDopClockGating:chv */
I915_WRITE(GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
 
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
I915_WRITE(GEN7_FF_THREAD_MODE,
I915_READ(GEN7_FF_THREAD_MODE) &
~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
 
/* WaDisableSemaphoreAndSyncFlipWait:chv */
I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
 
/* WaDisableCSUnitClockGating:chv */
I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
GEN6_CSUNIT_CLOCK_GATE_DISABLE);
 
/* WaDisableSDEUnitClockGating:chv */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
/* WaDisableSamplerPowerBypass:chv (pre-production hw) */
I915_WRITE(HALF_SLICE_CHICKEN3,
_MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
 
/* WaDisableGunitClockGating:chv (pre-production hw) */
I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
GINT_DIS);
 
/* WaDisableFfDopClockGating:chv (pre-production hw) */
I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
 
/* WaDisableDopClockGating:chv (pre-production hw) */
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
 
static void g4x_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
5034,6 → 5848,9
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 
/* WaDisable_RenderCache_OperationalFlush:g4x */
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
g4x_disable_trickle_feed(dev);
}
 
5048,6 → 5865,9
I915_WRITE16(DEUC, 0);
I915_WRITE(MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
 
/* WaDisable_RenderCache_OperationalFlush:gen4 */
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
 
static void broadwater_init_clock_gating(struct drm_device *dev)
5062,6 → 5882,9
I915_WRITE(RENCLK_GATE_D2, 0);
I915_WRITE(MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
 
/* WaDisable_RenderCache_OperationalFlush:gen4 */
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
 
static void gen3_init_clock_gating(struct drm_device *dev)
5078,6 → 5901,12
 
/* IIR "flip pending" means done if this bit is set */
I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
 
/* interrupts should cause a wake up from C3 */
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
 
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
}
 
static void i85x_init_clock_gating(struct drm_device *dev)
5085,6 → 5914,10
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
 
/* interrupts should cause a wake up from C3 */
I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
_MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
}
 
static void i830_init_clock_gating(struct drm_device *dev)
5125,58 → 5958,65
* enable it, so check if it's enabled and also check if we've requested it to
* be enabled.
*/
static bool hsw_power_well_enabled(struct drm_device *dev,
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
return I915_READ(HSW_PWR_WELL_DRIVER) ==
(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
 
bool intel_display_power_enabled_sw(struct drm_device *dev,
bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
 
power_domains = &dev_priv->power_domains;
 
return power_domains->domain_use_count[domain];
}
 
bool intel_display_power_enabled(struct drm_device *dev,
enum intel_display_power_domain domain)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
bool is_enabled;
int i;
 
if (dev_priv->pm.suspended)
return false;
 
power_domains = &dev_priv->power_domains;
 
is_enabled = true;
 
mutex_lock(&power_domains->lock);
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
if (power_well->always_on)
continue;
 
if (!power_well->is_enabled(dev, power_well)) {
if (!power_well->hw_enabled) {
is_enabled = false;
break;
}
}
mutex_unlock(&power_domains->lock);
 
return is_enabled;
}
 
bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
bool ret;
 
power_domains = &dev_priv->power_domains;
 
mutex_lock(&power_domains->lock);
ret = intel_display_power_enabled_unlocked(dev_priv, domain);
mutex_unlock(&power_domains->lock);
 
return ret;
}
 
/*
* Starting with Haswell, we have a "Power Down Well" that can be turned off
* when not needed anymore. We have 4 registers that can request the power well
* to be enabled, and it will only be disabled if none of the registers is
* requesting it to be enabled.
*/
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
unsigned long irqflags;
 
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
5192,52 → 6032,16
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
// vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
 
if (IS_BROADWELL(dev)) {
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
dev_priv->de_irq_mask[PIPE_B]);
I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
~dev_priv->de_irq_mask[PIPE_B] |
GEN8_PIPE_VBLANK);
I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
dev_priv->de_irq_mask[PIPE_C]);
I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
~dev_priv->de_irq_mask[PIPE_C] |
GEN8_PIPE_VBLANK);
POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
if (IS_BROADWELL(dev))
gen8_irq_power_well_post_enable(dev_priv);
}
}
 
static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
enum pipe p;
unsigned long irqflags;
 
/*
* After this, the registers on the pipes that are part of the power
* well will become zero, so we have to adjust our counters according to
* that.
*
* FIXME: Should we do this in general in drm_vblank_post_modeset?
*/
// spin_lock_irqsave(&dev->vbl_lock, irqflags);
// for_each_pipe(p)
// if (p != PIPE_A)
// dev->vblank[p].last = 0;
// spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
 
static void hsw_set_power_well(struct drm_device *dev,
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool is_enabled, enable_requested;
uint32_t tmp;
 
WARN_ON(dev_priv->pc8.enabled);
 
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
5260,61 → 6064,272
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
}
}
}
 
hsw_power_well_post_disable(dev_priv);
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
 
/*
* We're taking over the BIOS, so clear any requests made by it since
* the driver is in charge now.
*/
if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
 
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
hsw_set_power_well(dev_priv, power_well, true);
}
 
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
hsw_set_power_well(dev_priv, power_well, false);
}
 
static void __intel_power_well_get(struct drm_device *dev,
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
struct drm_i915_private *dev_priv = dev->dev_private;
}
 
if (!power_well->count++ && power_well->set) {
hsw_disable_package_c8(dev_priv);
power_well->set(dev, power_well, true);
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
return true;
}
 
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
enum punit_power_well power_well_id = power_well->data;
u32 mask;
u32 state;
u32 ctrl;
 
mask = PUNIT_PWRGT_MASK(power_well_id);
state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
PUNIT_PWRGT_PWR_GATE(power_well_id);
 
mutex_lock(&dev_priv->rps.hw_lock);
 
#define COND \
((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
 
if (COND)
goto out;
 
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
ctrl &= ~mask;
ctrl |= state;
vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
 
if (wait_for(COND, 100))
DRM_ERROR("timout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
 
#undef COND
 
out:
mutex_unlock(&dev_priv->rps.hw_lock);
}
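
vlv_set_power_well() above writes the Punit control register and then polls the status register (via the local COND/wait_for pair) until the requested state shows up or roughly 100 ms pass. A generic userspace sketch of that poll-with-timeout idea; read_status() is a stand-in for the Punit status read and the timing is purely illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for reading the power-gate status register and masking it. */
static unsigned read_status(void)
{
	static int calls;
	return ++calls > 3 ? 0x1 : 0x0;	/* pretend the well powers up on the 4th read */
}

/* Poll until the status equals the requested state or timeout_ms expires. */
static bool wait_for_state(unsigned state, int timeout_ms)
{
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };	/* 1 ms */

	for (int waited = 0; waited <= timeout_ms; waited++) {
		if (read_status() == state)
			return true;
		nanosleep(&delay, NULL);
	}
	return false;
}

int main(void)
{
	if (!wait_for_state(0x1, 100))
		fprintf(stderr, "timeout setting power well state\n");
	else
		printf("power well is up\n");
	return 0;
}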
 
static void __intel_power_well_put(struct drm_device *dev,
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
struct drm_i915_private *dev_priv = dev->dev_private;
vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}
 
WARN_ON(!power_well->count);
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
vlv_set_power_well(dev_priv, power_well, true);
}
 
if (!--power_well->count && power_well->set &&
i915_disable_power_well) {
power_well->set(dev, power_well, false);
hsw_enable_package_c8(dev_priv);
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
vlv_set_power_well(dev_priv, power_well, false);
}
 
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
int power_well_id = power_well->data;
bool enabled = false;
u32 mask;
u32 state;
u32 ctrl;
 
mask = PUNIT_PWRGT_MASK(power_well_id);
ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
 
mutex_lock(&dev_priv->rps.hw_lock);
 
state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
/*
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
state != PUNIT_PWRGT_PWR_GATE(power_well_id));
if (state == ctrl)
enabled = true;
 
/*
* A transient state at this point would mean some unexpected party
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
WARN_ON(ctrl != state);
 
mutex_unlock(&dev_priv->rps.hw_lock);
 
return enabled;
}
 
void intel_display_power_get(struct drm_device *dev,
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
 
vlv_set_power_well(dev_priv, power_well, true);
 
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
 
/*
* During driver initialization/resume we can avoid restoring the
* part of the HW/SW state that will be inited anyway explicitly.
*/
if (dev_priv->power_domains.initializing)
return;
 
intel_hpd_init(dev_priv->dev);
 
i915_redisable_vga_power_on(dev_priv->dev);
}
 
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
 
spin_lock_irq(&dev_priv->irq_lock);
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
 
vlv_set_power_well(dev_priv, power_well, false);
}
 
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
 
/*
* Enable the CRI clock source so we can get at the
* display and the reference clock for VGA
* hotplug / manual detection.
*/
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
 
vlv_set_power_well(dev_priv, power_well, true);
 
/*
* From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
* 6. De-assert cmn_reset/side_reset. Same as VLV X0.
* a. GUnit 0x2110 bit[0] set to 1 (def 0)
* b. The other bits such as sfr settings / modesel may all
* be set to 0.
*
* This should only be done on init and resume from S3 with
* both PLLs disabled, or we risk losing DPIO and PLL
* synchronization.
*/
I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
 
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
struct drm_device *dev = dev_priv->dev;
enum pipe pipe;
 
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
 
for_each_pipe(pipe)
assert_pll_disabled(dev_priv, pipe);
 
/* Assert common reset */
I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
 
vlv_set_power_well(dev_priv, power_well, false);
}
 
static void check_power_well_state(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
 
if (power_well->always_on || !i915.disable_power_well) {
if (!enabled)
goto mismatch;
 
return;
}
 
if (enabled != (power_well->count > 0))
goto mismatch;
 
return;
 
mismatch:
WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
power_well->name, power_well->always_on, enabled,
power_well->count, i915.disable_power_well);
}
 
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
int i;
 
intel_runtime_pm_get(dev_priv);
 
power_domains = &dev_priv->power_domains;
 
mutex_lock(&power_domains->lock);
 
for_each_power_well(i, power_well, BIT(domain), power_domains)
__intel_power_well_get(dev, power_well);
for_each_power_well(i, power_well, BIT(domain), power_domains) {
if (!power_well->count++) {
DRM_DEBUG_KMS("enabling %s\n", power_well->name);
power_well->ops->enable(dev_priv, power_well);
power_well->hw_enabled = true;
}
 
check_power_well_state(dev_priv, power_well);
}
 
power_domains->domain_use_count[domain]++;
 
mutex_unlock(&power_domains->lock);
}
 
void intel_display_power_put(struct drm_device *dev,
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
int i;
5326,61 → 6341,166
WARN_ON(!power_domains->domain_use_count[domain]);
power_domains->domain_use_count[domain]--;
 
for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
__intel_power_well_put(dev, power_well);
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
WARN_ON(!power_well->count);
 
if (!--power_well->count && i915.disable_power_well) {
DRM_DEBUG_KMS("disabling %s\n", power_well->name);
power_well->hw_enabled = false;
power_well->ops->disable(dev_priv, power_well);
}
 
check_power_well_state(dev_priv, power_well);
}
 
mutex_unlock(&power_domains->lock);
 
intel_runtime_pm_put(dev_priv);
}
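
intel_display_power_get()/put() above keep a per-well use count: the first get powers a well up, the last put (when disable_power_well allows it) powers it back down, with a per-domain counter kept alongside. A minimal refcount sketch of that contract, using illustrative names rather than the driver's structures:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct power_well {
	const char *name;
	int count;
	bool hw_enabled;
};

static void power_well_get(struct power_well *well)
{
	if (!well->count++) {		/* 0 -> 1: actually power it up */
		printf("enabling %s\n", well->name);
		well->hw_enabled = true;
	}
}

static void power_well_put(struct power_well *well, bool allow_disable)
{
	assert(well->count > 0);
	if (!--well->count && allow_disable) {	/* 1 -> 0: power it down */
		printf("disabling %s\n", well->name);
		well->hw_enabled = false;
	}
}

int main(void)
{
	struct power_well display = { .name = "display" };

	power_well_get(&display);	/* first user enables the well */
	power_well_get(&display);	/* second user: no hardware access */
	power_well_put(&display, true);
	power_well_put(&display, true);	/* last user disables the well */
	return 0;
}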
 
static struct i915_power_domains *hsw_pwr;
 
/* Display audio driver power well request */
void i915_request_power_well(void)
int i915_request_power_well(void)
{
struct drm_i915_private *dev_priv;
 
if (WARN_ON(!hsw_pwr))
return;
if (!hsw_pwr)
return -ENODEV;
 
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO);
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
return 0;
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
 
/* Display audio driver power well release */
void i915_release_power_well(void)
int i915_release_power_well(void)
{
struct drm_i915_private *dev_priv;
 
if (WARN_ON(!hsw_pwr))
return;
if (!hsw_pwr)
return -ENODEV;
 
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
return 0;
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
 
/*
* Private interface for the audio driver to get CDCLK in kHz.
*
* Caller must request power well using i915_request_power_well() prior to
* making the call.
*/
int i915_get_cdclk_freq(void)
{
struct drm_i915_private *dev_priv;
 
if (!hsw_pwr)
return -ENODEV;
 
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
 
return intel_ddi_get_cdclk_freq(dev_priv);
}
EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
 
 
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
 
#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
BIT(POWER_DOMAIN_INIT))
 
#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
HSW_ALWAYS_ON_POWER_DOMAINS | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
 
#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_always_on_power_well_noop,
.enable = i9xx_always_on_power_well_noop,
.disable = i9xx_always_on_power_well_noop,
.is_enabled = i9xx_always_on_power_well_enabled,
};
 
static struct i915_power_well i9xx_always_on_power_well[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
};
 
static const struct i915_power_well_ops hsw_power_well_ops = {
.sync_hw = hsw_power_well_sync_hw,
.enable = hsw_power_well_enable,
.disable = hsw_power_well_disable,
.is_enabled = hsw_power_well_enabled,
};
 
static struct i915_power_well hsw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
.ops = &i9xx_always_on_power_well_ops,
},
{
.name = "display",
.domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS,
.is_enabled = hsw_power_well_enabled,
.set = hsw_set_power_well,
.domains = HSW_DISPLAY_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
},
};
 
5389,23 → 6509,115
.name = "always-on",
.always_on = 1,
.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
.ops = &i9xx_always_on_power_well_ops,
},
{
.name = "display",
.domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS,
.is_enabled = hsw_power_well_enabled,
.set = hsw_set_power_well,
.domains = BDW_DISPLAY_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
},
};
 
static const struct i915_power_well_ops vlv_display_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.enable = vlv_display_power_well_enable,
.disable = vlv_display_power_well_disable,
.is_enabled = vlv_power_well_enabled,
};
 
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.enable = vlv_dpio_cmn_power_well_enable,
.disable = vlv_dpio_cmn_power_well_disable,
.is_enabled = vlv_power_well_enabled,
};
 
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.enable = vlv_power_well_enable,
.disable = vlv_power_well_disable,
.is_enabled = vlv_power_well_enabled,
};
 
static struct i915_power_well vlv_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
.ops = &i9xx_always_on_power_well_ops,
},
{
.name = "display",
.domains = VLV_DISPLAY_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DISP2D,
.ops = &vlv_display_power_well_ops,
},
{
.name = "dpio-tx-b-01",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
},
{
.name = "dpio-tx-b-23",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
},
{
.name = "dpio-tx-c-01",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
},
{
.name = "dpio-tx-c-23",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
},
{
.name = "dpio-common",
.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &vlv_dpio_cmn_power_well_ops,
},
};
 
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
enum punit_power_well power_well_id)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
int i;
 
for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
if (power_well->data == power_well_id)
return power_well;
}
 
return NULL;
}
 
#define set_power_wells(power_domains, __power_wells) ({ \
(power_domains)->power_wells = (__power_wells); \
(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
})
 
int intel_power_domains_init(struct drm_device *dev)
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
mutex_init(&power_domains->lock);
5414,12 → 6626,14
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
if (IS_HASWELL(dev)) {
if (IS_HASWELL(dev_priv->dev)) {
set_power_wells(power_domains, hsw_power_wells);
hsw_pwr = power_domains;
} else if (IS_BROADWELL(dev)) {
} else if (IS_BROADWELL(dev_priv->dev)) {
set_power_wells(power_domains, bdw_power_wells);
hsw_pwr = power_domains;
} else if (IS_VALLEYVIEW(dev_priv->dev)) {
set_power_wells(power_domains, vlv_power_wells);
} else {
set_power_wells(power_domains, i9xx_always_on_power_well);
}
5427,14 → 6641,13
return 0;
}
 
void intel_power_domains_remove(struct drm_device *dev)
void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
hsw_pwr = NULL;
}
 
static void intel_power_domains_resume(struct drm_device *dev)
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
int i;
5441,44 → 6654,71
 
mutex_lock(&power_domains->lock);
for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
if (power_well->set)
power_well->set(dev, power_well, power_well->count > 0);
power_well->ops->sync_hw(dev_priv, power_well);
power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
power_well);
}
mutex_unlock(&power_domains->lock);
}
 
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn =
lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
struct i915_power_well *disp2d =
lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
 
/* nothing to do if common lane is already off */
if (!cmn->ops->is_enabled(dev_priv, cmn))
return;
 
/* If the display might be already active skip this */
if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
I915_READ(DPIO_CTL) & DPIO_CMNRST)
return;
 
DRM_DEBUG_KMS("toggling display PHY side reset\n");
 
/* cmnlane needs DPLL registers */
disp2d->ops->enable(dev_priv, disp2d);
 
/*
* Starting with Haswell, we have a "Power Down Well" that can be turned off
* when not needed anymore. We have 4 registers that can request the power well
* to be enabled, and it will only be disabled if none of the registers is
* requesting it to be enabled.
* From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
* Need to assert and de-assert PHY SB reset by gating the
* common lane power, then un-gating it.
* Simply ungating isn't enough to reset the PHY enough to get
* ports and lanes running.
*/
void intel_power_domains_init_hw(struct drm_device *dev)
cmn->ops->disable(dev_priv, cmn);
}
 
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_device *dev = dev_priv->dev;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
/* For now, we need the power well to be always enabled. */
intel_display_set_init_power(dev, true);
intel_power_domains_resume(dev);
power_domains->initializing = true;
 
if (!(IS_HASWELL(dev) || IS_BROADWELL(dev)))
return;
if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
mutex_unlock(&power_domains->lock);
}
 
/* We're taking over the BIOS, so clear any requests made by it since
* the driver is in charge now. */
if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
I915_WRITE(HSW_PWR_WELL_BIOS, 0);
/* For now, we need the power well to be always enabled. */
intel_display_set_init_power(dev_priv, true);
intel_power_domains_resume(dev_priv);
power_domains->initializing = false;
}
 
/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
hsw_disable_package_c8(dev_priv);
intel_runtime_pm_get(dev_priv);
}
 
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
hsw_enable_package_c8(dev_priv);
intel_runtime_pm_put(dev_priv);
}
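These two helpers are meant to be used as a matched pair around GMBUS/DP AUX access, so the hardware cannot enter a power state that masks those interrupts mid-transfer. A hedged sketch of the expected bracketing (the transfer body is a placeholder):

/* Illustrative pairing only; the actual AUX transaction is elided. */
static void example_aux_transfer(struct drm_i915_private *dev_priv)
{
	intel_aux_display_runtime_get(dev_priv);
	/* ... issue the GMBUS or DP AUX transaction here ... */
	intel_aux_display_runtime_put(dev_priv);
}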
 
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
5486,14 → 6726,31
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
 
// pm_runtime_get_sync(device);
WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}
 
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
 
WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
// pm_runtime_get_noresume(device);
}
 
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
 
}
5503,10 → 6760,20
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
dev_priv->pm.suspended = false;
if (!HAS_RUNTIME_PM(dev))
return;
 
 
/*
* RPM depends on RC6 to save restore the GT HW context, so make RC6 a
* requirement.
*/
if (!intel_enable_rc6(dev)) {
DRM_INFO("RC6 disabled, disabling runtime PM support\n");
return;
}
 
 
}
 
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
5514,8 → 6781,12
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
 
if (!intel_enable_rc6(dev))
return;
 
}
 
/* Set up chip specific power management-related functions */
5554,7 → 6825,7
 
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
intel_setup_wm_latency(dev);
ilk_setup_wm_latency(dev);
 
if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
5577,6 → 6848,10
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
else if (INTEL_INFO(dev)->gen == 8)
dev_priv->display.init_clock_gating = gen8_init_clock_gating;
} else if (IS_CHERRYVIEW(dev)) {
dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.init_clock_gating =
cherryview_init_clock_gating;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.init_clock_gating =
5592,7 → 6867,7
(dev_priv->is_ddr3 == 1) ? "3" : "2",
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
pineview_disable_cxsr(dev);
intel_set_memory_cxsr(dev_priv, false);
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
5675,7 → 6950,7
return 0;
}
 
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int div;
 
5697,7 → 6972,7
return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}
 
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
int mul;
 
5719,6 → 6994,80
return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}
 
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int div, freq;
 
switch (dev_priv->rps.cz_freq) {
case 200:
div = 5;
break;
case 267:
div = 6;
break;
case 320:
case 333:
case 400:
div = 8;
break;
default:
return -1;
}
 
freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
 
return freq;
}
 
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
int mul, opcode;
 
switch (dev_priv->rps.cz_freq) {
case 200:
mul = 5;
break;
case 267:
mul = 6;
break;
case 320:
case 333:
case 400:
mul = 8;
break;
default:
return -1;
}
 
opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
 
return opcode;
}
 
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int ret = -1;
 
if (IS_CHERRYVIEW(dev_priv->dev))
ret = chv_gpu_freq(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
ret = byt_gpu_freq(dev_priv, val);
 
return ret;
}
 
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
int ret = -1;
 
if (IS_CHERRYVIEW(dev_priv->dev))
ret = chv_freq_opcode(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
ret = byt_freq_opcode(dev_priv, val);
 
return ret;
}
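For Cherryview the two helpers above are exact inverses of each other whenever the result divides evenly. A self-contained sketch of the arithmetic, assuming a cz_freq of 320 MHz (so div/mul = 8) and a simplified positive-only DIV_ROUND_CLOSEST:

#include <assert.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d)) /* positive operands only */

static int example_chv_gpu_freq(int val)      /* mirrors chv_gpu_freq() with cz_freq = 320 */
{
	return DIV_ROUND_CLOSEST(320 * val, 2 * 8) / 2;
}

static int example_chv_freq_opcode(int freq)  /* mirrors chv_freq_opcode() with cz_freq = 320 */
{
	return DIV_ROUND_CLOSEST(freq * 2 * 8, 320) * 2;
}

int main(void)
{
	assert(example_chv_gpu_freq(44) == 440);    /* opcode 44 -> 440 MHz */
	assert(example_chv_freq_opcode(440) == 44); /* and back again */
	return 0;
}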
 
void intel_pm_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
5725,13 → 7074,9
 
mutex_init(&dev_priv->rps.hw_lock);
 
mutex_init(&dev_priv->pc8.lock);
dev_priv->pc8.requirements_met = false;
dev_priv->pc8.gpu_idle = false;
dev_priv->pc8.irqs_disabled = false;
dev_priv->pc8.enabled = false;
dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
 
dev_priv->pm.suspended = false;
dev_priv->pm._irqs_disabled = false;
}
/drivers/video/drm/i915/intel_renderstate.h
0,0 → 1,46
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
#ifndef _INTEL_RENDERSTATE_H
#define _INTEL_RENDERSTATE_H
 
#include <linux/types.h>
 
struct intel_renderstate_rodata {
const u32 *reloc;
const u32 *batch;
const u32 batch_items;
};
 
extern const struct intel_renderstate_rodata gen6_null_state;
extern const struct intel_renderstate_rodata gen7_null_state;
extern const struct intel_renderstate_rodata gen8_null_state;
 
#define RO_RENDERSTATE(_g) \
const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
.reloc = gen ## _g ## _null_state_relocs, \
.batch = gen ## _g ## _null_state_batch, \
.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
}
 
#endif /* INTEL_RENDERSTATE_H */
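For reference, the RO_RENDERSTATE() macro simply stitches the per-generation relocation and batch tables into one read-only descriptor; expanding RO_RENDERSTATE(6) by hand gives (illustration only):

const struct intel_renderstate_rodata gen6_null_state = {
	.reloc = gen6_null_state_relocs,
	.batch = gen6_null_state_batch,
	.batch_items = sizeof(gen6_null_state_batch)/4, /* dword count */
};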
/drivers/video/drm/i915/intel_renderstate_gen6.c
0,0 → 1,290
#include "intel_renderstate.h"
 
static const u32 gen6_null_state_relocs[] = {
0x00000020,
0x00000024,
0x0000002c,
0x000001e0,
0x000001e4,
-1,
};
 
static const u32 gen6_null_state_batch[] = {
0x69040000,
0x790d0001,
0x00000000,
0x00000000,
0x78180000,
0x00000001,
0x61010008,
0x00000000,
0x00000001, /* reloc */
0x00000001, /* reloc */
0x00000000,
0x00000001, /* reloc */
0x00000000,
0x00000001,
0x00000000,
0x00000001,
0x61020000,
0x00000000,
0x78050001,
0x00000018,
0x00000000,
0x780d1002,
0x00000000,
0x00000000,
0x00000420,
0x78150003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78100004,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78160003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78110005,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78120002,
0x00000000,
0x00000000,
0x00000000,
0x78170003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79050005,
0xe0040000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79100000,
0x00000000,
0x79000002,
0xffffffff,
0x00000000,
0x00000000,
0x780e0002,
0x00000441,
0x00000401,
0x00000401,
0x78021002,
0x00000000,
0x00000000,
0x00000400,
0x78130012,
0x00400810,
0x00000000,
0x20000000,
0x04000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78140007,
0x00000280,
0x08080000,
0x00000000,
0x00060000,
0x4e080002,
0x00100400,
0x00000000,
0x00000000,
0x78090005,
0x02000000,
0x22220000,
0x02f60000,
0x11330000,
0x02850004,
0x11220000,
0x78011002,
0x00000000,
0x00000000,
0x00000200,
0x78080003,
0x00002000,
0x00000448, /* reloc */
0x00000448, /* reloc */
0x00000000,
0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000220, /* state start */
0x00000240,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x0060005a,
0x204077be,
0x000000c0,
0x008d0040,
0x0060005a,
0x206077be,
0x000000c0,
0x008d0080,
0x0060005a,
0x208077be,
0x000000d0,
0x008d0040,
0x0060005a,
0x20a077be,
0x000000d0,
0x008d0080,
0x00000201,
0x20080061,
0x00000000,
0x00000000,
0x00600001,
0x20200022,
0x008d0000,
0x00000000,
0x02800031,
0x21c01cc9,
0x00000020,
0x0a8a0001,
0x00600001,
0x204003be,
0x008d01c0,
0x00000000,
0x00600001,
0x206003be,
0x008d01e0,
0x00000000,
0x00600001,
0x208003be,
0x008d0200,
0x00000000,
0x00600001,
0x20a003be,
0x008d0220,
0x00000000,
0x00600001,
0x20c003be,
0x008d0240,
0x00000000,
0x00600001,
0x20e003be,
0x008d0260,
0x00000000,
0x00600001,
0x210003be,
0x008d0280,
0x00000000,
0x00600001,
0x212003be,
0x008d02a0,
0x00000000,
0x05800031,
0x24001cc8,
0x00000040,
0x90019000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x30000000,
0x00000124,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0xf99a130c,
0x799a130c,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x80000031,
0x00000003,
0x00000000, /* state end */
};
 
RO_RENDERSTATE(6);
/drivers/video/drm/i915/intel_renderstate_gen7.c
0,0 → 1,254
#include "intel_renderstate.h"
 
static const u32 gen7_null_state_relocs[] = {
0x0000000c,
0x00000010,
0x00000018,
0x000001ec,
-1,
};
 
static const u32 gen7_null_state_batch[] = {
0x69040000,
0x61010008,
0x00000000,
0x00000001, /* reloc */
0x00000001, /* reloc */
0x00000000,
0x00000001, /* reloc */
0x00000000,
0x00000001,
0x00000000,
0x00000001,
0x790d0002,
0x00000000,
0x00000000,
0x00000000,
0x78180000,
0x00000001,
0x79160000,
0x00000008,
0x78300000,
0x02010040,
0x78310000,
0x04000000,
0x78320000,
0x04000000,
0x78330000,
0x02000000,
0x78100004,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781b0005,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781c0002,
0x00000000,
0x00000000,
0x00000000,
0x781d0004,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78110005,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78120002,
0x00000000,
0x00000000,
0x00000000,
0x78210000,
0x00000000,
0x78130005,
0x00000000,
0x20000000,
0x04000000,
0x00000000,
0x00000000,
0x00000000,
0x78140001,
0x20000800,
0x00000000,
0x781e0001,
0x00000000,
0x00000000,
0x78050005,
0xe0040000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78040001,
0x00000000,
0x00000000,
0x78240000,
0x00000240,
0x78230000,
0x00000260,
0x782f0000,
0x00000280,
0x781f000c,
0x00400810,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78200006,
0x000002c0,
0x08080000,
0x00000000,
0x28000402,
0x00060000,
0x00000000,
0x00000000,
0x78090005,
0x02000000,
0x22220000,
0x02f60000,
0x11230000,
0x02f60004,
0x11230000,
0x78080003,
0x00006008,
0x00000340, /* reloc */
0xffffffff,
0x00000000,
0x782a0000,
0x00000360,
0x79000002,
0xffffffff,
0x00000000,
0x00000000,
0x7b000005,
0x0000000f,
0x00000003,
0x00000000,
0x00000001,
0x00000000,
0x00000000,
0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000031, /* state start */
0x00000003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0xf99a130c,
0x799a130c,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000492,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x0080005a,
0x2e2077bd,
0x000000c0,
0x008d0040,
0x0080005a,
0x2e6077bd,
0x000000d0,
0x008d0040,
0x02800031,
0x21801fa9,
0x008d0e20,
0x08840001,
0x00800001,
0x2e2003bd,
0x008d0180,
0x00000000,
0x00800001,
0x2e6003bd,
0x008d01c0,
0x00000000,
0x00800001,
0x2ea003bd,
0x008d0200,
0x00000000,
0x00800001,
0x2ee003bd,
0x008d0240,
0x00000000,
0x05800031,
0x20001fa8,
0x008d0e20,
0x90031000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000380,
0x000003a0,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000, /* state end */
};
 
RO_RENDERSTATE(7);
/drivers/video/drm/i915/intel_renderstate_gen8.c
0,0 → 1,480
#include "intel_renderstate.h"
 
static const u32 gen8_null_state_relocs[] = {
0x00000048,
0x00000050,
0x00000060,
0x000003ec,
-1,
};
 
static const u32 gen8_null_state_batch[] = {
0x69040000,
0x61020001,
0x00000000,
0x00000000,
0x79120000,
0x00000000,
0x79130000,
0x00000000,
0x79140000,
0x00000000,
0x79150000,
0x00000000,
0x79160000,
0x00000000,
0x6101000e,
0x00000001,
0x00000000,
0x00000001,
0x00000001, /* reloc */
0x00000000,
0x00000001, /* reloc */
0x00000000,
0x00000000,
0x00000000,
0x00000001, /* reloc */
0x00000000,
0xfffff001,
0x00001001,
0xfffff001,
0x00001001,
0x78230000,
0x000006e0,
0x78210000,
0x00000700,
0x78300000,
0x08010040,
0x78330000,
0x08000000,
0x78310000,
0x08000000,
0x78320000,
0x08000000,
0x78240000,
0x00000641,
0x780e0000,
0x00000601,
0x780d0000,
0x00000000,
0x78180000,
0x00000001,
0x78520003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78190009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781b0007,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78270000,
0x00000000,
0x782c0000,
0x00000000,
0x781c0002,
0x00000000,
0x00000000,
0x00000000,
0x78160009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78110008,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78290000,
0x00000000,
0x782e0000,
0x00000000,
0x781a0009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781d0007,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78280000,
0x00000000,
0x782d0000,
0x00000000,
0x78260000,
0x00000000,
0x782b0000,
0x00000000,
0x78150009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78100007,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781e0003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78120002,
0x00000000,
0x00000000,
0x00000000,
0x781f0002,
0x30400820,
0x00000000,
0x00000000,
0x78510009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78500003,
0x00210000,
0x00000000,
0x00000000,
0x00000000,
0x78130002,
0x00000000,
0x00000000,
0x00000000,
0x782a0000,
0x00000480,
0x782f0000,
0x00000540,
0x78140000,
0x00000800,
0x78170009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x7820000a,
0x00000580,
0x00000000,
0x08080000,
0x00000000,
0x00000000,
0x1f000002,
0x00060000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x784d0000,
0x40000000,
0x784f0000,
0x80000100,
0x780f0000,
0x00000740,
0x78050006,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78070003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78060003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78040001,
0x00000000,
0x00000001,
0x79000002,
0xffffffff,
0x00000000,
0x00000000,
0x78080003,
0x00006000,
0x000005e0, /* reloc */
0x00000000,
0x00000000,
0x78090005,
0x02000000,
0x22220000,
0x02f60000,
0x11230000,
0x02850004,
0x11230000,
0x784b0000,
0x0000000f,
0x78490001,
0x00000000,
0x00000000,
0x7b000005,
0x00000000,
0x00000003,
0x00000000,
0x00000001,
0x00000000,
0x00000000,
0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x000004c0, /* state start */
0x00000500,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000092,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x0060005a,
0x21403ae8,
0x3a0000c0,
0x008d0040,
0x0060005a,
0x21603ae8,
0x3a0000c0,
0x008d0080,
0x0060005a,
0x21803ae8,
0x3a0000d0,
0x008d0040,
0x0060005a,
0x21a03ae8,
0x3a0000d0,
0x008d0080,
0x02800031,
0x2e0022e8,
0x0e000140,
0x08840001,
0x05800031,
0x200022e0,
0x0e000e00,
0x90031000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0xf99a130c,
0x799a130c,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x3f800000,
0x00000000,
0x3f800000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000, /* state end */
};
 
RO_RENDERSTATE(8);
/drivers/video/drm/i915/intel_ringbuffer.c
33,26 → 33,43
#include "i915_trace.h"
#include "intel_drv.h"
 
static inline int ring_space(struct intel_ring_buffer *ring)
/* Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill,
* but keeps the logic simple. Indeed, the whole purpose of this macro is just
* to give some indication as to some of the magic values used in the various
* workarounds!
*/
#define CACHELINE_BYTES 64
 
static inline int __ring_space(int head, int tail, int size)
{
int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
int space = head - (tail + I915_RING_FREE_SPACE);
if (space < 0)
space += ring->size;
space += size;
return space;
}
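The helper above treats the ring as circular and always keeps I915_RING_FREE_SPACE bytes in reserve. A standalone sketch of the same arithmetic, assuming the reserve is 64 bytes (an assumption about I915_RING_FREE_SPACE) and a 4 KiB ring:

#include <assert.h>

/* Re-statement of __ring_space() for illustration; 64 stands in for
 * I915_RING_FREE_SPACE. */
static int example_ring_space(int head, int tail, int size)
{
	int space = head - (tail + 64);
	if (space < 0)
		space += size;
	return space;
}

int main(void)
{
	assert(example_ring_space(3840, 256, 4096) == 3520); /* no wrap */
	assert(example_ring_space(256, 3840, 4096) == 448);  /* wraps past the end */
	return 0;
}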
 
void __intel_ring_advance(struct intel_ring_buffer *ring)
static inline int ring_space(struct intel_ringbuffer *ringbuf)
{
return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
}
 
static bool intel_ring_stopped(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
}
 
ring->tail &= ring->size - 1;
if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
void __intel_ring_advance(struct intel_engine_cs *ring)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
ringbuf->tail &= ringbuf->size - 1;
if (intel_ring_stopped(ring))
return;
ring->write_tail(ring, ring->tail);
ring->write_tail(ring, ringbuf->tail);
}
 
static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
gen2_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains,
u32 flush_domains)
{
78,7 → 95,7
}
 
static int
gen4_render_ring_flush(struct intel_ring_buffer *ring,
gen4_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains,
u32 flush_domains)
{
173,9 → 190,9
* really our business. That leaves only stall at scoreboard.
*/
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
{
u32 scratch_addr = ring->scratch.gtt_offset + 128;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
 
 
208,11 → 225,11
}
 
static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
gen6_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
u32 scratch_addr = ring->scratch.gtt_offset + 128;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
 
/* Force SNB workarounds for PIPE_CONTROL flushes */
260,7 → 277,7
}
 
static int
gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
{
int ret;
 
278,7 → 295,7
return 0;
}
 
static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
{
int ret;
 
302,11 → 319,11
}
 
static int
gen7_render_ring_flush(struct intel_ring_buffer *ring,
gen7_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
u32 scratch_addr = ring->scratch.gtt_offset + 128;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
 
/*
363,11 → 380,32
}
 
static int
gen8_render_ring_flush(struct intel_ring_buffer *ring,
gen8_emit_pipe_control(struct intel_engine_cs *ring,
u32 flags, u32 scratch_addr)
{
int ret;
 
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
 
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(ring, flags);
intel_ring_emit(ring, scratch_addr);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
 
return 0;
}
 
static int
gen8_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
u32 scratch_addr = ring->scratch.gtt_offset + 128;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
 
flags |= PIPE_CONTROL_CS_STALL;
385,41 → 423,43
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
}
 
ret = intel_ring_begin(ring, 6);
/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
ret = gen8_emit_pipe_control(ring,
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD,
0);
if (ret)
return ret;
}
 
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(ring, flags);
intel_ring_emit(ring, scratch_addr);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
 
return 0;
 
return gen8_emit_pipe_control(ring, flags, scratch_addr);
}
 
static void ring_write_tail(struct intel_ring_buffer *ring,
static void ring_write_tail(struct intel_engine_cs *ring,
u32 value)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
I915_WRITE_TAIL(ring, value);
}
 
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
RING_ACTHD(ring->mmio_base) : ACTHD;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u64 acthd;
 
return I915_READ(acthd_reg);
if (INTEL_INFO(ring->dev)->gen >= 8)
acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
RING_ACTHD_UDW(ring->mmio_base));
else if (INTEL_INFO(ring->dev)->gen >= 4)
acthd = I915_READ(RING_ACTHD(ring->mmio_base));
else
acthd = I915_READ(ACTHD);
 
return acthd;
}
 
static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 addr;
430,30 → 470,42
I915_WRITE(HWS_PGA, addr);
}
 
static int init_ring_common(struct intel_ring_buffer *ring)
static bool stop_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = ring->obj;
int ret = 0;
u32 head;
struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
if (!IS_GEN2(ring->dev)) {
I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
return false;
}
}
 
if (I915_NEED_GFX_HWS(dev))
intel_ring_setup_status_page(ring);
else
ring_setup_phys_status_page(ring);
 
/* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
ring->write_tail(ring, 0);
 
head = I915_READ_HEAD(ring) & HEAD_ADDR;
if (!IS_GEN2(ring->dev)) {
(void)I915_READ_CTL(ring);
I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}
 
/* G45 ring initialization fails to reset head to zero */
if (head != 0) {
return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
}
 
static int init_ring_common(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_object *obj = ringbuf->obj;
int ret = 0;
 
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
if (!stop_ring(ring)) {
/* G45 ring initialization often fails to reset head to zero */
DRM_DEBUG_KMS("%s head not reset to zero "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
462,9 → 514,7
I915_READ_TAIL(ring),
I915_READ_START(ring));
 
I915_WRITE_HEAD(ring, 0);
 
if (I915_READ_HEAD(ring) & HEAD_ADDR) {
if (!stop_ring(ring)) {
DRM_ERROR("failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
472,9 → 522,19
I915_READ_HEAD(ring),
I915_READ_TAIL(ring),
I915_READ_START(ring));
ret = -EIO;
goto out;
}
}
 
if (I915_NEED_GFX_HWS(dev))
intel_ring_setup_status_page(ring);
else
ring_setup_phys_status_page(ring);
 
/* Enforce ordering by reading HEAD register back */
I915_READ_HEAD(ring);
 
/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
481,7 → 541,7
* register values. */
I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
 
/* If the head is still not zero, the ring is dead */
489,21 → 549,21
I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
(I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
"ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
ring->name,
I915_READ_CTL(ring),
I915_READ_HEAD(ring),
I915_READ_TAIL(ring),
I915_READ_START(ring));
I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
I915_READ_HEAD(ring), I915_READ_TAIL(ring),
I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
ret = -EIO;
goto out;
}
 
ring->head = I915_READ_HEAD(ring);
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring_space(ring);
ring->last_retired_head = -1;
 
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ringbuf->space = ring_space(ringbuf);
ringbuf->last_retired_head = -1;
 
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
out:
513,7 → 573,7
}
 
static int
init_pipe_control(struct intel_ring_buffer *ring)
init_pipe_control(struct intel_engine_cs *ring)
{
int ret;
 
527,9 → 587,11
goto err;
}
 
i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
if (ret)
goto err_unref;
 
ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
if (ret)
goto err_unref;
 
545,7 → 607,7
return 0;
 
err_unpin:
i915_gem_object_unpin(ring->scratch.obj);
i915_gem_object_ggtt_unpin(ring->scratch.obj);
err_unref:
drm_gem_object_unreference(&ring->scratch.obj->base);
err:
552,13 → 614,16
return ret;
}
 
static int init_render_ring(struct intel_ring_buffer *ring)
static int init_render_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = init_ring_common(ring);
if (ret)
return ret;
 
if (INTEL_INFO(dev)->gen > 3)
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 
/* We need to disable the AsyncFlip performance optimisations in order
565,19 → 630,21
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
*/
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
/* Required for the hardware to program scanline values for waiting */
/* WaEnableFlushTlbInvalidationMode:snb */
if (INTEL_INFO(dev)->gen == 6)
I915_WRITE(GFX_MODE,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
 
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
if (IS_GEN7(dev))
I915_WRITE(GFX_MODE_GEN7,
_MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
if (INTEL_INFO(dev)->gen >= 5) {
594,13 → 661,6
*/
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 
/* This is not explicitly set for GEN6, so read the register.
* see intel_ring_mi_set_context() for why we care.
* TODO: consider explicitly setting the bit for GEN5
*/
ring->itlb_before_ctx_switch =
!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
}
 
if (INTEL_INFO(dev)->gen >= 6)
612,7 → 672,7
return ret;
}
 
static void render_ring_cleanup(struct intel_ring_buffer *ring)
static void render_ring_cleanup(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
 
621,7 → 681,7
 
if (INTEL_INFO(dev)->gen >= 5) {
// kunmap(sg_page(ring->scratch.obj->pages->sgl));
i915_gem_object_unpin(ring->scratch.obj);
i915_gem_object_ggtt_unpin(ring->scratch.obj);
}
 
drm_gem_object_unreference(&ring->scratch.obj->base);
628,22 → 688,113
ring->scratch.obj = NULL;
}
 
static void
update_mboxes(struct intel_ring_buffer *ring,
u32 mmio_offset)
static int gen8_rcs_signal(struct intel_engine_cs *signaller,
unsigned int num_dwords)
{
/* NB: In order to be able to do semaphore MBOX updates for varying number
* of rings, it's easiest if we round up each individual update to a
* multiple of 2 (since ring updates must always be a multiple of 2)
* even though the actual update only requires 3 dwords.
*/
#define MBOX_UPDATE_DWORDS 4
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, mmio_offset);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, MI_NOOP);
#define MBOX_UPDATE_DWORDS 8
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *waiter;
int i, ret, num_rings;
 
num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS
 
ret = intel_ring_begin(signaller, num_dwords);
if (ret)
return ret;
 
for_each_ring(waiter, dev_priv, i) {
u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
 
intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_FLUSH_ENABLE);
intel_ring_emit(signaller, lower_32_bits(gtt_offset));
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
intel_ring_emit(signaller, 0);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->id));
intel_ring_emit(signaller, 0);
}
 
return 0;
}
 
static int gen8_xcs_signal(struct intel_engine_cs *signaller,
unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *waiter;
int i, ret, num_rings;
 
num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS
 
ret = intel_ring_begin(signaller, num_dwords);
if (ret)
return ret;
 
for_each_ring(waiter, dev_priv, i) {
u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
 
intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
MI_FLUSH_DW_OP_STOREDW);
intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
MI_FLUSH_DW_USE_GTT);
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->id));
intel_ring_emit(signaller, 0);
}
 
return 0;
}
 
static int gen6_signal(struct intel_engine_cs *signaller,
unsigned int num_dwords)
{
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *useless;
int i, ret, num_rings;
 
#define MBOX_UPDATE_DWORDS 3
num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS
 
ret = intel_ring_begin(signaller, num_dwords);
if (ret)
return ret;
 
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = signaller->semaphore.mbox.signal[i];
if (mbox_reg != GEN6_NOSYNC) {
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(signaller, mbox_reg);
intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
}
}
 
/* If num_dwords was rounded, make sure the tail pointer is correct */
if (num_rings % 2 == 0)
intel_ring_emit(signaller, MI_NOOP);
 
return 0;
}
 
/**
* gen6_add_request - Update the semaphore mailbox registers
*
654,29 → 805,18
* This acts like a signal in the canonical semaphore.
*/
static int
gen6_add_request(struct intel_ring_buffer *ring)
gen6_add_request(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *useless;
int i, ret, num_dwords = 4;
int ret;
 
if (i915_semaphore_is_enabled(dev))
num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
#undef MBOX_UPDATE_DWORDS
if (ring->semaphore.signal)
ret = ring->semaphore.signal(ring, 4);
else
ret = intel_ring_begin(ring, 4);
 
ret = intel_ring_begin(ring, num_dwords);
if (ret)
return ret;
 
if (i915_semaphore_is_enabled(dev)) {
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = ring->signal_mbox[i];
if (mbox_reg != GEN6_NOSYNC)
update_mboxes(ring, mbox_reg);
}
}
 
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
700,15 → 840,42
* @signaller - ring which has, or will signal
* @seqno - seqno which the waiter will block on
*/
 
static int
gen6_ring_sync(struct intel_ring_buffer *waiter,
struct intel_ring_buffer *signaller,
gen8_ring_sync(struct intel_engine_cs *waiter,
struct intel_engine_cs *signaller,
u32 seqno)
{
struct drm_i915_private *dev_priv = waiter->dev->dev_private;
int ret;
 
ret = intel_ring_begin(waiter, 4);
if (ret)
return ret;
 
intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_GTE_SDD);
intel_ring_emit(waiter, seqno);
intel_ring_emit(waiter,
lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
intel_ring_emit(waiter,
upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
intel_ring_advance(waiter);
return 0;
}
 
static int
gen6_ring_sync(struct intel_engine_cs *waiter,
struct intel_engine_cs *signaller,
u32 seqno)
{
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
int ret;
 
/* Throughout all of the GEM code, seqno passed implies our current
* seqno is >= the last seqno executed. However for hardware the
716,8 → 883,7
*/
seqno -= 1;
 
WARN_ON(signaller->semaphore_register[waiter->id] ==
MI_SEMAPHORE_SYNC_INVALID);
WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
ret = intel_ring_begin(waiter, 4);
if (ret)
725,9 → 891,7
 
/* If seqno wrap happened, omit the wait with no-ops */
if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
intel_ring_emit(waiter,
dw1 |
signaller->semaphore_register[waiter->id]);
intel_ring_emit(waiter, dw1 | wait_mbox);
intel_ring_emit(waiter, seqno);
intel_ring_emit(waiter, 0);
intel_ring_emit(waiter, MI_NOOP);
752,9 → 916,9
} while (0)
 
static int
pc_render_add_request(struct intel_ring_buffer *ring)
pc_render_add_request(struct intel_engine_cs *ring)
{
u32 scratch_addr = ring->scratch.gtt_offset + 128;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
 
/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
776,15 → 940,15
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128; /* write to separate cachelines */
scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
scratch_addr += 2 * CACHELINE_BYTES;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
scratch_addr += 2 * CACHELINE_BYTES;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
scratch_addr += 2 * CACHELINE_BYTES;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
scratch_addr += 2 * CACHELINE_BYTES;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
 
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
800,45 → 964,48
}
 
static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
* ACTHD) before reading the status page. */
if (!lazy_coherency)
intel_ring_get_active_head(ring);
if (!lazy_coherency) {
struct drm_i915_private *dev_priv = ring->dev->dev_private;
POSTING_READ(RING_ACTHD(ring->mmio_base));
}
 
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
 
static u32
ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
 
static void
ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}
 
static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
return ring->scratch.cpu_page[0];
}
 
static void
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
ring->scratch.cpu_page[0] = seqno;
}
 
static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
gen5_ring_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
846,7 → 1013,7
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0)
ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
return true;
853,23 → 1020,23
}
 
static void
gen5_ring_put_irq(struct intel_ring_buffer *ring)
gen5_ring_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0)
ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
 
static bool
i9xx_ring_get_irq(struct intel_ring_buffer *ring)
i9xx_ring_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
887,10 → 1054,10
}
 
static void
i9xx_ring_put_irq(struct intel_ring_buffer *ring)
i9xx_ring_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
903,10 → 1070,10
}
 
static bool
i8xx_ring_get_irq(struct intel_ring_buffer *ring)
i8xx_ring_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
924,10 → 1091,10
}
 
static void
i8xx_ring_put_irq(struct intel_ring_buffer *ring)
i8xx_ring_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
939,10 → 1106,10
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
 
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
void intel_ring_setup_status_page(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 mmio = 0;
 
/* The ring status page addresses are no longer next to the rest of
956,6 → 1123,11
case BCS:
mmio = BLT_HWS_PGA_GEN7;
break;
/*
* VCS2 doesn't actually exist on Gen7; this case only
* silences the gcc switch-coverage warning.
*/
case VCS2:
case VCS:
mmio = BSD_HWS_PGA_GEN7;
break;
973,9 → 1145,19
I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
POSTING_READ(mmio);
 
/* Flush the TLB for this page */
if (INTEL_INFO(dev)->gen >= 6) {
/*
* Flush the TLB for this page
*
* FIXME: These two bits have disappeared on gen8, so a question
* arises: do we still need this and if so how should we go about
* invalidating the TLB?
*/
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
u32 reg = RING_INSTPM(ring->mmio_base);
 
/* ring should be idle before issuing a sync flush */
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
I915_WRITE(reg,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
INSTPM_SYNC_FLUSH));
987,7 → 1169,7
}
 
static int
bsd_ring_flush(struct intel_ring_buffer *ring,
bsd_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains,
u32 flush_domains)
{
1004,7 → 1186,7
}
 
static int
i9xx_add_request(struct intel_ring_buffer *ring)
i9xx_add_request(struct intel_engine_cs *ring)
{
int ret;
 
1022,10 → 1204,10
}
 
static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring)
gen6_ring_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
1039,7 → 1221,7
GT_PARITY_ERROR(dev)));
else
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
1047,10 → 1229,10
}
 
static void
gen6_ring_put_irq(struct intel_ring_buffer *ring)
gen6_ring_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
1059,13 → 1241,13
I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
else
I915_WRITE_IMR(ring, ~0);
ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
 
static bool
hsw_vebox_get_irq(struct intel_ring_buffer *ring)
hsw_vebox_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1077,7 → 1259,7
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
1085,7 → 1267,7
}
 
static void
hsw_vebox_put_irq(struct intel_ring_buffer *ring)
hsw_vebox_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1097,13 → 1279,13
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
I915_WRITE_IMR(ring, ~0);
snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
 
static bool
gen8_ring_get_irq(struct intel_ring_buffer *ring)
gen8_ring_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1129,7 → 1311,7
}
 
static void
gen8_ring_put_irq(struct intel_ring_buffer *ring)
gen8_ring_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1149,8 → 1331,8
}
 
static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 length,
i965_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 length,
unsigned flags)
{
int ret;
1172,8 → 1354,8
/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
#define I830_BATCH_LIMIT (256*1024)
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len,
i830_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
unsigned flags)
{
int ret;
1223,8 → 1405,8
}
 
static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len,
i915_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
unsigned flags)
{
int ret;
1240,7 → 1422,7
return 0;
}
 
static void cleanup_status_page(struct intel_ring_buffer *ring)
static void cleanup_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_gem_object *obj;
 
1249,38 → 1431,54
return;
 
// kunmap(sg_page(obj->pages->sgl));
i915_gem_object_unpin(obj);
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
}
 
static int init_status_page(struct intel_ring_buffer *ring)
static int init_status_page(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_gem_object *obj;
 
if ((obj = ring->status_page.obj) == NULL) {
unsigned flags;
int ret;
 
obj = i915_gem_alloc_object(dev, 4096);
obj = i915_gem_alloc_object(ring->dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate status page\n");
ret = -ENOMEM;
goto err;
return -ENOMEM;
}
 
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
if (ret)
goto err_unref;
 
ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
if (ret != 0) {
goto err_unref;
flags = 0;
if (!HAS_LLC(ring->dev))
/* On g33, we cannot place HWS above 256MiB, so
* restrict its pinning to the low mappable arena.
* Though this restriction is not documented for
* gen4, gen5, or byt, they also behave similarly
* and hang if the HWS is placed at the top of the
* GTT. To generalise, it appears that all !llc
* platforms have issues with us placing the HWS
* above the mappable region (even though we never
* actually map it).
*/
flags |= PIN_MAPPABLE;
ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
if (ret) {
err_unref:
drm_gem_object_unreference(&obj->base);
return ret;
}
 
ring->status_page.obj = obj;
}
 
ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
ring->status_page.page_addr = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096,PG_SW|0x100);
if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM;
goto err_unpin;
}
ring->status_page.obj = obj;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1287,16 → 1485,9
ring->name, ring->status_page.gfx_addr);
 
return 0;
 
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
drm_gem_object_unreference(&obj->base);
err:
return ret;
}
 
static int init_phys_status_page(struct intel_ring_buffer *ring)
static int init_phys_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
1313,46 → 1504,39
return 0;
}
 
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
if (!ringbuf->obj)
return;
 
iounmap(ringbuf->virtual_start);
i915_gem_object_ggtt_unpin(ringbuf->obj);
drm_gem_object_unreference(&ringbuf->obj->base);
ringbuf->obj = NULL;
}
 
static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
ring->size = 32 * PAGE_SIZE;
memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
if (ringbuf->obj)
return 0;
 
init_waitqueue_head(&ring->irq_queue);
 
if (I915_NEED_GFX_HWS(dev)) {
ret = init_status_page(ring);
if (ret)
return ret;
} else {
BUG_ON(ring->id != RCS);
ret = init_phys_status_page(ring);
if (ret)
return ret;
}
 
obj = NULL;
if (!HAS_LLC(dev))
obj = i915_gem_object_create_stolen(dev, ring->size);
obj = i915_gem_object_create_stolen(dev, ringbuf->size);
if (obj == NULL)
obj = i915_gem_alloc_object(dev, ring->size);
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
ret = -ENOMEM;
goto err_hws;
}
obj = i915_gem_alloc_object(dev, ringbuf->size);
if (obj == NULL)
return -ENOMEM;
 
ring->obj = obj;
/* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1;
 
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
if (ret)
goto err_unref;
 
1360,63 → 1544,98
if (ret)
goto err_unpin;
 
ring->virtual_start =
ringbuf->virtual_start =
ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
ringbuf->size);
if (ringbuf->virtual_start == NULL) {
ret = -EINVAL;
goto err_unpin;
}
 
ret = ring->init(ring);
ringbuf->obj = obj;
return 0;
 
err_unpin:
i915_gem_object_ggtt_unpin(obj);
err_unref:
drm_gem_object_unreference(&obj->base);
return ret;
}
 
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_engine_cs *ring)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
int ret;
 
if (ringbuf == NULL) {
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
if (!ringbuf)
return -ENOMEM;
ring->buffer = ringbuf;
}
 
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
ringbuf->size = 32 * PAGE_SIZE;
memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
init_waitqueue_head(&ring->irq_queue);
 
if (I915_NEED_GFX_HWS(dev)) {
ret = init_status_page(ring);
if (ret)
goto err_unmap;
goto error;
} else {
BUG_ON(ring->id != RCS);
ret = init_phys_status_page(ring);
if (ret)
goto error;
}
 
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
goto error;
}
 
/* Workaround an erratum on the i830 which causes a hang if
* the TAIL pointer points to within the last 2 cachelines
* of the buffer.
*/
ring->effective_size = ring->size;
if (IS_I830(ring->dev) || IS_845G(ring->dev))
ring->effective_size -= 128;
ringbuf->effective_size = ringbuf->size;
if (IS_I830(dev) || IS_845G(dev))
ringbuf->effective_size -= 2 * CACHELINE_BYTES;
 
ret = i915_cmd_parser_init_ring(ring);
if (ret)
goto error;
 
ret = ring->init(ring);
if (ret)
goto error;
 
return 0;
 
err_unmap:
iounmap(ring->virtual_start);
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
drm_gem_object_unreference(&obj->base);
ring->obj = NULL;
err_hws:
// cleanup_status_page(ring);
error:
kfree(ringbuf);
ring->buffer = NULL;
return ret;
}
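
/*
 * Worked example (illustrative only, assuming CACHELINE_BYTES is 64):
 * with the default ringbuf->size of 32 * PAGE_SIZE = 131072 bytes,
 * the i830/845G erratum handling above gives
 *
 *    effective_size = 131072 - 2 * 64 = 130944 bytes
 *
 * so the TAIL pointer can never be written into the last two
 * cachelines of the buffer.
 */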
 
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv;
int ret;
struct drm_i915_private *dev_priv = to_i915(ring->dev);
struct intel_ringbuffer *ringbuf = ring->buffer;
 
if (ring->obj == NULL)
if (!intel_ring_initialized(ring))
return;
 
/* Disable the ring buffer. The ring must be idle at this point */
dev_priv = ring->dev->dev_private;
ret = intel_ring_idle(ring);
if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
ring->name, ret);
intel_stop_ring_buffer(ring);
WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
I915_WRITE_CTL(ring, 0);
 
iounmap(ring->virtual_start);
 
i915_gem_object_unpin(ring->obj);
drm_gem_object_unreference(&ring->obj->base);
ring->obj = NULL;
intel_destroy_ringbuffer_obj(ringbuf);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
 
1424,80 → 1643,56
ring->cleanup(ring);
 
// cleanup_status_page(ring);
}
 
static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
int ret;
i915_cmd_parser_fini_ring(ring);
 
ret = i915_wait_seqno(ring, seqno);
if (!ret)
i915_gem_retire_requests_ring(ring);
 
return ret;
kfree(ringbuf);
ring->buffer = NULL;
}
 
static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_request *request;
u32 seqno = 0;
int ret;
 
i915_gem_retire_requests_ring(ring);
if (ringbuf->last_retired_head != -1) {
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
 
if (ring->last_retired_head != -1) {
ring->head = ring->last_retired_head;
ring->last_retired_head = -1;
ring->space = ring_space(ring);
if (ring->space >= n)
ringbuf->space = ring_space(ringbuf);
if (ringbuf->space >= n)
return 0;
}
 
list_for_each_entry(request, &ring->request_list, list) {
int space;
 
if (request->tail == -1)
continue;
 
space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
if (space < 0)
space += ring->size;
if (space >= n) {
if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
seqno = request->seqno;
break;
}
 
/* Consume this request in case we need more space than
* is available and so need to prevent a race between
* updating last_retired_head and direct reads of
* I915_RING_HEAD. It also provides a nice sanity check.
*/
request->tail = -1;
}
 
if (seqno == 0)
return -ENOSPC;
 
ret = intel_ring_wait_seqno(ring, seqno);
ret = i915_wait_seqno(ring, seqno);
if (ret)
return ret;
 
if (WARN_ON(ring->last_retired_head == -1))
return -ENOSPC;
i915_gem_retire_requests_ring(ring);
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
 
ring->head = ring->last_retired_head;
ring->last_retired_head = -1;
ring->space = ring_space(ring);
if (WARN_ON(ring->space < n))
return -ENOSPC;
 
ringbuf->space = ring_space(ringbuf);
return 0;
}
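
/*
 * Hedged sketch (not necessarily the driver's exact helper) of what
 * __ring_space(), used above, computes: the head-to-tail distance
 * modulo the buffer size, less the I915_RING_FREE_SPACE gap the
 * hardware requires between head and tail.
 *
 *    static inline int ring_space_sketch(int head, int tail, int size)
 *    {
 *        int space = head - (tail + I915_RING_FREE_SPACE);
 *        if (space < 0)
 *            space += size;
 *        return space;
 *    }
 */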
 
static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ringbuffer *ringbuf = ring->buffer;
unsigned long end;
int ret;
 
1508,56 → 1703,63
/* force the tail write in case we have been skipping them */
__intel_ring_advance(ring);
 
trace_i915_ring_wait_begin(ring);
/* With GEM the hangcheck timer should kick us out of the loop,
* leaving it early runs the risk of corrupting GEM state (due
* to running on almost untested codepaths). But on resume
* timers don't work yet, so prevent a complete hang in that
* case by choosing an insanely large timeout. */
end = GetTimerTicks() + 60 * HZ;
end = jiffies + 60 * HZ;
 
trace_i915_ring_wait_begin(ring);
do {
ring->head = I915_READ_HEAD(ring);
ring->space = ring_space(ring);
if (ring->space >= n) {
trace_i915_ring_wait_end(ring);
return 0;
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->space = ring_space(ringbuf);
if (ringbuf->space >= n) {
ret = 0;
break;
}
 
 
msleep(1);
 
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
break;
 
if (time_after(jiffies, end)) {
ret = -EBUSY;
break;
}
} while (1);
trace_i915_ring_wait_end(ring);
return ret;
} while (!time_after(GetTimerTicks(), end));
trace_i915_ring_wait_end(ring);
return -EBUSY;
}
 
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
{
uint32_t __iomem *virt;
int rem = ring->size - ring->tail;
struct intel_ringbuffer *ringbuf = ring->buffer;
int rem = ringbuf->size - ringbuf->tail;
 
if (ring->space < rem) {
if (ringbuf->space < rem) {
int ret = ring_wait_for_space(ring, rem);
if (ret)
return ret;
}
 
virt = ring->virtual_start + ring->tail;
virt = ringbuf->virtual_start + ringbuf->tail;
rem /= 4;
while (rem--)
iowrite32(MI_NOOP, virt++);
 
ring->tail = 0;
ring->space = ring_space(ring);
ringbuf->tail = 0;
ringbuf->space = ring_space(ringbuf);
 
return 0;
}
 
int intel_ring_idle(struct intel_ring_buffer *ring)
int intel_ring_idle(struct intel_engine_cs *ring)
{
u32 seqno;
int ret;
1581,7 → 1783,7
}
 
static int
intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
intel_ring_alloc_seqno(struct intel_engine_cs *ring)
{
if (ring->outstanding_lazy_seqno)
return 0;
1599,18 → 1801,19
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}
 
static int __intel_ring_prepare(struct intel_ring_buffer *ring,
static int __intel_ring_prepare(struct intel_engine_cs *ring,
int bytes)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
int ret;
 
if (unlikely(ring->tail + bytes > ring->effective_size)) {
if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
return ret;
}
 
if (unlikely(ring->space < bytes)) {
if (unlikely(ringbuf->space < bytes)) {
ret = ring_wait_for_space(ring, bytes);
if (unlikely(ret))
return ret;
1619,10 → 1822,10
return 0;
}
 
int intel_ring_begin(struct intel_ring_buffer *ring,
int intel_ring_begin(struct intel_engine_cs *ring,
int num_dwords)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
int ret;
 
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1639,20 → 1842,43
if (ret)
return ret;
 
ring->space -= num_dwords * sizeof(uint32_t);
ring->buffer->space -= num_dwords * sizeof(uint32_t);
return 0;
}
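
/*
 * Typical caller pattern (illustrative only): reserve a dword budget,
 * emit exactly that many dwords, then advance the tail. Emitting two
 * NOOPs, for example, looks like:
 *
 *    ret = intel_ring_begin(ring, 2);
 *    if (ret)
 *        return ret;
 *    intel_ring_emit(ring, MI_NOOP);
 *    intel_ring_emit(ring, MI_NOOP);
 *    intel_ring_advance(ring);
 */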
 
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
int ret;
 
if (num_dwords == 0)
return 0;
 
num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
ret = intel_ring_begin(ring, num_dwords);
if (ret)
return ret;
 
while (num_dwords--)
intel_ring_emit(ring, MI_NOOP);
 
intel_ring_advance(ring);
 
return 0;
}
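
/*
 * Worked example (assuming CACHELINE_BYTES is 64): if the tail sits at
 * an offset ending in 0x24, then 0x24 & 63 = 36 bytes = 9 dwords into
 * the current cacheline, so num_dwords becomes 16 - 9 = 7 and seven
 * MI_NOOPs are emitted to pad the tail up to the next 64-byte boundary.
 */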
 
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
BUG_ON(ring->outstanding_lazy_seqno);
 
if (INTEL_INFO(ring->dev)->gen >= 6) {
if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
if (HAS_VEBOX(ring->dev))
if (HAS_VEBOX(dev))
I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
}
 
1660,10 → 1886,10
ring->hangcheck.seqno = seqno;
}
 
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
u32 value)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
/* Every tail move must follow the sequence below */
 
1693,7 → 1919,7
_MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
 
static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
u32 invalidate, u32 flush)
{
uint32_t cmd;
1729,8 → 1955,8
}
 
static int
gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len,
gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
unsigned flags)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
1744,8 → 1970,8
 
/* FIXME(BDW): Address space and security selectors. */
intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
intel_ring_emit(ring, offset);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, lower_32_bits(offset));
intel_ring_emit(ring, upper_32_bits(offset));
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
 
1753,8 → 1979,8
}
 
static int
hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len,
hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
unsigned flags)
{
int ret;
1774,8 → 2000,8
}
 
static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len,
gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
u64 offset, u32 len,
unsigned flags)
{
int ret;
1796,7 → 2022,7
 
/* Blitter support (SandyBridge+) */
 
static int gen6_ring_flush(struct intel_ring_buffer *ring,
static int gen6_ring_flush(struct intel_engine_cs *ring,
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
1838,38 → 2064,76
 
int intel_init_render_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct drm_i915_gem_object *obj;
int ret;
 
ring->name = "render ring";
ring->id = RCS;
ring->mmio_base = RENDER_RING_BASE;
 
if (INTEL_INFO(dev)->gen >= 6) {
if (INTEL_INFO(dev)->gen >= 8) {
if (i915_semaphore_is_enabled(dev)) {
obj = i915_gem_alloc_object(dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
i915.semaphores = 0;
} else {
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
if (ret != 0) {
drm_gem_object_unreference(&obj->base);
DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
i915.semaphores = 0;
} else
dev_priv->semaphore_obj = obj;
}
}
ring->add_request = gen6_add_request;
ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
if (i915_semaphore_is_enabled(dev)) {
WARN_ON(!dev_priv->semaphore_obj);
ring->semaphore.sync_to = gen8_ring_sync;
ring->semaphore.signal = gen8_rcs_signal;
GEN8_RING_SEMAPHORE_INIT;
}
} else if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
ring->flush = gen7_render_ring_flush;
if (INTEL_INFO(dev)->gen == 6)
ring->flush = gen6_render_ring_flush;
if (INTEL_INFO(dev)->gen >= 8) {
ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
} else {
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
}
ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
ring->signal_mbox[RCS] = GEN6_NOSYNC;
ring->signal_mbox[VCS] = GEN6_VRSYNC;
ring->signal_mbox[BCS] = GEN6_BRSYNC;
ring->signal_mbox[VECS] = GEN6_VERSYNC;
if (i915_semaphore_is_enabled(dev)) {
ring->semaphore.sync_to = gen6_ring_sync;
ring->semaphore.signal = gen6_signal;
/*
* This mbox-based semaphore scheme is only used on
* pre-gen8 platforms, and there is no VCS2 ring before
* gen8, so the semaphore between RCS and VCS2 is
* initialized as INVALID. Gen8 sets up the semaphore
* between VCS2 and RCS separately.
*/
ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
ring->flush = gen4_render_ring_flush;
1897,6 → 2161,7
ring->irq_enable_mask = I915_USER_INTERRUPT;
}
ring->write_tail = ring_write_tail;
 
if (IS_HASWELL(dev))
ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
else if (IS_GEN8(dev))
1914,9 → 2179,6
 
/* Workaround batchbuffer to combat CS tlb bug. */
if (HAS_BROKEN_CS_TLB(dev)) {
struct drm_i915_gem_object *obj;
int ret;
 
obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
if (obj == NULL) {
DRM_ERROR("Failed to allocate batch bo\n");
1923,7 → 2185,7
return -ENOMEM;
}
 
ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
if (ret != 0) {
drm_gem_object_unreference(&obj->base);
DRM_ERROR("Failed to pin batch bo\n");
1940,10 → 2202,18
#if 0
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct intel_ringbuffer *ringbuf = ring->buffer;
int ret;
 
if (ringbuf == NULL) {
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
if (!ringbuf)
return -ENOMEM;
ring->buffer = ringbuf;
}
 
ring->name = "render ring";
ring->id = RCS;
ring->mmio_base = RENDER_RING_BASE;
1950,7 → 2220,8
 
if (INTEL_INFO(dev)->gen >= 6) {
/* non-kms not supported on gen6+ */
return -ENODEV;
ret = -ENODEV;
goto err_ringbuf;
}
 
/* Note: gem is not supported on gen5/ilk without kms (the corresponding
1985,32 → 2256,40
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
 
ring->size = size;
ring->effective_size = ring->size;
ringbuf->size = size;
ringbuf->effective_size = ringbuf->size;
if (IS_I830(ring->dev) || IS_845G(ring->dev))
ring->effective_size -= 128;
ringbuf->effective_size -= 2 * CACHELINE_BYTES;
 
ring->virtual_start = ioremap_wc(start, size);
if (ring->virtual_start == NULL) {
ringbuf->virtual_start = ioremap_wc(start, size);
if (ringbuf->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
ret = -ENOMEM;
goto err_ringbuf;
}
 
if (!I915_NEED_GFX_HWS(dev)) {
ret = init_phys_status_page(ring);
if (ret)
return ret;
goto err_vstart;
}
 
return 0;
 
err_vstart:
iounmap(ringbuf->virtual_start);
err_ringbuf:
kfree(ringbuf);
ring->buffer = NULL;
return ret;
}
#endif
 
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VCS];
 
ring->name = "bsd ring";
ring->id = VCS;
2032,6 → 2311,11
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
ring->semaphore.sync_to = gen8_ring_sync;
ring->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT;
}
} else {
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
2038,16 → 2322,21
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer =
gen6_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
ring->semaphore.sync_to = gen6_ring_sync;
ring->semaphore.signal = gen6_signal;
ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
ring->signal_mbox[RCS] = GEN6_RVSYNC;
ring->signal_mbox[VCS] = GEN6_NOSYNC;
ring->signal_mbox[BCS] = GEN6_BVSYNC;
ring->signal_mbox[VECS] = GEN6_VEVSYNC;
}
} else {
ring->mmio_base = BSD_RING_BASE;
ring->flush = bsd_ring_flush;
2070,10 → 2359,49
return intel_init_ring_buffer(dev, ring);
}
 
/**
* Initialize the second BSD ring (only present on Broadwell GT3).
*/
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
 
if ((INTEL_INFO(dev)->gen != 8)) {
DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
return -EINVAL;
}
 
ring->name = "bsd2 ring";
ring->id = VCS2;
 
ring->write_tail = ring_write_tail;
ring->mmio_base = GEN8_BSD2_RING_BASE;
ring->flush = gen6_bsd_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
ring->semaphore.sync_to = gen8_ring_sync;
ring->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT;
}
ring->init = init_ring_common;
 
return intel_init_ring_buffer(dev, ring);
}
 
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[BCS];
 
ring->name = "blitter ring";
ring->id = BCS;
2090,21 → 2418,38
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
ring->semaphore.sync_to = gen8_ring_sync;
ring->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT;
}
} else {
ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
ring->semaphore.signal = gen6_signal;
ring->semaphore.sync_to = gen6_ring_sync;
/*
* This mbox-based semaphore scheme is only used on
* pre-gen8 platforms, and there is no VCS2 ring before
* gen8, so the semaphore between BCS and VCS2 is
* initialized as INVALID. Gen8 sets up the semaphore
* between BCS and VCS2 separately.
*/
ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
ring->signal_mbox[RCS] = GEN6_RBSYNC;
ring->signal_mbox[VCS] = GEN6_VBSYNC;
ring->signal_mbox[BCS] = GEN6_NOSYNC;
ring->signal_mbox[VECS] = GEN6_VEBSYNC;
}
ring->init = init_ring_common;
 
return intel_init_ring_buffer(dev, ring);
2112,8 → 2457,8
 
int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VECS];
 
ring->name = "video enhancement ring";
ring->id = VECS;
2131,21 → 2476,31
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
ring->semaphore.sync_to = gen8_ring_sync;
ring->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT;
}
} else {
ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
ring->irq_get = hsw_vebox_get_irq;
ring->irq_put = hsw_vebox_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
ring->semaphore.sync_to = gen6_ring_sync;
ring->semaphore.signal = gen6_signal;
ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
ring->signal_mbox[RCS] = GEN6_RVESYNC;
ring->signal_mbox[VCS] = GEN6_VVESYNC;
ring->signal_mbox[BCS] = GEN6_BVESYNC;
ring->signal_mbox[VECS] = GEN6_NOSYNC;
}
ring->init = init_ring_common;
 
return intel_init_ring_buffer(dev, ring);
2152,7 → 2507,7
}
 
int
intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
intel_ring_flush_all_caches(struct intel_engine_cs *ring)
{
int ret;
 
2170,7 → 2525,7
}
 
int
intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
{
uint32_t flush_domains;
int ret;
2188,3 → 2543,19
ring->gpu_caches_dirty = false;
return 0;
}
 
void
intel_stop_ring_buffer(struct intel_engine_cs *ring)
{
int ret;
 
if (!intel_ring_initialized(ring))
return;
 
ret = intel_ring_idle(ring);
if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
ring->name, ret);
 
stop_ring(ring);
}
/drivers/video/drm/i915/intel_ringbuffer.h
1,6 → 1,10
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
 
#include <linux/hashtable.h>
 
#define I915_CMD_HASH_ORDER 9
 
/*
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
* Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
33,35 → 37,58
#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
 
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
*/
#define i915_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
(i915_semaphore_seqno_size * (to)))
 
#define GEN8_WAIT_OFFSET(__ring, from) \
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
(i915_semaphore_seqno_size * (__ring)->id))
 
#define GEN8_RING_SEMAPHORE_INIT do { \
if (!dev_priv->semaphore_obj) { \
break; \
} \
ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
} while(0)
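
/*
 * Worked example of the offset maths above (I915_NUM_RINGS = 5,
 * i915_semaphore_seqno_size = 8), using hypothetical names rcs_ring
 * (id 0) and vcs_ring (id 1): the slot the render ring signals for
 * the video ring is
 *
 *    GEN8_SIGNAL_OFFSET(rcs_ring, VCS) = base + (0 * 5 * 8) + (8 * 1) = base + 0x08
 *
 * and the video ring waits on exactly the same slot, since
 *
 *    GEN8_WAIT_OFFSET(vcs_ring, RCS)   = base + (0 * 5 * 8) + (8 * 1) = base + 0x08
 *
 * matching the 0x08 entries in the signal/wait tables documented in
 * struct intel_engine_cs below.
 */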
 
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
HANGCHECK_ACTIVE,
HANGCHECK_ACTIVE_LOOP,
HANGCHECK_KICK,
HANGCHECK_HUNG,
};
 
#define HANGCHECK_SCORE_RING_HUNG 31
 
struct intel_ring_hangcheck {
bool deadlock;
u64 acthd;
u64 max_acthd;
u32 seqno;
u32 acthd;
int score;
enum intel_ring_hangcheck_action action;
int deadlock;
};
 
struct intel_ring_buffer {
const char *name;
enum intel_ring_id {
RCS = 0x0,
VCS,
BCS,
VECS,
} id;
#define I915_NUM_RINGS 4
u32 mmio_base;
struct intel_ringbuffer {
struct drm_i915_gem_object *obj;
void __iomem *virtual_start;
struct drm_device *dev;
struct drm_i915_gem_object *obj;
 
u32 head;
u32 tail;
68,7 → 95,6
int space;
int size;
int effective_size;
struct intel_hw_status_page status_page;
 
/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU
79,22 → 105,39
* we can detect new retirements.
*/
u32 last_retired_head;
};
 
struct intel_engine_cs {
const char *name;
enum intel_ring_id {
RCS = 0x0,
VCS,
BCS,
VECS,
VCS2
} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
u32 mmio_base;
struct drm_device *dev;
struct intel_ringbuffer *buffer;
 
struct intel_hw_status_page status_page;
 
unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
void (*irq_put)(struct intel_ring_buffer *ring);
bool __must_check (*irq_get)(struct intel_engine_cs *ring);
void (*irq_put)(struct intel_engine_cs *ring);
 
int (*init)(struct intel_ring_buffer *ring);
int (*init)(struct intel_engine_cs *ring);
 
void (*write_tail)(struct intel_ring_buffer *ring,
void (*write_tail)(struct intel_engine_cs *ring,
u32 value);
int __must_check (*flush)(struct intel_ring_buffer *ring,
int __must_check (*flush)(struct intel_engine_cs *ring,
u32 invalidate_domains,
u32 flush_domains);
int (*add_request)(struct intel_ring_buffer *ring);
int (*add_request)(struct intel_engine_cs *ring);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
101,25 → 144,76
* seen value is good enough. Note that the seqno will always be
* monotonic, even if not coherent.
*/
u32 (*get_seqno)(struct intel_ring_buffer *ring,
u32 (*get_seqno)(struct intel_engine_cs *ring,
bool lazy_coherency);
void (*set_seqno)(struct intel_ring_buffer *ring,
void (*set_seqno)(struct intel_engine_cs *ring,
u32 seqno);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length,
int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
u64 offset, u32 length,
unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_ring_buffer *ring);
int (*sync_to)(struct intel_ring_buffer *ring,
struct intel_ring_buffer *to,
u32 seqno);
void (*cleanup)(struct intel_engine_cs *ring);
 
/* GEN8 signal/wait table - never trust comments!
* signal to signal to signal to signal to signal to
* RCS VCS BCS VECS VCS2
* --------------------------------------------------------------------
* RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
* |-------------------------------------------------------------------
* VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
* |-------------------------------------------------------------------
* BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
* |-------------------------------------------------------------------
* VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) |
* |-------------------------------------------------------------------
* VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) |
* |-------------------------------------------------------------------
*
* Generalization:
* f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
* ie. transpose of g(x, y)
*
* sync from sync from sync from sync from sync from
* RCS VCS BCS VECS VCS2
* --------------------------------------------------------------------
* RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
* |-------------------------------------------------------------------
* VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
* |-------------------------------------------------------------------
* BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
* |-------------------------------------------------------------------
* VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) |
* |-------------------------------------------------------------------
* VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) |
* |-------------------------------------------------------------------
*
* Generalization:
* g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
* ie. transpose of f(x, y)
*/
struct {
u32 sync_seqno[I915_NUM_RINGS-1];
 
union {
struct {
/* our mbox written by others */
u32 semaphore_register[I915_NUM_RINGS];
u32 wait[I915_NUM_RINGS];
/* mboxes this ring signals to */
u32 signal_mbox[I915_NUM_RINGS];
u32 signal[I915_NUM_RINGS];
} mbox;
u64 signal_ggtt[I915_NUM_RINGS];
};
 
/* AKA wait() */
int (*sync_to)(struct intel_engine_cs *ring,
struct intel_engine_cs *to,
u32 seqno);
int (*signal)(struct intel_engine_cs *signaller,
/* num_dwords needed by caller */
unsigned int num_dwords);
} semaphore;
 
/**
* List of objects currently involved in rendering from the
* ringbuffer.
148,12 → 242,8
 
wait_queue_head_t irq_queue;
 
/**
* Do an explicit TLB flush before MI_SET_CONTEXT
*/
bool itlb_before_ctx_switch;
struct i915_hw_context *default_context;
struct i915_hw_context *last_context;
struct intel_context *default_context;
struct intel_context *last_context;
 
struct intel_ring_hangcheck hangcheck;
 
162,30 → 252,65
u32 gtt_offset;
volatile u32 *cpu_page;
} scratch;
 
bool needs_cmd_parser;
 
/*
* Table of commands the command parser needs to know about
* for this ring.
*/
DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
 
/*
* Table of registers allowed in commands that read/write registers.
*/
const u32 *reg_table;
int reg_count;
 
/*
* Table of registers allowed in commands that read/write registers, but
* only from the DRM master.
*/
const u32 *master_reg_table;
int master_reg_count;
 
/*
* Returns the bitmask for the length field of the specified command.
* Return 0 for an unrecognized/invalid command.
*
* If the command parser finds an entry for a command in the ring's
* cmd_tables, it gets the command's length based on the table entry.
* If not, it calls this function to determine the per-ring length field
* encoding for the command (i.e. certain opcode ranges use certain bits
* to encode the command length in the header).
*/
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
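
/*
 * Hypothetical sketch of a ->get_cmd_length_mask() hook (not the
 * driver's actual table): the client field in bits 31:29 of the
 * command header selects how the length is encoded, and unknown
 * clients return 0 so the command parser rejects the command.
 *
 *    static u32 example_cmd_length_mask(u32 cmd_header)
 *    {
 *        u32 client = cmd_header >> 29;    // instruction client field
 *
 *        if (client == 0x0)                // MI client: length in low bits
 *            return 0x3f;
 *        if (client == 0x2)                // 2D client: length in low byte
 *            return 0xff;
 *        return 0;                         // unrecognized: reject
 *    }
 */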
 
static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
intel_ring_initialized(struct intel_engine_cs *ring)
{
return ring->obj != NULL;
return ring->buffer && ring->buffer->obj;
}
 
static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
intel_ring_flag(struct intel_engine_cs *ring)
{
return 1 << ring->id;
}
 
static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
struct intel_ring_buffer *other)
intel_ring_sync_index(struct intel_engine_cs *ring,
struct intel_engine_cs *other)
{
int idx;
 
/*
* cs -> 0 = vcs, 1 = bcs
* vcs -> 0 = bcs, 1 = cs,
* bcs -> 0 = cs, 1 = vcs.
* rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
* vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
* bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
* vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
* vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
*/
 
idx = (other - ring) - 1;
196,7 → 321,7
}
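
/*
 * Worked example (illustrative only): the rings live in a contiguous
 * array, so (other - ring) is just the difference of their ids. For
 * ring = vcs (id 1) and other = rcs (id 0):
 *
 *    idx = (0 - 1) - 1 = -2, which wraps modulo I915_NUM_RINGS (5) to 3
 *
 * matching the "vcs -> ... 3 = rcs" line in the comment above.
 */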
 
static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
intel_read_status_page(struct intel_engine_cs *ring,
int reg)
{
/* Ensure that the compiler doesn't optimize away the load. */
205,7 → 330,7
}
 
static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
intel_write_status_page(struct intel_engine_cs *ring,
int reg, u32 value)
{
ring->status_page.page_addr[reg] = value;
230,46 → 355,51
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
 
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
u32 data)
{
iowrite32(data, ring->virtual_start + ring->tail);
ring->tail += 4;
struct intel_ringbuffer *ringbuf = ring->buffer;
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
static inline void intel_ring_advance(struct intel_ring_buffer *ring)
static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
ring->tail &= ring->size - 1;
struct intel_ringbuffer *ringbuf = ring->buffer;
ringbuf->tail &= ringbuf->size - 1;
}
void __intel_ring_advance(struct intel_ring_buffer *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);
 
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
 
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
 
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);
 
static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
return ring->tail;
return ringbuf->tail;
}
 
static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
{
BUG_ON(ring->outstanding_lazy_seqno == 0);
return ring->outstanding_lazy_seqno;
}
 
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
ring->trace_irq_seqno = seqno;
/drivers/video/drm/i915/intel_sdvo.c
1153,20 → 1153,21
pipe_config->pixel_multiplier =
intel_sdvo_get_pixel_multiplier(adjusted_mode);
 
pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor;
 
if (intel_sdvo->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
/* FIXME: This bit is only valid when using TMDS encoding and 8
* bit per color mode. */
if (intel_sdvo->has_hdmi_monitor &&
if (pipe_config->has_hdmi_sink &&
drm_match_cea_mode(adjusted_mode) > 1)
intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
else
intel_sdvo->color_range = 0;
pipe_config->limited_color_range = true;
} else {
if (pipe_config->has_hdmi_sink &&
intel_sdvo->color_range == HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
}
 
if (intel_sdvo->color_range)
pipe_config->limited_color_range = true;
 
/* Clock computation needs to happen after pixel multiplier. */
if (intel_sdvo->is_tv)
i9xx_adjust_sdvo_tv_clock(pipe_config);
1174,7 → 1175,7
return true;
}
 
static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1223,7 → 1224,7
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
 
if (intel_sdvo->has_hdmi_monitor) {
if (crtc->config.has_hdmi_sink) {
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
1258,8 → 1259,8
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi)
sdvox |= intel_sdvo->color_range;
if (!HAS_PCH_SPLIT(dev) && crtc->config.limited_color_range)
sdvox |= HDMI_COLOR_RANGE_16_235;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
} else {
1349,6 → 1350,8
u8 val;
bool ret;
 
sdvox = I915_READ(intel_sdvo->sdvo_reg);
 
ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
if (!ret) {
/* Some sdvo encoders are not spec compliant and don't
1377,13 → 1380,14
* other platforms.
*/
if (IS_I915G(dev) || IS_I915GM(dev)) {
sdvox = I915_READ(intel_sdvo->sdvo_reg);
pipe_config->pixel_multiplier =
((sdvox & SDVO_PORT_MULTIPLY_MASK)
>> SDVO_PORT_MULTIPLY_SHIFT) + 1;
}
 
dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier;
dotclock = pipe_config->port_clock;
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
 
if (HAS_PCH_SPLIT(dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
1406,6 → 1410,15
}
}
 
if (sdvox & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
 
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
&val, 1)) {
if (val == SDVO_ENCODE_HDMI)
pipe_config->has_hdmi_sink = true;
}
 
WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
"SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
pipe_config->pixel_multiplier, encoder_pixel_multiplier);
1461,7 → 1474,7
u32 temp;
bool input1, input2;
int i;
u8 status;
bool success;
 
temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0) {
1475,12 → 1488,12
for (i = 0; i < 2; i++)
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
/* Warn if the device reported failure to sync.
* A lot of SDVO devices fail to notify of sync, but it's
* a given that if the status is a success, we succeeded.
*/
if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
if (success && !input1) {
DRM_DEBUG_KMS("First %s output reported failure to "
"sync\n", SDVO_NAME(intel_sdvo));
}
1732,7 → 1745,7
enum drm_connector_status ret;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
connector->base.id, connector->name);
 
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS,
1794,7 → 1807,7
struct edid *edid;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
connector->base.id, connector->name);
 
/* set the bus switch and get the modes */
edid = intel_sdvo_get_edid(connector);
1892,7 → 1905,7
int i;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
connector->base.id, connector->name);
 
/* Read the list of supported input resolutions for the selected TV
* format.
1929,7 → 1942,7
struct drm_display_mode *newmode;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
connector->base.id, connector->name);
 
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
2382,15 → 2395,33
}
 
static void
intel_sdvo_connector_unregister(struct intel_connector *intel_connector)
{
struct drm_connector *drm_connector;
struct intel_sdvo *sdvo_encoder;
 
drm_connector = &intel_connector->base;
sdvo_encoder = intel_attached_sdvo(&intel_connector->base);
 
intel_connector_unregister(intel_connector);
}
 
static int
intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
struct intel_sdvo *encoder)
{
drm_connector_init(encoder->base.base.dev,
&connector->base.base,
struct drm_connector *drm_connector;
int ret;
 
drm_connector = &connector->base.base;
ret = drm_connector_init(encoder->base.base.dev,
drm_connector,
&intel_sdvo_connector_funcs,
connector->base.base.connector_type);
if (ret < 0)
return ret;
 
drm_connector_helper_add(&connector->base.base,
drm_connector_helper_add(drm_connector,
&intel_sdvo_connector_helper_funcs);
 
connector->base.base.interlace_allowed = 1;
2397,9 → 2428,11
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
connector->base.unregister = intel_sdvo_connector_unregister;
 
intel_connector_attach_encoder(&connector->base, &encoder->base);
drm_sysfs_connector_add(&connector->base.base);
 
return ret;
}
 
static void
2459,7 → 2492,11
intel_sdvo->is_hdmi = true;
}
 
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
kfree(intel_sdvo_connector);
return false;
}
 
if (intel_sdvo->is_hdmi)
intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
 
2490,7 → 2527,10
 
intel_sdvo->is_tv = true;
 
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
kfree(intel_sdvo_connector);
return false;
}
 
if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
goto err;
2501,7 → 2541,7
return true;
 
err:
drm_sysfs_connector_remove(connector);
drm_connector_unregister(connector);
intel_sdvo_destroy(connector);
return false;
}
2534,8 → 2574,11
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
}
 
intel_sdvo_connector_init(intel_sdvo_connector,
intel_sdvo);
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
kfree(intel_sdvo_connector);
return false;
}
 
return true;
}
 
2566,7 → 2609,11
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
 
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
kfree(intel_sdvo_connector);
return false;
}
 
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
 
2573,7 → 2620,7
return true;
 
err:
drm_sysfs_connector_remove(connector);
drm_connector_unregister(connector);
intel_sdvo_destroy(connector);
return false;
}
2646,7 → 2693,7
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
if (intel_attached_encoder(connector) == &intel_sdvo->base) {
drm_sysfs_connector_remove(connector);
drm_connector_unregister(connector);
intel_sdvo_destroy(connector);
}
}
2947,7 → 2994,7
 
intel_encoder->compute_config = intel_sdvo_compute_config;
intel_encoder->disable = intel_disable_sdvo;
intel_encoder->mode_set = intel_sdvo_mode_set;
intel_encoder->pre_enable = intel_sdvo_pre_enable;
intel_encoder->enable = intel_enable_sdvo;
intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
intel_encoder->get_config = intel_sdvo_get_config;
2980,7 → 3027,7
* simplistic anyway to express such constraints, so just give up on
* cloning for SDVO encoders.
*/
intel_sdvo->base.cloneable = false;
intel_sdvo->base.cloneable = 0;
 
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
 
/drivers/video/drm/i915/intel_sideband.c
29,12 → 29,21
* IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
* VLV_VLV2_PUNIT_HAS_0.8.docx
*/
 
/* Standard MMIO read, non-posted */
#define SB_MRD_NP 0x00
/* Standard MMIO write, non-posted */
#define SB_MWR_NP 0x01
/* Private register read, double-word addressing, non-posted */
#define SB_CRRDDA_NP 0x06
/* Private register write, double-word addressing, non-posted */
#define SB_CRWRDA_NP 0x07
 
static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
u32 port, u32 opcode, u32 addr, u32 *val)
{
u32 cmd, be = 0xf, bar = 0;
bool is_read = (opcode == PUNIT_OPCODE_REG_READ ||
opcode == DPIO_OPCODE_REG_READ);
bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
 
cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
74,7 → 83,7
 
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
PUNIT_OPCODE_REG_READ, addr, &val);
SB_CRRDDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
 
return val;
86,7 → 95,7
 
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
PUNIT_OPCODE_REG_WRITE, addr, &val);
SB_CRWRDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
}
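
/*
 * Illustrative only: every accessor in this file reduces to
 * vlv_sideband_rw() with a port id and one of the opcodes defined
 * above. A punit register read/write pair, for example, is just
 *
 *    vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
 *                    SB_CRRDDA_NP, addr, &val);
 *    vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
 *                    SB_CRWRDA_NP, addr, &val);
 *
 * while the DPIO helpers use the MMIO-style SB_MRD_NP/SB_MWR_NP
 * opcodes instead.
 */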
 
95,7 → 104,7
u32 val = 0;
 
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
PUNIT_OPCODE_REG_READ, reg, &val);
SB_CRRDDA_NP, reg, &val);
 
return val;
}
103,7 → 112,7
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
PUNIT_OPCODE_REG_WRITE, reg, &val);
SB_CRWRDA_NP, reg, &val);
}
 
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
114,7 → 123,7
 
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
PUNIT_OPCODE_REG_READ, addr, &val);
SB_CRRDDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
 
return val;
124,7 → 133,7
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
PUNIT_OPCODE_REG_READ, reg, &val);
SB_CRRDDA_NP, reg, &val);
return val;
}
 
131,7 → 140,7
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
PUNIT_OPCODE_REG_WRITE, reg, &val);
SB_CRWRDA_NP, reg, &val);
}
 
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
138,7 → 147,7
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
PUNIT_OPCODE_REG_READ, reg, &val);
SB_CRRDDA_NP, reg, &val);
return val;
}
 
145,7 → 154,7
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
PUNIT_OPCODE_REG_WRITE, reg, &val);
SB_CRWRDA_NP, reg, &val);
}
 
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
152,7 → 161,7
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
PUNIT_OPCODE_REG_READ, reg, &val);
SB_CRRDDA_NP, reg, &val);
return val;
}
 
159,7 → 168,7
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
PUNIT_OPCODE_REG_WRITE, reg, &val);
SB_CRWRDA_NP, reg, &val);
}
 
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
166,7 → 175,7
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
PUNIT_OPCODE_REG_READ, reg, &val);
SB_CRRDDA_NP, reg, &val);
return val;
}
 
173,7 → 182,7
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
PUNIT_OPCODE_REG_WRITE, reg, &val);
SB_CRWRDA_NP, reg, &val);
}
 
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
181,7 → 190,15
u32 val = 0;
 
vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
DPIO_OPCODE_REG_READ, reg, &val);
SB_MRD_NP, reg, &val);
 
/*
* FIXME: There might be some registers where all 1's is a valid value,
* so ideally we should check the register offset instead...
*/
WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
pipe_name(pipe), reg, val);
 
return val;
}
 
188,7 → 205,7
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
{
vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
DPIO_OPCODE_REG_WRITE, reg, &val);
SB_MWR_NP, reg, &val);
}
 
/* SBI access */
253,13 → 270,13
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
DPIO_OPCODE_REG_READ, reg, &val);
vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
reg, &val);
return val;
}
 
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
DPIO_OPCODE_REG_WRITE, reg, &val);
vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
reg, &val);
}
/drivers/video/drm/i915/intel_sprite.c
37,6 → 37,106
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
{
/* paranoia */
if (!mode->crtc_htotal)
return 1;
 
return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal);
}
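
/*
 * Worked example (crtc_clock is in kHz): for a standard 1080p60 timing
 * with crtc_clock = 148500 and crtc_htotal = 2200, one scanline takes
 * roughly 2200 / 148500 kHz ~= 14.8 us, so
 *
 *    usecs_to_scanlines(mode, 100) = DIV_ROUND_UP(100 * 148500, 1000 * 2200) = 7
 *
 * i.e. a 100 us guard band is rounded up to 7 scanlines.
 */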
 
static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
enum pipe pipe = crtc->pipe;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
DEFINE_WAIT(wait);
 
WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
 
vblank_start = mode->crtc_vblank_start;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vblank_start = DIV_ROUND_UP(vblank_start, 2);
 
/* FIXME needs to be calibrated sensibly */
min = vblank_start - usecs_to_scanlines(mode, 100);
max = vblank_start - 1;
 
if (min <= 0 || max <= 0)
return false;
 
// if (WARN_ON(drm_vblank_get(dev, pipe)))
// return false;
 
// local_irq_disable();
 
// trace_i915_pipe_update_start(crtc, min, max);
 
for (;;) {
/*
* prepare_to_wait() has a memory barrier, which guarantees
* other CPUs can see the task state update by the time we
* read the scanline.
*/
prepare_to_wait(&crtc->vbl_wait, &wait, TASK_UNINTERRUPTIBLE);
 
scanline = intel_get_crtc_scanline(crtc);
if (scanline < min || scanline > max)
break;
 
if (timeout <= 0) {
DRM_ERROR("Potential atomic update failure on pipe %c\n",
pipe_name(crtc->pipe));
break;
}
 
// local_irq_enable();
 
schedule_timeout(timeout);
timeout = 0;
// local_irq_disable();
}
 
finish_wait(&crtc->vbl_wait, &wait);
 
// drm_vblank_put(dev, pipe);
 
*start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
 
// trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count);
 
return true;
}
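/*
 * intel_pipe_update_start()/intel_pipe_update_end() are used as a pair by
 * the plane update paths below:
 *   atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
 *   ... double-buffered plane register writes ...
 *   if (atomic_update)
 *       intel_pipe_update_end(intel_crtc, start_vbl_count);
 * so that all writes land inside the same vblank-evasion window.
 */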
 
static void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
enum pipe pipe = crtc->pipe;
u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
 
// trace_i915_pipe_update_end(crtc, end_vbl_count);
 
// local_irq_enable();
 
if (start_vbl_count != end_vbl_count)
DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n",
pipe_name(pipe), start_vbl_count, end_vbl_count);
}
 
static void intel_update_primary_plane(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
int reg = DSPCNTR(crtc->plane);
 
if (crtc->primary_enabled)
I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
else
I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
}
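/*
 * intel_update_primary_plane() is invoked from inside the vblank-evasion
 * window of each sprite update path below, so the DSPCNTR enable bit is
 * flipped atomically with the sprite register writes.
 */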
 
static void
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
48,11 → 148,14
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
u32 sprctl;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
u32 start_vbl_count;
bool atomic_update;
 
sprctl = I915_READ(SPCNTR(pipe, plane));
 
115,7 → 218,8
 
sprctl |= SP_ENABLE;
 
intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true,
intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
 
/* Sizes are 0 based */
124,9 → 228,6
crtc_w--;
crtc_h--;
 
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
 
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
obj->tiling_mode,
134,6 → 235,13
fb->pitches[0]);
linear_offset -= sprsurf_offset;
 
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
 
intel_update_primary_plane(intel_crtc);
 
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
 
if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
else
143,7 → 251,11
I915_WRITE(SPCNTR(pipe, plane), sprctl);
I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
 
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
}
 
static void
152,16 → 264,27
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
u32 start_vbl_count;
bool atomic_update;
 
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
 
intel_update_primary_plane(intel_crtc);
 
I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
~SP_ENABLE);
/* Activate double buffered register update */
I915_WRITE(SPSURF(pipe, plane), 0);
POSTING_READ(SPSURF(pipe, plane));
 
intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
 
intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
 
static int
226,10 → 349,13
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
u32 start_vbl_count;
bool atomic_update;
 
sprctl = I915_READ(SPRCTL(pipe));
 
281,7 → 407,8
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
 
intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size,
true,
src_w != crtc_w || src_h != crtc_h);
 
/* Sizes are 0 based */
293,9 → 420,6
if (crtc_w != src_w || crtc_h != src_h)
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
 
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
 
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
302,6 → 426,13
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
 
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
 
intel_update_primary_plane(intel_crtc);
 
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
 
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
317,7 → 448,11
I915_WRITE(SPRCTL(pipe), sprctl);
I915_WRITE(SPRSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
 
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
}
 
static void
326,8 → 461,15
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
u32 start_vbl_count;
bool atomic_update;
 
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
 
intel_update_primary_plane(intel_crtc);
 
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
if (intel_plane->can_scale)
334,8 → 476,12
I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
I915_WRITE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
 
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
 
/*
* Avoid underruns when disabling the sprite.
* FIXME remove once watermark updates are done properly.
342,7 → 488,7
*/
intel_wait_for_vblank(dev, pipe);
 
intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
}
 
static int
410,10 → 556,13
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
u32 start_vbl_count;
bool atomic_update;
 
dvscntr = I915_READ(DVSCNTR(pipe));
 
459,7 → 608,8
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
 
intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
 
/* Sizes are 0 based */
472,9 → 622,6
if (crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
 
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
 
linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
481,6 → 628,13
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
 
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
 
intel_update_primary_plane(intel_crtc);
 
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
 
if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
else
491,7 → 645,11
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_WRITE(DVSSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
 
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
}
 
static void
500,15 → 658,26
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
u32 start_vbl_count;
bool atomic_update;
 
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
 
intel_update_primary_plane(intel_crtc);
 
I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
/* Flush double buffered register updates */
I915_WRITE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
 
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
 
/*
* Avoid underruns when disabling the sprite.
* FIXME remove once watermark updates are done properly.
515,25 → 684,15
*/
intel_wait_for_vblank(dev, pipe);
 
intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
}
 
static void
intel_enable_primary(struct drm_crtc *crtc)
intel_post_enable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
 
if (intel_crtc->primary_enabled)
return;
 
intel_crtc->primary_enabled = true;
 
I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
/*
* FIXME IPS should be fine as long as one plane is
* enabled, but in practice it seems to have problems
540,10 → 699,7
* when going from primary only to sprite only and vice
* versa.
*/
if (intel_crtc->config.ips_enabled) {
intel_wait_for_vblank(dev, intel_crtc->pipe);
hsw_enable_ips(intel_crtc);
}
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
551,18 → 707,12
}
 
static void
intel_disable_primary(struct drm_crtc *crtc)
intel_pre_disable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
 
if (!intel_crtc->primary_enabled)
return;
 
intel_crtc->primary_enabled = false;
 
mutex_lock(&dev->struct_mutex);
if (dev_priv->fbc.plane == intel_crtc->plane)
intel_disable_fbc(dev);
575,9 → 725,6
* versa.
*/
hsw_disable_ips(intel_crtc);
 
I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
}
 
static int
667,11 → 814,12
struct drm_device *dev = plane->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
enum pipe pipe = intel_crtc->pipe;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct drm_i915_gem_object *old_obj = intel_plane->obj;
int ret;
bool disable_primary = false;
bool primary_enabled;
bool visible;
int hscale, vscale;
int max_scale, min_scale;
842,8 → 990,8
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
*/
disable_primary = drm_rect_equals(&dst, &clip) && !colorkey_enabled(intel_plane);
WARN_ON(disable_primary && !visible && intel_crtc->active);
primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
WARN_ON(!primary_enabled && !visible && intel_crtc->active);
 
mutex_lock(&dev->struct_mutex);
 
854,6 → 1002,8
*/
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
 
i915_gem_track_fb(old_obj, obj,
INTEL_FRONTBUFFER_SPRITE(pipe));
mutex_unlock(&dev->struct_mutex);
 
if (ret)
870,13 → 1020,15
intel_plane->obj = obj;
 
if (intel_crtc->active) {
/*
* Be sure to re-enable the primary before the sprite is no longer
* covering it fully.
*/
if (!disable_primary)
intel_enable_primary(crtc);
bool primary_was_enabled = intel_crtc->primary_enabled;
 
intel_crtc->primary_enabled = primary_enabled;
 
// if (primary_was_enabled != primary_enabled)
 
if (primary_was_enabled && !primary_enabled)
intel_pre_disable_primary(crtc);
 
if (visible)
intel_plane->update_plane(plane, crtc, fb, obj,
crtc_x, crtc_y, crtc_w, crtc_h,
884,8 → 1036,8
else
intel_plane->disable_plane(plane, crtc);
 
if (disable_primary)
intel_disable_primary(crtc);
if (!primary_was_enabled && primary_enabled)
intel_post_enable_primary(crtc);
}
 
/* Unpin old obj after new one is active to avoid ugliness */
913,6 → 1065,7
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc;
enum pipe pipe;
 
if (!plane->fb)
return 0;
921,10 → 1074,17
return -EINVAL;
 
intel_crtc = to_intel_crtc(plane->crtc);
pipe = intel_crtc->pipe;
 
if (intel_crtc->active) {
intel_enable_primary(plane->crtc);
bool primary_was_enabled = intel_crtc->primary_enabled;
 
intel_crtc->primary_enabled = true;
 
intel_plane->disable_plane(plane, plane->crtc);
 
if (!primary_was_enabled && intel_crtc->primary_enabled)
intel_post_enable_primary(plane->crtc);
}
 
if (intel_plane->obj) {
933,6 → 1093,8
 
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(intel_plane->obj);
i915_gem_track_fb(intel_plane->obj, NULL,
INTEL_FRONTBUFFER_SPRITE(pipe));
mutex_unlock(&dev->struct_mutex);
 
intel_plane->obj = NULL;
953,7 → 1115,6
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *set = data;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
967,13 → 1128,12
 
drm_modeset_lock_all(dev);
 
obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
plane = drm_plane_find(dev, set->plane_id);
if (!plane) {
ret = -ENOENT;
goto out_unlock;
}
 
plane = obj_to_plane(obj);
intel_plane = to_intel_plane(plane);
ret = intel_plane->update_colorkey(plane, set);
 
986,7 → 1146,6
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *get = data;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
996,13 → 1155,12
 
drm_modeset_lock_all(dev);
 
obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
plane = drm_plane_find(dev, get->plane_id);
if (!plane) {
ret = -ENOENT;
goto out_unlock;
}
 
plane = obj_to_plane(obj);
intel_plane = to_intel_plane(plane);
intel_plane->get_colorkey(plane, get);
 
/drivers/video/drm/i915/intel_uncore.c
40,6 → 40,12
 
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
 
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
"Device suspended\n");
}
 
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
83,7 → 89,7
__gen6_gt_wait_for_thread_c0(dev_priv);
}
 
static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_MT */
90,7 → 96,7
__raw_posting_read(dev_priv, ECOBUS);
}
 
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
int fw_engine)
{
u32 forcewake_ack;
136,7 → 142,7
gen6_gt_check_fifodbg(dev_priv);
}
 
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
int fw_engine)
{
__raw_i915_write32(dev_priv, FORCEWAKE_MT,
143,6 → 149,8
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_MT */
__raw_posting_read(dev_priv, ECOBUS);
 
if (IS_GEN7(dev_priv->dev))
gen6_gt_check_fifodbg(dev_priv);
}
 
177,6 → 185,8
{
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_DISABLE(0xffff));
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_VLV */
__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}
221,8 → 231,8
}
 
/* WaRsForcewakeWaitTC0:vlv */
if (!IS_CHERRYVIEW(dev_priv->dev))
__gen6_gt_wait_for_thread_c0(dev_priv);
 
}
 
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
240,81 → 250,120
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
 
/* The below doubles as a POSTING_READ */
/* something from same cacheline, but !FORCEWAKE_VLV */
__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
if (!IS_CHERRYVIEW(dev_priv->dev))
gen6_gt_check_fifodbg(dev_priv);
 
}
 
void vlv_force_wake_get(struct drm_i915_private *dev_priv,
int fw_engine)
static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (FORCEWAKE_RENDER & fw_engine) {
if (dev_priv->uncore.fw_rendercount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_RENDER);
}
if (FORCEWAKE_MEDIA & fw_engine) {
if (dev_priv->uncore.fw_mediacount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_MEDIA);
}
 
if (fw_engine & FORCEWAKE_RENDER &&
dev_priv->uncore.fw_rendercount++ != 0)
fw_engine &= ~FORCEWAKE_RENDER;
if (fw_engine & FORCEWAKE_MEDIA &&
dev_priv->uncore.fw_mediacount++ != 0)
fw_engine &= ~FORCEWAKE_MEDIA;
 
if (fw_engine)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);
 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
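/*
 * Only the first get per engine reaches the low-level force_wake_get
 * handler; nested requests just bump fw_rendercount/fw_mediacount under
 * uncore.lock. vlv_force_wake_put() below mirrors this on release.
 */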
 
void vlv_force_wake_put(struct drm_i915_private *dev_priv,
int fw_engine)
static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
if (FORCEWAKE_RENDER & fw_engine) {
WARN_ON(dev_priv->uncore.fw_rendercount == 0);
if (--dev_priv->uncore.fw_rendercount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_RENDER);
if (fw_engine & FORCEWAKE_RENDER) {
WARN_ON(!dev_priv->uncore.fw_rendercount);
if (--dev_priv->uncore.fw_rendercount != 0)
fw_engine &= ~FORCEWAKE_RENDER;
}
 
if (FORCEWAKE_MEDIA & fw_engine) {
WARN_ON(dev_priv->uncore.fw_mediacount == 0);
if (--dev_priv->uncore.fw_mediacount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_MEDIA);
if (fw_engine & FORCEWAKE_MEDIA) {
WARN_ON(!dev_priv->uncore.fw_mediacount);
if (--dev_priv->uncore.fw_mediacount != 0)
fw_engine &= ~FORCEWAKE_MEDIA;
}
 
if (fw_engine)
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);
 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
 
static void gen6_force_wake_work(struct work_struct *work)
static void gen6_force_wake_timer(unsigned long arg)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
struct drm_i915_private *dev_priv = (void *)arg;
unsigned long irqflags;
 
assert_device_not_suspended(dev_priv);
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
WARN_ON(!dev_priv->uncore.forcewake_count);
 
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
intel_runtime_pm_put(dev_priv);
}
 
static void intel_uncore_forcewake_reset(struct drm_device *dev)
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
if (IS_VALLEYVIEW(dev)) {
if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
gen6_force_wake_timer((unsigned long)dev_priv);
 
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly
*/
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
if (IS_VALLEYVIEW(dev))
vlv_force_wake_reset(dev_priv);
} else if (INTEL_INFO(dev)->gen >= 6) {
else if (IS_GEN6(dev) || IS_GEN7(dev))
__gen6_gt_force_wake_reset(dev_priv);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
 
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
__gen7_gt_force_wake_mt_reset(dev_priv);
 
if (restore) { /* If reset with a user forcewake, try to restore */
unsigned fw = 0;
 
if (IS_VALLEYVIEW(dev)) {
if (dev_priv->uncore.fw_rendercount)
fw |= FORCEWAKE_RENDER;
 
if (dev_priv->uncore.fw_mediacount)
fw |= FORCEWAKE_MEDIA;
} else {
if (dev_priv->uncore.forcewake_count)
fw = FORCEWAKE_ALL;
}
 
if (fw)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
 
if (IS_GEN6(dev) || IS_GEN7(dev))
dev_priv->uncore.fifo_count =
__raw_i915_read32(dev_priv, GTFIFOCTL) &
GT_FIFO_FREE_ENTRIES_MASK;
}
 
void intel_uncore_early_sanitize(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
 
void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
321,7 → 370,7
if (HAS_FPGA_DBG_UNCLAIMED(dev))
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 
if (IS_HASWELL(dev) &&
if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
(__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
/* The docs do not explain exactly how the calculation can be
* made. It is somewhat guessable, but for now, it's always
337,30 → 386,14
__raw_i915_write32(dev_priv, GTFIFODBG,
__raw_i915_read32(dev_priv, GTFIFODBG));
 
intel_uncore_forcewake_reset(dev);
intel_uncore_forcewake_reset(dev, restore_forcewake);
}
 
void intel_uncore_sanitize(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg_val;
 
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev);
 
/* Turn off power gate, require especially for the BIOS less system */
if (IS_VALLEYVIEW(dev)) {
 
mutex_lock(&dev_priv->rps.hw_lock);
reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
 
if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
 
mutex_unlock(&dev_priv->rps.hw_lock);
 
}
}
 
/*
* Generally this is called implicitly by the register read function. However,
393,31 → 426,84
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
unsigned long irqflags;
bool delayed = false;
 
if (!dev_priv->uncore.funcs.force_wake_put)
return;
 
/* Redirect to VLV specific routine */
if (IS_VALLEYVIEW(dev_priv->dev))
return vlv_force_wake_put(dev_priv, fw_engine);
if (IS_VALLEYVIEW(dev_priv->dev)) {
vlv_force_wake_put(dev_priv, fw_engine);
goto out;
}
 
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
WARN_ON(!dev_priv->uncore.forcewake_count);
 
if (--dev_priv->uncore.forcewake_count == 0) {
dev_priv->uncore.forcewake_count++;
mod_delayed_work(dev_priv->wq,
&dev_priv->uncore.force_wake_work,
1);
delayed = true;
// mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
// GetTimerTicks() + 1);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
out:
if (!delayed)
intel_runtime_pm_put(dev_priv);
}
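/*
 * When the last reference would be dropped, an extra count is kept and the
 * actual release is deferred to gen6_force_wake_timer(); note that the
 * timer re-arm is currently commented out in this port.
 */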
 
void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
if (!dev_priv->uncore.funcs.force_wake_get)
return;
 
WARN_ON(dev_priv->uncore.forcewake_count > 0);
}
 
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
((reg) < 0x40000 && (reg) != FORCEWAKE)
 
#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
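/*
 * Example (illustrative): REG_RANGE(0x2100, 0x2000, 0x4000) is true, so on
 * VLV a read of register 0x2100 falls into the render forcewake range
 * defined below.
 */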
 
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x2000, 0x4000) || \
REG_RANGE((reg), 0x5000, 0x8000) || \
REG_RANGE((reg), 0xB000, 0x12000) || \
REG_RANGE((reg), 0x2E000, 0x30000))
 
#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x12000, 0x14000) || \
REG_RANGE((reg), 0x22000, 0x24000) || \
REG_RANGE((reg), 0x30000, 0x40000))
 
#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x2000, 0x4000) || \
REG_RANGE((reg), 0x5000, 0x8000) || \
REG_RANGE((reg), 0x8300, 0x8500) || \
REG_RANGE((reg), 0xB000, 0xC000) || \
REG_RANGE((reg), 0xE000, 0xE800))
 
#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x8800, 0x8900) || \
REG_RANGE((reg), 0xD000, 0xD800) || \
REG_RANGE((reg), 0x12000, 0x14000) || \
REG_RANGE((reg), 0x1A000, 0x1C000) || \
REG_RANGE((reg), 0x1E800, 0x1EA00) || \
REG_RANGE((reg), 0x30000, 0x40000))
 
#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x4000, 0x5000) || \
REG_RANGE((reg), 0x8000, 0x8300) || \
REG_RANGE((reg), 0x8500, 0x8600) || \
REG_RANGE((reg), 0x9000, 0xB000) || \
REG_RANGE((reg), 0xC000, 0xC800) || \
REG_RANGE((reg), 0xF000, 0x10000) || \
REG_RANGE((reg), 0x14000, 0x14400) || \
REG_RANGE((reg), 0x22000, 0x24000))
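/*
 * On CHV the "common" ranges need both power wells: the chv_read/chv_write
 * accessors below OR FORCEWAKE_RENDER and FORCEWAKE_MEDIA into fwengine
 * for registers matching FORCEWAKE_CHV_COMMON_RANGE_OFFSET().
 */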
 
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
428,34 → 514,38
}
 
static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
bool before)
{
const char *op = read ? "reading" : "writing to";
const char *when = before ? "before" : "after";
 
if (!i915.mmio_debug)
return;
 
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
DRM_ERROR("Unknown unclaimed register before writing to %x\n",
reg);
WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
when, op, reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
 
static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
if (i915.mmio_debug)
return;
 
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
DRM_ERROR("Unclaimed write to %x\n", reg);
DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
 
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
"Device suspended\n");
}
 
#define REG_READ_HEADER(x) \
unsigned long irqflags; \
u##x val = 0; \
assert_device_not_suspended(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
#define REG_READ_FOOTER \
484,17 → 574,18
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
if (dev_priv->uncore.forcewake_count == 0) \
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (dev_priv->uncore.forcewake_count == 0 && \
NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
FORCEWAKE_ALL); \
val = __raw_i915_read##x(dev_priv, reg); \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_put(dev_priv, \
FORCEWAKE_ALL); \
} else { \
val = __raw_i915_read##x(dev_priv, reg); \
} \
hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
REG_READ_FOOTER; \
}
 
502,31 → 593,51
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
unsigned fwengine = 0; \
unsigned *fwcount; \
REG_READ_HEADER(x); \
if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine = FORCEWAKE_RENDER; \
fwcount = &dev_priv->uncore.fw_rendercount; \
} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine = FORCEWAKE_MEDIA; \
} \
else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
val = __raw_i915_read##x(dev_priv, reg); \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
REG_READ_FOOTER; \
}
 
#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
unsigned fwengine = 0; \
REG_READ_HEADER(x); \
if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine = FORCEWAKE_RENDER; \
} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine = FORCEWAKE_MEDIA; \
fwcount = &dev_priv->uncore.fw_mediacount; \
} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine |= FORCEWAKE_RENDER; \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine |= FORCEWAKE_MEDIA; \
} \
if (fwengine != 0) { \
if ((*fwcount)++ == 0) \
(dev_priv)->uncore.funcs.force_wake_get(dev_priv, \
fwengine); \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
val = __raw_i915_read##x(dev_priv, reg); \
if (--(*fwcount) == 0) \
(dev_priv)->uncore.funcs.force_wake_put(dev_priv, \
fwengine); \
} else { \
val = __raw_i915_read##x(dev_priv, reg); \
} \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
REG_READ_FOOTER; \
}
 
 
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
544,6 → 655,7
__gen4_read(32)
__gen4_read(64)
 
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
554,6 → 666,7
#define REG_WRITE_HEADER \
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_device_not_suspended(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
#define REG_WRITE_FOOTER \
584,7 → 697,6
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
assert_device_not_suspended(dev_priv); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
600,13 → 712,13
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
assert_device_not_suspended(dev_priv); \
hsw_unclaimed_reg_clear(dev_priv, reg); \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_check(dev_priv, reg); \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
REG_WRITE_FOOTER; \
}
 
634,20 → 746,56
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
bool __needs_put = reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg); \
REG_WRITE_HEADER; \
if (__needs_put) { \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
FORCEWAKE_ALL); \
} \
__raw_i915_write##x(dev_priv, reg, val); \
if (__needs_put) { \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_put(dev_priv, \
FORCEWAKE_ALL); \
} else { \
__raw_i915_write##x(dev_priv, reg, val); \
} \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
REG_WRITE_FOOTER; \
}
 
#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
unsigned fwengine = 0; \
bool shadowed = is_gen8_shadowed(dev_priv, reg); \
REG_WRITE_HEADER; \
if (!shadowed) { \
if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine = FORCEWAKE_RENDER; \
} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine = FORCEWAKE_MEDIA; \
} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine |= FORCEWAKE_RENDER; \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine |= FORCEWAKE_MEDIA; \
} \
} \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
__raw_i915_write##x(dev_priv, reg, val); \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
REG_WRITE_FOOTER; \
}
 
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
669,6 → 817,7
__gen4_write(32)
__gen4_write(64)
 
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
681,15 → 830,17
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
gen6_force_wake_work);
setup_timer(&dev_priv->uncore.force_wake_timer,
gen6_force_wake_timer, (unsigned long)dev_priv);
 
intel_uncore_early_sanitize(dev, false);
 
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
} else if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
 
703,16 → 854,16
* forcewake being disabled.
*/
mutex_lock(&dev->struct_mutex);
__gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
__gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
 
if (ecobus & FORCEWAKE_MT_ENABLE) {
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_mt_get;
__gen7_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_mt_put;
__gen7_gt_force_wake_mt_put;
} else {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
730,6 → 881,17
 
switch (INTEL_INFO(dev)->gen) {
default:
if (IS_CHERRYVIEW(dev)) {
dev_priv->uncore.funcs.mmio_writeb = chv_write8;
dev_priv->uncore.funcs.mmio_writew = chv_write16;
dev_priv->uncore.funcs.mmio_writel = chv_write32;
dev_priv->uncore.funcs.mmio_writeq = chv_write64;
dev_priv->uncore.funcs.mmio_readb = chv_read8;
dev_priv->uncore.funcs.mmio_readw = chv_read16;
dev_priv->uncore.funcs.mmio_readl = chv_read32;
dev_priv->uncore.funcs.mmio_readq = chv_read64;
 
} else {
dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
dev_priv->uncore.funcs.mmio_writew = gen8_write16;
dev_priv->uncore.funcs.mmio_writel = gen8_write32;
738,6 → 900,7
dev_priv->uncore.funcs.mmio_readw = gen6_read16;
dev_priv->uncore.funcs.mmio_readl = gen6_read32;
dev_priv->uncore.funcs.mmio_readq = gen6_read64;
}
break;
case 7:
case 6:
792,20 → 955,20
 
void intel_uncore_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
// flush_delayed_work(&dev_priv->uncore.force_wake_work);
 
/* Paranoia: make sure we have disabled everything before we exit. */
intel_uncore_sanitize(dev);
intel_uncore_forcewake_reset(dev, false);
}
 
#define GEN_RANGE(l, h) GENMASK(h, l)
 
static const struct register_whitelist {
uint64_t offset;
uint32_t size;
uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
uint32_t gen_bitmask;
} whitelist[] = {
{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0x1F0 },
{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
};
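/*
 * GEN_RANGE(4, 8) expands to GENMASK(8, 4) == 0x1F0, i.e. bits 4..8 set,
 * which matches the hard-coded mask it replaces for gens 4 through 8.
 */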
 
int i915_reg_read_ioctl(struct drm_device *dev,
814,7 → 977,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reg_read *reg = data;
struct register_whitelist const *entry = whitelist;
int i;
int i, ret = 0;
 
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (entry->offset == reg->offset &&
840,10 → 1003,12
break;
default:
WARN_ON(1);
return -EINVAL;
ret = -EINVAL;
goto out;
}
 
return 0;
out:
return ret;
}
 
int i915_get_reset_stats_ioctl(struct drm_device *dev,
852,23 → 1017,22
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reset_stats *args = data;
struct i915_ctx_hang_stats *hs;
struct intel_context *ctx;
int ret;
 
if (args->flags || args->pad)
return -EINVAL;
 
if (args->ctx_id == DEFAULT_CONTEXT_ID)
return -EPERM;
 
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
 
hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id);
if (IS_ERR(hs)) {
ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(hs);
return PTR_ERR(ctx);
}
hs = &ctx->hang_stats;
 
args->reset_count = i915_reset_count(&dev_priv->gpu_error);
 
891,6 → 1055,9
{
int ret;
 
/* FIXME: i965g/gm need a display save/restore for gpu reset. */
return -ENODEV;
 
/*
* Set the domains we want to reset (GRDOM/bits 2 and 3) as
* well as the reset bit (GR/bit 0). Setting the GR bit
902,7 → 1069,6
if (ret)
return ret;
 
/* We can't reset render&media without also resetting display ... */
pci_write_config_byte(dev->pdev, I965_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
 
915,26 → 1081,58
return 0;
}
 
static int g4x_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
pci_write_config_byte(dev->pdev, I965_GDRST,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for(i965_reset_complete(dev), 500);
if (ret)
return ret;
 
/* WaVcpClkGateDisableForMediaReset:ctg,elk */
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
 
pci_write_config_byte(dev->pdev, I965_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
ret = wait_for(i965_reset_complete(dev), 500);
if (ret)
return ret;
 
/* WaVcpClkGateDisableForMediaReset:ctg,elk */
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
 
pci_write_config_byte(dev->pdev, I965_GDRST, 0);
 
return 0;
}
 
static int ironlake_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 gdrst;
int ret;
 
gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
gdrst &= ~GRDOM_MASK;
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
ILK_GRDOM_RESET_ENABLE) == 0, 500);
if (ret)
return ret;
 
/* We can't reset render&media without also resetting display ... */
gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
gdrst &= ~GRDOM_MASK;
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
ILK_GRDOM_RESET_ENABLE) == 0, 500);
if (ret)
return ret;
 
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);
 
return 0;
}
 
static int gen6_do_reset(struct drm_device *dev)
941,13 → 1139,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
unsigned long irqflags;
 
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly
*/
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
/* Reset the chip */
 
/* GEN6_GDRST is not in the gt power well, no need to check
959,32 → 1151,24
/* Spin waiting for the device to ack the reset request */
ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
 
intel_uncore_forcewake_reset(dev);
intel_uncore_forcewake_reset(dev, true);
 
/* If reset with a user forcewake, try to restore, otherwise turn it off */
if (dev_priv->uncore.forcewake_count)
dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
else
dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
 
/* Restore fifo count */
dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
return ret;
}
 
int intel_gpu_reset(struct drm_device *dev)
{
switch (INTEL_INFO(dev)->gen) {
case 8:
case 7:
case 6: return gen6_do_reset(dev);
case 5: return ironlake_do_reset(dev);
case 4: return i965_do_reset(dev);
default: return -ENODEV;
if (INTEL_INFO(dev)->gen >= 6)
return gen6_do_reset(dev);
else if (IS_GEN5(dev))
return ironlake_do_reset(dev);
else if (IS_G4X(dev))
return g4x_do_reset(dev);
else if (IS_GEN4(dev))
return i965_do_reset(dev);
else
return -ENODEV;
}
}
 
void intel_uncore_check_errors(struct drm_device *dev)
{
/drivers/video/drm/i915/kms_display.c
1,12 → 1,10
 
#define iowrite32(v, addr) writel((v), (addr))
 
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include <drm/drmP.h>
#include <uapi/drm/drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
 
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
29,8 → 27,8
struct drm_i915_gem_object *cobj;
}cursor_t;
 
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
#define KMS_CURSOR_WIDTH 64
#define KMS_CURSOR_HEIGHT 64
 
 
struct tag_display
96,30 → 94,26
return name;
}
 
bool set_mode(struct drm_device *dev, struct drm_connector *connector,
videomode_t *reqmode, bool strict)
static int set_mode(struct drm_device *dev, struct drm_connector *connector,
struct drm_crtc *crtc, videomode_t *reqmode, bool strict)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_fb_helper *fb_helper = &dev_priv->fbdev->helper;
struct drm_i915_private *dev_priv = dev->dev_private;
 
struct drm_mode_config *config = &dev->mode_config;
struct drm_display_mode *mode = NULL, *tmpmode;
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_mode_set set;
const char *con_name;
const char *enc_name;
unsigned hdisplay, vdisplay;
int stride;
int ret;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
 
list_for_each_entry(tmpmode, &connector->modes, head)
{
if( (drm_mode_width(tmpmode) == reqmode->width) &&
(drm_mode_height(tmpmode) == reqmode->height) &&
if( (tmpmode->hdisplay == reqmode->width) &&
(tmpmode->vdisplay == reqmode->height) &&
(drm_mode_vrefresh(tmpmode) == reqmode->freq) )
{
mode = tmpmode;
131,8 → 125,8
{
list_for_each_entry(tmpmode, &connector->modes, head)
{
if( (drm_mode_width(tmpmode) == reqmode->width) &&
(drm_mode_height(tmpmode) == reqmode->height) )
if( (tmpmode->hdisplay == reqmode->width) &&
(tmpmode->vdisplay == reqmode->height) )
{
mode = tmpmode;
goto do_set;
146,15 → 140,11
 
do_set:
 
encoder = connector->encoder;
crtc = encoder->crtc;
con_name = connector->name;
 
con_name = drm_get_connector_name(connector);
enc_name = drm_get_encoder_name(encoder);
 
DRM_DEBUG_KMS("set mode %d %d: crtc %d connector %s encoder %s\n",
DRM_DEBUG_KMS("set mode %d %d: crtc %d connector %s\n",
reqmode->width, reqmode->height, crtc->base.id,
con_name, enc_name);
con_name);
 
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
164,15 → 154,15
if (crtc->invert_dimensions)
swap(hdisplay, vdisplay);
 
fb = fb_helper->fb;
fb = crtc->primary->fb;
 
fb->width = reqmode->width;
fb->height = reqmode->height;
 
if(dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_NONE)
{
main_fb_obj->tiling_mode = I915_TILING_X;
 
if( main_fb_obj->tiling_mode == I915_TILING_X)
{
if(IS_GEN3(dev))
for (stride = 512; stride < reqmode->width * 4; stride <<= 1);
else
180,7 → 170,6
}
else
{
main_fb_obj->tiling_mode = I915_TILING_NONE;
stride = ALIGN(reqmode->width * 4, 64);
}
 
194,10 → 183,10
fb->bits_per_pixel = 32;
fb->depth = 24;
 
crtc->fb = fb;
crtc->enabled = true;
os_display->crtc = crtc;
 
// i915_gem_object_unpin_fence(main_fb_obj);
i915_gem_object_put_fence(main_fb_obj);
 
set.crtc = crtc;
207,9 → 196,11
set.connectors = &connector;
set.num_connectors = 1;
set.fb = fb;
ret = crtc->funcs->set_config(&set);
mutex_unlock(&dev->mode_config.mutex);
 
ret = drm_mode_set_config_internal(&set);
 
drm_modeset_unlock_all(dev);
 
if ( !ret )
{
os_display->width = fb->width;
240,97 → 231,75
return count;
};
 
static struct drm_connector* get_def_connector(struct drm_device *dev)
static struct drm_crtc *get_possible_crtc(struct drm_device *dev, struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_crtc *tmp_crtc;
int crtc_mask = 1;
 
struct drm_connector *def_connector = NULL;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head)
{
struct drm_encoder *encoder;
struct drm_crtc *crtc;
 
if( connector->status != connector_status_connected)
continue;
 
connector_funcs = connector->helper_private;
encoder = connector_funcs->best_encoder(connector);
if( encoder == NULL)
continue;
 
connector->encoder = encoder;
 
crtc = encoder->crtc;
 
DRM_DEBUG_KMS("CONNECTOR %x ID: %d status %d encoder %x\n crtc %x",
connector, connector->base.id,
connector->status, connector->encoder,
crtc);
 
// if (crtc == NULL)
// continue;
 
def_connector = connector;
 
break;
if (encoder->possible_crtcs & crtc_mask)
{
encoder->crtc = tmp_crtc;
DRM_DEBUG_KMS("use CRTC %p ID %d\n", tmp_crtc, tmp_crtc->base.id);
return tmp_crtc;
};
 
return def_connector;
crtc_mask <<= 1;
};
return NULL;
};
 
struct drm_connector *get_active_connector(struct drm_device *dev)
static int choose_config(struct drm_device *dev, struct drm_connector **boot_connector,
struct drm_crtc **boot_crtc)
{
struct drm_connector *tmp = NULL;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
 
list_for_each_entry(tmp, &dev->mode_config.connector_list, head)
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
{
if( tmp->status != connector_status_connected)
if( connector->status != connector_status_connected)
continue;
 
connector_funcs = tmp->helper_private;
encoder = connector_funcs->best_encoder(tmp);
encoder = connector->encoder;
 
if( encoder == NULL)
{
connector_funcs = connector->helper_private;
encoder = connector_funcs->best_encoder(connector);
 
if( encoder == NULL)
{
DRM_DEBUG_KMS("CONNECTOR %x ID: %d no active encoders\n",
tmp, tmp->base.id);
connector, connector->base.id);
continue;
};
}
 
tmp->encoder = encoder;
crtc = encoder->crtc;
if(crtc == NULL)
crtc = get_possible_crtc(dev, encoder);
 
DRM_DEBUG_KMS("CONNECTOR %p ID:%d status:%d ENCODER %x CRTC %p ID:%d\n",
tmp, tmp->base.id, tmp->status, tmp->encoder,
tmp->encoder->crtc, tmp->encoder->crtc->base.id );
if(crtc != NULL)
{
*boot_connector = connector;
*boot_crtc = crtc;
 
return tmp;
};
 
return NULL;
DRM_DEBUG_KMS("CONNECTOR %p ID:%d status:%d ENCODER %p ID: %d CRTC %p ID:%d\n",
connector, connector->base.id, connector->status,
encoder, encoder->base.id, crtc, crtc->base.id );
return 0;
}
else
DRM_DEBUG_KMS("No CRTC for encoder %d\n", encoder->base.id);
 
struct drm_crtc *get_possible_crtc(struct drm_device *dev, struct drm_encoder *encoder)
{
struct drm_crtc *tmp_crtc;
int crtc_mask = 1;
};
 
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head)
{
if (encoder->possible_crtcs & crtc_mask)
{
encoder->crtc = tmp_crtc;
dbgprintf("CRTC %p\n", tmp_crtc);
return tmp_crtc;
return -ENOENT;
};
crtc_mask <<= 1;
};
return NULL;
};
 
int get_boot_mode(struct drm_connector *connector, videomode_t *usermode)
static int get_boot_mode(struct drm_connector *connector, videomode_t *usermode)
{
struct drm_display_mode *mode;
 
337,11 → 306,11
list_for_each_entry(mode, &connector->modes, head)
{
DRM_DEBUG_KMS("check mode w:%d h:%d %dHz\n",
drm_mode_width(mode), drm_mode_height(mode),
mode->hdisplay, mode->vdisplay,
drm_mode_vrefresh(mode));
 
if( os_display->width == drm_mode_width(mode) &&
os_display->height == drm_mode_height(mode) &&
if( os_display->width == mode->hdisplay &&
os_display->height == mode->vdisplay &&
drm_mode_vrefresh(mode) == 60)
{
usermode->width = os_display->width;
355,20 → 324,19
 
int init_display_kms(struct drm_device *dev, videomode_t *usermode)
{
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
struct drm_connector *connector = NULL;
struct drm_crtc *crtc = NULL;
struct drm_framebuffer *fb;
 
cursor_t *cursor;
u32_t ifl;
int err;
int ret;
 
mutex_lock(&dev->mode_config.mutex);
 
connector = get_active_connector(dev) ;
if(connector == NULL)
ret = choose_config(dev, &connector, &crtc);
if(ret)
{
DRM_DEBUG_KMS("No active connectors!\n");
mutex_unlock(&dev->mode_config.mutex);
375,21 → 343,24
return -1;
};
 
encoder = connector->encoder;
crtc = encoder->crtc;
mutex_lock(&dev->object_name_lock);
idr_preload(GFP_KERNEL);
 
if(crtc == NULL)
crtc = get_possible_crtc(dev, encoder);
if (!main_fb_obj->base.name) {
ret = idr_alloc(&dev->object_name_idr, &main_fb_obj->base, 1, 0, GFP_NOWAIT);
 
if(crtc == NULL)
{
DRM_DEBUG_KMS("No CRTC for encoder %d\n", encoder->base.id);
mutex_unlock(&dev->mode_config.mutex);
return -1;
};
main_fb_obj->base.name = ret;
 
DRM_DEBUG_KMS("[Select CRTC: %p ID:%d]\n",crtc, crtc->base.id);
/* Allocate a reference for the name table. */
drm_gem_object_reference(&main_fb_obj->base);
 
DRM_DEBUG_KMS("%s allocate fb name %d\n", __FUNCTION__, main_fb_obj->base.name );
}
 
idr_preload_end();
mutex_unlock(&dev->object_name_lock);
drm_gem_object_unreference(&main_fb_obj->base);
 
os_display = GetDisplay();
os_display->ddev = dev;
os_display->connector = connector;
397,11 → 368,8
 
os_display->supported_modes = count_connector_modes(connector);
 
 
ifl = safe_cli();
{
struct intel_crtc *intel_crtc = to_intel_crtc(os_display->crtc);
 
list_for_each_entry(cursor, &os_display->cursors, list)
{
init_cursor(cursor);
415,8 → 383,8
os_display->restore_cursor = restore_cursor;
os_display->disable_mouse = disable_mouse;
 
intel_crtc->cursor_x = os_display->width/2;
intel_crtc->cursor_y = os_display->height/2;
crtc->cursor_x = os_display->width/2;
crtc->cursor_y = os_display->height/2;
 
select_cursor_kms(os_display->cursor);
};
430,8 → 398,8
struct drm_display_mode *mode;
 
mode = list_entry(connector->modes.next, typeof(*mode), head);
usermode->width = drm_mode_width(mode);
usermode->height = drm_mode_height(mode);
usermode->width = mode->hdisplay;
usermode->height = mode->vdisplay;
usermode->freq = drm_mode_vrefresh(mode);
};
};
438,7 → 406,7
 
mutex_unlock(&dev->mode_config.mutex);
 
set_mode(dev, os_display->connector, usermode, false);
set_mode(dev, os_display->connector, os_display->crtc, usermode, false);
 
#ifdef __HWA__
err = init_bitmaps();
473,8 → 441,8
{
if( i < *count)
{
mode->width = drm_mode_width(drmmode);
mode->height = drm_mode_height(drmmode);
mode->width = drmmode->hdisplay;
mode->height = drmmode->vdisplay;
mode->bpp = 32;
mode->freq = drm_mode_vrefresh(drmmode);
i++;
490,7 → 458,6
 
int set_user_mode(videomode_t *mode)
{
int err = -1;
 
// dbgprintf("width %d height %d vrefresh %d\n",
// mode->width, mode->height, mode->freq);
502,11 → 469,10
(mode->height != os_display->height) ||
(mode->freq != os_display->vrefresh) ) )
{
if( set_mode(os_display->ddev, os_display->connector, mode, true) )
err = 0;
return set_mode(os_display->ddev, os_display->connector, os_display->crtc, mode, true);
};
 
return err;
return -1;
};
 
void i915_dpms(struct drm_device *dev, int mode)
520,7 → 486,7
{
list_del(&cursor->list);
 
i915_gem_object_unpin(cursor->cobj);
i915_gem_object_ggtt_unpin(cursor->cobj);
 
mutex_lock(&main_device->struct_mutex);
drm_gem_object_unreference(&cursor->cobj->base);
540,9 → 506,9
int i,j;
int ret;
 
if (dev_priv->info->cursor_needs_physical)
if (dev_priv->info.cursor_needs_physical)
{
bits = (uint32_t*)KernelAlloc(CURSOR_WIDTH*CURSOR_HEIGHT*4);
bits = (uint32_t*)KernelAlloc(KMS_CURSOR_WIDTH*KMS_CURSOR_HEIGHT*4);
if (unlikely(bits == NULL))
return -ENOMEM;
cursor->cobj = (struct drm_i915_gem_object *)GetPgAddr(bits);
549,11 → 515,11
}
else
{
obj = i915_gem_alloc_object(os_display->ddev, CURSOR_WIDTH*CURSOR_HEIGHT*4);
obj = i915_gem_alloc_object(os_display->ddev, KMS_CURSOR_WIDTH*KMS_CURSOR_HEIGHT*4);
if (unlikely(obj == NULL))
return -ENOMEM;
 
ret = i915_gem_obj_ggtt_pin(obj, CURSOR_WIDTH*CURSOR_HEIGHT*4, true, true);
ret = i915_gem_obj_ggtt_pin(obj, 0,PIN_MAPPABLE | PIN_NONBLOCK);
if (ret) {
drm_gem_object_unreference(&obj->base);
return ret;
562,7 → 528,7
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
{
i915_gem_object_unpin(obj);
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
return ret;
}
570,11 → 536,11
* GTT space is continuous. I guarantee it. */
 
mapped = bits = (u32*)MapIoMem(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
CURSOR_WIDTH*CURSOR_HEIGHT*4, PG_SW);
KMS_CURSOR_WIDTH*KMS_CURSOR_HEIGHT*4, PG_SW);
 
if (unlikely(bits == NULL))
{
i915_gem_object_unpin(obj);
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
return -ENOMEM;
};
587,10 → 553,10
{
for(j = 0; j < 32; j++)
*bits++ = *src++;
for(j = 32; j < CURSOR_WIDTH; j++)
for(j = 32; j < KMS_CURSOR_WIDTH; j++)
*bits++ = 0;
}
for(i = 0; i < CURSOR_WIDTH*(CURSOR_HEIGHT-32); i++)
for(i = 0; i < KMS_CURSOR_WIDTH*(KMS_CURSOR_HEIGHT-32); i++)
*bits++ = 0;
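/* The 32x32 source cursor is copied into the top-left corner of the
 * KMS_CURSOR_WIDTH x KMS_CURSOR_HEIGHT buffer; the rest is zero-filled so
 * the unused area stays transparent. */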
 
FreeKernelSpace(mapped);
613,9 → 579,14
x-= cursor->hot_x;
y-= cursor->hot_y;
 
if (crtc->funcs->cursor_move)
crtc->funcs->cursor_move(crtc, x, y);
crtc->cursor_x = x;
crtc->cursor_y = y;
 
intel_crtc_update_cursor(crtc, 1);
 
// if (crtc->funcs->cursor_move)
// crtc->funcs->cursor_move(crtc, x, y);
 
};
 
 
622,7 → 593,9
cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
{
struct drm_i915_private *dev_priv = os_display->ddev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(os_display->crtc);
struct drm_crtc *crtc = os_display->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
cursor_t *old;
 
old = os_display->cursor;
630,15 → 603,15
 
intel_crtc->cursor_bo = cursor->cobj;
 
if (!dev_priv->info->cursor_needs_physical)
if (!dev_priv->info.cursor_needs_physical)
intel_crtc->cursor_addr = i915_gem_obj_ggtt_offset(cursor->cobj);
else
intel_crtc->cursor_addr = (addr_t)cursor->cobj;
 
intel_crtc->cursor_width = 32;
intel_crtc->cursor_height = 32;
intel_crtc->cursor_width = 64;
intel_crtc->cursor_height = 64;
 
move_cursor_kms(cursor, intel_crtc->cursor_x, intel_crtc->cursor_y);
move_cursor_kms(cursor, crtc->cursor_x, crtc->cursor_y);
return old;
};
 
715,7 → 688,7
{
printf("left %d top %d right %d bottom %d\n",
winrc.left, winrc.top, winrc.right, winrc.bottom);
printf("mask pitch %d data %p\n", mask->bo_pitch, mask->bo_size);
printf("mask pitch %d data %p\n", mask->bo_pitch, mask->bo_map);
warn_count++;
};
#endif
/drivers/video/drm/i915/main.c
1,8 → 1,6
#include <drm/drmP.h>
#include <drm.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
//#include "intel_drv.h"
 
#include <linux/kernel.h>
#include <linux/module.h>
13,6 → 11,11
 
#include "bitmap.h"
 
#define I915_DEV_CLOSE 0
#define I915_DEV_INIT 1
#define I915_DEV_READY 2
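/*
 * Driver state machine (as used below): drvEntry() sets I915_DEV_INIT
 * before spawning i915_driver_thread(), advances to I915_DEV_READY once
 * i915_init() succeeds, and the thread runs until the state is set to
 * I915_DEV_CLOSE when the driver is asked to stop.
 */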
 
 
struct pci_device {
uint16_t domain;
uint8_t bus;
52,11 → 55,11
struct drm_file *file);
 
struct cmdtable cmdtable[]= {
CMDENTRY("-pm=", i915_powersave),
CMDENTRY("-rc6=", i915_enable_rc6),
CMDENTRY("-fbc=", i915_enable_fbc),
CMDENTRY("-ppgt=", i915_enable_ppgtt),
CMDENTRY("-pc8=", i915_enable_pc8),
// CMDENTRY("-pm=", i915_powersave),
// CMDENTRY("-rc6=", i915_enable_rc6),
// CMDENTRY("-fbc=", i915_enable_fbc),
// CMDENTRY("-ppgt=", i915_enable_ppgtt),
// CMDENTRY("-pc8=", i915_enable_pc8),
{NULL, 0}
};
 
63,6 → 66,8
 
static char log[256];
 
unsigned long volatile jiffies;
 
struct workqueue_struct *system_wq;
int driver_wq_state;
 
93,8 → 98,8
 
void i915_driver_thread()
{
struct drm_i915_private *dev_priv = main_device->dev_private;
struct workqueue_struct *cwq = dev_priv->wq;
struct drm_i915_private *dev_priv = NULL;
struct workqueue_struct *cwq = NULL;
static int dpms = 1;
static int dpms_lock = 0;
oskey_t key;
103,12 → 108,23
 
printf("%s\n",__FUNCTION__);
 
while(driver_wq_state == I915_DEV_INIT)
{
jiffies = GetTimerTicks();
delay(1);
};
 
dev_priv = main_device->dev_private;
cwq = dev_priv->wq;
 
asm volatile("int $0x40":"=a"(tmp):"a"(66),"b"(1),"c"(1));
asm volatile("int $0x40":"=a"(tmp):"a"(66),"b"(4),"c"(0x46),"d"(0x330));
asm volatile("int $0x40":"=a"(tmp):"a"(66),"b"(4),"c"(0xC6),"d"(0x330));
 
while(driver_wq_state != 0)
while(driver_wq_state != I915_DEV_CLOSE)
{
jiffies = GetTimerTicks();
 
key = get_key();
 
if( (key.val != 1) && (key.state == 0x02))
156,11 → 172,14
 
u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
{
static pci_dev_t device;
const struct pci_device_id *ent;
 
int err = 0;
 
if(action != 1)
{
driver_wq_state = 0;
driver_wq_state = I915_DEV_CLOSE;
return 0;
};
 
167,7 → 186,7
if( GetService("DISPLAY") != 0 )
return 0;
 
printf("\ni915 v3.14-rc1 build %s %s\nusage: i915 [options]\n"
printf("\ni915 v3.17-rc2 build %s %s\nusage: i915 [options]\n"
"-pm=<0,1> Enable powersavings, fbc, downclocking, etc. (default: 1 - true)\n",
__DATE__, __TIME__);
printf("-rc6=<-1,0-7> Enable power-saving render C-state 6.\n"
194,15 → 213,26
cpu_detect();
// dbgprintf("\ncache line size %d\n", x86_clflush_size);
 
enum_pci_devices();
err = enum_pci_devices();
if( unlikely(err != 0) )
{
dbgprintf("Device enumeration failed\n");
return 0;
}
 
driver_wq_state = I915_DEV_INIT;
CreateKernelThread(i915_driver_thread);
 
err = i915_init();
if(err)
if(unlikely(err!= 0))
{
driver_wq_state = I915_DEV_CLOSE;
dbgprintf("Epic Fail :(\n");
return 0;
};
 
driver_wq_state = I915_DEV_READY;
 
init_display_kms(main_device, &usermode);
 
err = RegService("DISPLAY", display_handler);
210,10 → 240,7
if( err != 0)
dbgprintf("Set DISPLAY handler\n");
 
driver_wq_state = 1;
 
CreateKernelThread(i915_driver_thread);
 
return err;
};
 
318,28 → 345,6
retval = get_driver_caps((hwcaps_t*)inp);
break;
 
case SRV_CREATE_SURFACE:
// check_input(8);
// retval = create_surface(main_device, (struct io_call_10*)inp);
break;
 
case SRV_LOCK_SURFACE:
// retval = lock_surface((struct io_call_12*)inp);
break;
 
case SRV_RESIZE_SURFACE:
// retval = resize_surface((struct io_call_14*)inp);
break;
 
case SRV_BLIT_BITMAP:
// srv_blit_bitmap( inp[0], inp[1], inp[2],
// inp[3], inp[4], inp[5], inp[6]);
 
// blit_tex( inp[0], inp[1], inp[2],
// inp[3], inp[4], inp[5], inp[6]);
 
break;
 
case SRV_GET_PCI_INFO:
get_pci_info((struct pci_device *)inp);
retval = 0;
507,10 → 512,169
__cpuid(eax, ebx, ecx, edx);
}
 
struct mtrr
{
u64_t base;
u64_t mask;
};
 
struct cpuinfo
{
u64_t caps;
u64_t def_mtrr;
u64_t mtrr_cap;
int var_mtrr_count;
int fix_mtrr_count;
struct mtrr var_mtrr[9];
char model_name[64];
};
 
#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
 
#define MSR_MTRRdefType 0x000002ff
 
#define IA32_MTRRCAP 0xFE
#define IA32_CR_PAT_MSR 0x277
 
#define PAT_TYPE_UC 0
#define PAT_TYPE_WC 1
#define PAT_TYPE_WB 6
#define PAT_TYPE_UCM 7
 
 
#define MTRR_UC 0
#define MTRR_WC 1
#define MTRR_WB 6
 
static inline u64_t read_msr(u32_t msr)
{
union {
u64_t val;
struct {
u32_t low;
u32_t high;
};
}tmp;
 
asm volatile (
"rdmsr"
: "=a" (tmp.low), "=d" (tmp.high)
: "c" (msr));
return tmp.val;
}
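/* Illustrative use with the MTRR MSRs defined above: read_msr(MSR_MTRRdefType)
 * returns the 64-bit IA32_MTRR_DEF_TYPE register, where bit 11 (E) enables MTRRs,
 * bit 10 (FE) enables the fixed-range MTRRs and bits 7:0 hold the default memory
 * type; these are exactly the bits the "deftype_lo & ~0xcff" write in the MTRR
 * setup below clears. */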
 
static inline void write_msr(u32_t msr, u64_t val)
{
union {
u64_t val;
struct {
u32_t low;
u32_t high;
};
}tmp;
 
tmp.val = val;
 
asm volatile (
"wrmsr"
:: "a" (tmp.low), "d" (tmp.high), "c" (msr));
}
 
#define rdmsr(msr, low, high) \
do { \
u64 __val = read_msr((msr)); \
(void)((low) = (u32)__val); \
(void)((high) = (u32)(__val >> 32)); \
} while (0)
 
static inline void native_write_msr(unsigned int msr,
unsigned low, unsigned high)
{
asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
}
 
static inline void wbinvd(void)
{
asm volatile("wbinvd": : :"memory");
}
 
#define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1))
 
static void set_mtrr(unsigned int reg, unsigned long base,
unsigned long size, int type)
{
unsigned int base_lo, base_hi, mask_lo, mask_hi;
u64 size_or_mask, size_and_mask;
 
size_or_mask = SIZE_OR_MASK_BITS(36);
size_and_mask = 0x00f00000;
 
if (size == 0) {
/*
* The invalid bit is kept in the mask, so we simply
* clear the relevant mask register to disable a range.
*/
native_write_msr(MTRRphysMask_MSR(reg), 0, 0);
}
else {
base_lo = base << PAGE_SHIFT | type;
base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
mask_lo = -size << PAGE_SHIFT | 0x800;
mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
 
native_write_msr(MTRRphysBase_MSR(reg), base_lo, base_hi);
native_write_msr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
};
}
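/* Worked example (illustrative): set_mtrr(5, 0xE0000000>>12, 0x10000000>>12, MTRR_WC)
 * programs IA32_MTRR_PHYSBASE5 = 0x00000000E0000001 (base 0xE0000000, type WC) and
 * IA32_MTRR_PHYSMASK5 = 0x0000000FF0000800 (256 MiB range, valid bit 11 set), i.e. it
 * marks 0xE0000000-0xEFFFFFFF write-combining on a CPU with 36 physical address bits. */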
 
static unsigned long __force_order;
 
static inline unsigned long read_cr0(void)
{
unsigned long val;
asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}
 
static inline void write_cr0(unsigned long val)
{
asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}
 
static inline unsigned long read_cr4(void)
{
unsigned long val;
asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}
 
static inline void write_cr4(unsigned long val)
{
asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}
 
static inline unsigned long read_cr3(void)
{
unsigned long val;
asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}
 
static inline void write_cr3(unsigned long val)
{
asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}
 
static u32 deftype_lo, deftype_hi;
 
void cpu_detect()
{
struct cpuinfo cpuinfo;
 
u32 junk, tfms, cap0, misc;
 
int i;
#if 0
cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 
if (cap0 & (1<<19))
518,6 → 682,97
x86_clflush_size = ((misc >> 8) & 0xff) * 8;
}
 
cpuid(0x80000002, (unsigned int*)&cpuinfo.model_name[0], (unsigned int*)&cpuinfo.model_name[4],
(unsigned int*)&cpuinfo.model_name[8], (unsigned int*)&cpuinfo.model_name[12]);
cpuid(0x80000003, (unsigned int*)&cpuinfo.model_name[16], (unsigned int*)&cpuinfo.model_name[20],
(unsigned int*)&cpuinfo.model_name[24], (unsigned int*)&cpuinfo.model_name[28]);
cpuid(0x80000004, (unsigned int*)&cpuinfo.model_name[32], (unsigned int*)&cpuinfo.model_name[36],
(unsigned int*)&cpuinfo.model_name[40], (unsigned int*)&cpuinfo.model_name[44]);
 
printf("\n%s\n\n",cpuinfo.model_name);
 
cpuinfo.def_mtrr = read_msr(MSR_MTRRdefType);
cpuinfo.mtrr_cap = read_msr(IA32_MTRRCAP);
 
printf("MSR_MTRRdefType %016llx\n\n", cpuinfo.def_mtrr);
 
cpuinfo.var_mtrr_count = (u8_t)cpuinfo.mtrr_cap;
 
for(i = 0; i < cpuinfo.var_mtrr_count; i++)
{
u64_t mtrr_base;
u64_t mtrr_mask;
 
cpuinfo.var_mtrr[i].base = read_msr(MTRRphysBase_MSR(i));
cpuinfo.var_mtrr[i].mask = read_msr(MTRRphysMask_MSR(i));
 
printf("MTRR_%d base: %016llx mask: %016llx\n", i,
cpuinfo.var_mtrr[i].base,
cpuinfo.var_mtrr[i].mask);
};
 
unsigned int cr0, cr3, cr4, eflags;
 
eflags = safe_cli();
 
/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
cr0 = read_cr0() | (1<<30);
write_cr0(cr0);
wbinvd();
 
cr4 = read_cr4();
write_cr4(cr4 & ~(1<<7));
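/* Bit 7 of CR4 is PGE; clearing it invalidates global TLB entries, and together with
 * the CR3 reload below this flushes the TLB before the MTRRs are rewritten, as the
 * Intel-documented MTRR update procedure requires. */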
 
cr3 = read_cr3();
write_cr3(cr3);
 
/* Save MTRR state */
rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
 
/* Disable MTRRs, and set the default type to uncached */
native_write_msr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
wbinvd();
 
i = 0;
set_mtrr(i++,0,0x80000000>>12,MTRR_WB);
set_mtrr(i++,0x80000000>>12,0x40000000>>12,MTRR_WB);
set_mtrr(i++,0xC0000000>>12,0x20000000>>12,MTRR_WB);
set_mtrr(i++,0xdb800000>>12,0x00800000>>12,MTRR_UC);
set_mtrr(i++,0xdc000000>>12,0x04000000>>12,MTRR_UC);
set_mtrr(i++,0xE0000000>>12,0x10000000>>12,MTRR_WC);
 
for(; i < cpuinfo.var_mtrr_count; i++)
set_mtrr(i,0,0,0);
 
write_cr3(cr3);
 
/* Intel (P6) standard MTRRs */
native_write_msr(MSR_MTRRdefType, deftype_lo, deftype_hi);
 
/* Enable caches */
write_cr0(read_cr0() & ~(1<<30));
 
/* Restore value of CR4 */
write_cr4(cr4);
 
safe_sti(eflags);
 
printf("\nnew MTRR map\n\n");
 
for(i = 0; i < cpuinfo.var_mtrr_count; i++)
{
u64_t mtrr_base;
u64_t mtrr_mask;
 
cpuinfo.var_mtrr[i].base = read_msr(MTRRphysBase_MSR(i));
cpuinfo.var_mtrr[i].mask = read_msr(MTRRphysMask_MSR(i));
 
printf("MTRR_%d base: %016llx mask: %016llx\n", i,
cpuinfo.var_mtrr[i].base,
cpuinfo.var_mtrr[i].mask);
};
#endif
 
tsc_khz = (unsigned int)(GetCpuFreq()/1000);
}
 
/drivers/video/drm/i915/utils.c
191,34 → 191,7
}
 
 
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
int i;
 
i = vsnprintf(buf, size, fmt, args);
 
if (likely(i < size))
return i;
if (size != 0)
return size - 1;
return 0;
}
 
 
int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
 
va_start(args, fmt);
i = vscnprintf(buf, size, fmt, args);
va_end(args);
 
return i;
}
 
 
 
#define _U 0x01 /* upper */
#define _L 0x02 /* lower */
#define _D 0x04 /* digit */
505,3 → 478,35
}
 
 
 
void *kmap(struct page *page)
{
void *vaddr;
 
vaddr = (void*)MapIoMem(page_to_phys(page), 4096, PG_SW);
 
return vaddr;
}
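/* KolibriOS shim: unlike the Linux highmem kmap(), this simply maps the page's
 * physical address into kernel space as a single writable 4 KiB page via MapIoMem;
 * the matching unmap is expected to release that mapping (not shown here). */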
 
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
const unsigned long *p = addr;
unsigned long result = 0;
unsigned long tmp;
 
while (size & ~(BITS_PER_LONG-1)) {
if (~(tmp = *(p++)))
goto found;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
 
tmp = (*p) | (~0UL << size);
if (tmp == ~0UL) /* Are any bits zero? */
return result + size; /* Nope. */
found:
return result + ffz(tmp);
}
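/* Usage sketch (illustrative, 32-bit build where BITS_PER_LONG == 32):
 *   unsigned long map[2] = { ~0UL, 0x0000FFFF };
 *   find_first_zero_bit(map, 64);   // returns 48: word 0 is full, word 1 has bit 16 clear
 */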