Subversion Repositories Kolibri OS

Compare Revisions

Rev 6083 → Rev 6084

/drivers/video/drm/drm_atomic.c
30,7 → 30,15
#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>
 
static void kfree_state(struct drm_atomic_state *state)
/**
* drm_atomic_state_default_release -
* release memory initialized by drm_atomic_state_init
* @state: atomic state
*
* Free all the memory allocated by drm_atomic_state_init.
* This is useful for drivers that subclass the atomic state.
*/
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
kfree(state->connectors);
kfree(state->connector_states);
38,24 → 46,25
kfree(state->crtc_states);
kfree(state->planes);
kfree(state->plane_states);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);
 
/**
* drm_atomic_state_alloc - allocate atomic state
* drm_atomic_state_init - init new atomic state
* @dev: DRM device
* @state: atomic state
*
* This allocates an empty atomic state to track updates.
* Default implementation for filling in a new atomic state.
* This is useful for drivers that subclass the atomic state.
*/
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_atomic_state *state;
/* TODO legacy paths should maybe do a better job about
* setting this appropriately?
*/
state->allow_modeset = true;
 
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
 
state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
 
state->crtcs = kcalloc(dev->mode_config.num_crtc,
87,37 → 96,56
 
state->dev = dev;
 
DRM_DEBUG_KMS("Allocate atomic state %p\n", state);
DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);
 
return state;
return 0;
fail:
kfree_state(state);
drm_atomic_state_default_release(state);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_atomic_state_init);
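
The init/release split introduced here exists so that drivers can embed struct drm_atomic_state in a larger structure. A minimal sketch of such a subclass, wired up through the new atomic_state_alloc/atomic_state_free mode_config hooks (not part of this commit; all my_* names are hypothetical):

struct my_atomic_state {
        struct drm_atomic_state base;
        bool bandwidth_changed;         /* driver-private global state */
};

static struct drm_atomic_state *my_atomic_state_alloc(struct drm_device *dev)
{
        struct my_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

        if (!state)
                return NULL;
        if (drm_atomic_state_init(dev, &state->base) < 0) {
                kfree(state);
                return NULL;
        }
        return &state->base;
}

static void my_atomic_state_free(struct drm_atomic_state *state)
{
        struct my_atomic_state *my_state =
                container_of(state, struct my_atomic_state, base);

        /* Release the base state's arrays, then our own allocation. */
        drm_atomic_state_default_release(state);
        kfree(my_state);
}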
 
/**
* drm_atomic_state_alloc - allocate atomic state
* @dev: DRM device
*
* This allocates an empty atomic state to track updates.
*/
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state;
 
if (!config->funcs->atomic_state_alloc) {
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
if (drm_atomic_state_init(dev, state) < 0) {
kfree(state);
return NULL;
}
return state;
}
 
return config->funcs->atomic_state_alloc(dev);
}
EXPORT_SYMBOL(drm_atomic_state_alloc);
 
/**
* drm_atomic_state_clear - clear state object
* drm_atomic_state_default_clear - clear base atomic state
* @state: atomic state
*
* When the w/w mutex algorithm detects a deadlock we need to back off and drop
* all locks. So someone else could sneak in and change the current modeset
* configuration. Which means that all the state assembled in @state is no
* longer an atomic update to the current state, but to some arbitrary earlier
* state. Which could break assumptions the driver's ->atomic_check likely
* relies on.
*
* Hence we must clear all cached state and completely start over, using this
* function.
* Default implementation for clearing atomic state.
* This is useful for drivers that subclass the atomic state.
*/
void drm_atomic_state_clear(struct drm_atomic_state *state)
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_mode_config *config = &dev->mode_config;
int i;
 
DRM_DEBUG_KMS("Clearing atomic state %p\n", state);
DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
 
for (i = 0; i < state->num_connector; i++) {
struct drm_connector *connector = state->connectors[i];
125,10 → 153,18
if (!connector)
continue;
 
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
connector->funcs->atomic_destroy_state(connector,
/*
* FIXME: Async commits can race with connector unplugging and
* there's currently nothing that prevents cleaning up state for
* deleted connectors. As long as the callback doesn't look at
* the connector we'll be fine though, so make sure that's the
* case by setting all connector pointers to NULL.
*/
state->connector_states[i]->connector = NULL;
connector->funcs->atomic_destroy_state(NULL,
state->connector_states[i]);
state->connectors[i] = NULL;
state->connector_states[i] = NULL;
}
 
for (i = 0; i < config->num_crtc; i++) {
139,6 → 175,8
 
crtc->funcs->atomic_destroy_state(crtc,
state->crtc_states[i]);
state->crtcs[i] = NULL;
state->crtc_states[i] = NULL;
}
 
for (i = 0; i < config->num_total_plane; i++) {
149,8 → 187,36
 
plane->funcs->atomic_destroy_state(plane,
state->plane_states[i]);
state->planes[i] = NULL;
state->plane_states[i] = NULL;
}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);
 
/**
* drm_atomic_state_clear - clear state object
* @state: atomic state
*
* When the w/w mutex algorithm detects a deadlock we need to back off and drop
* all locks. So someone else could sneak in and change the current modeset
* configuration. Which means that all the state assembled in @state is no
* longer an atomic update to the current state, but to some arbitrary earlier
* state. Which could break assumptions the driver's ->atomic_check likely
* relies on.
*
* Hence we must clear all cached state and completely start over, using this
* function.
*/
void drm_atomic_state_clear(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_mode_config *config = &dev->mode_config;
 
if (config->funcs->atomic_state_clear)
config->funcs->atomic_state_clear(state);
else
drm_atomic_state_default_clear(state);
}
EXPORT_SYMBOL(drm_atomic_state_clear);
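
In practice this clearing is driven by the -EDEADLK backoff dance described above; a sketch of the canonical retry loop in a hypothetical caller:

static int my_commit_with_backoff(struct drm_atomic_state *state)
{
        int ret;

retry:
        ret = drm_atomic_commit(state);
        if (ret == -EDEADLK) {
                /* Throw away all cached state and drop all locks
                 * before starting over. */
                drm_atomic_state_clear(state);
                drm_modeset_backoff(state->acquire_ctx);
                goto retry;
        }
        return ret;
}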
 
/**
162,12 → 228,26
*/
void drm_atomic_state_free(struct drm_atomic_state *state)
{
struct drm_device *dev;
struct drm_mode_config *config;
 
if (!state)
return;
 
dev = state->dev;
config = &dev->mode_config;
 
drm_atomic_state_clear(state);
 
DRM_DEBUG_KMS("Freeing atomic state %p\n", state);
DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
 
kfree_state(state);
if (config->funcs->atomic_state_free) {
config->funcs->atomic_state_free(state);
} else {
drm_atomic_state_default_release(state);
kfree(state);
}
}
EXPORT_SYMBOL(drm_atomic_state_free);
 
/**
189,14 → 269,13
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
int ret, index;
int ret, index = drm_crtc_index(crtc);
struct drm_crtc_state *crtc_state;
 
index = drm_crtc_index(crtc);
crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
if (crtc_state)
return crtc_state;
 
if (state->crtc_states[index])
return state->crtc_states[index];
 
ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
if (ret)
return ERR_PTR(ret);
209,7 → 288,7
state->crtcs[index] = crtc;
crtc_state->state = state;
 
DRM_DEBUG_KMS("Added [CRTC:%d] %p state to %p\n",
DRM_DEBUG_ATOMIC("Added [CRTC:%d] %p state to %p\n",
crtc->base.id, crtc_state, state);
 
return crtc_state;
217,6 → 296,216
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
 
/**
* drm_atomic_set_mode_for_crtc - set mode for CRTC
* @state: the CRTC whose incoming state to update
* @mode: kernel-internal mode to use for the CRTC, or NULL to disable
*
* Set a mode (originating from the kernel) on the desired CRTC state. Does
* not change any other state properties, including enable, active, or
* mode_changed.
*
* RETURNS:
* Zero on success, error code on failure. Cannot return -EDEADLK.
*/
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
struct drm_display_mode *mode)
{
struct drm_mode_modeinfo umode;
 
/* Early return for no change. */
if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
return 0;
 
if (state->mode_blob)
drm_property_unreference_blob(state->mode_blob);
state->mode_blob = NULL;
 
if (mode) {
drm_mode_convert_to_umode(&umode, mode);
state->mode_blob =
drm_property_create_blob(state->crtc->dev,
sizeof(umode),
&umode);
if (IS_ERR(state->mode_blob))
return PTR_ERR(state->mode_blob);
 
drm_mode_copy(&state->mode, mode);
state->enable = true;
DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
mode->name, state);
} else {
memset(&state->mode, 0, sizeof(state->mode));
state->enable = false;
DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
state);
}
 
return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
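
A sketch of how a kernel-internal caller might enable a CRTC with this function (my_enable_crtc is hypothetical):

static int my_enable_crtc(struct drm_atomic_state *state,
                          struct drm_crtc *crtc,
                          struct drm_display_mode *mode)
{
        struct drm_crtc_state *crtc_state;

        crtc_state = drm_atomic_get_crtc_state(state, crtc);
        if (IS_ERR(crtc_state))
                return PTR_ERR(crtc_state);

        /* drm_atomic_set_mode_for_crtc() sets state->enable for us. */
        crtc_state->active = true;
        return drm_atomic_set_mode_for_crtc(crtc_state, mode);
}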
 
/**
* drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
* @state: the CRTC whose incoming state to update
* @blob: pointer to blob property to use for mode
*
* Set a mode (originating from a blob property) on the desired CRTC state.
* This function will take a reference on the blob property for the CRTC state,
* and release the reference held on the state's existing mode property, if any
* was set.
*
* RETURNS:
* Zero on success, error code on failure. Cannot return -EDEADLK.
*/
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
struct drm_property_blob *blob)
{
if (blob == state->mode_blob)
return 0;
 
if (state->mode_blob)
drm_property_unreference_blob(state->mode_blob);
state->mode_blob = NULL;
 
if (blob) {
if (blob->length != sizeof(struct drm_mode_modeinfo) ||
drm_mode_convert_umode(&state->mode,
(const struct drm_mode_modeinfo *)
blob->data))
return -EINVAL;
 
state->mode_blob = drm_property_reference_blob(blob);
state->enable = true;
DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
state->mode.name, state);
} else {
memset(&state->mode, 0, sizeof(state->mode));
state->enable = false;
DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
state);
}
 
return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
 
/**
* drm_atomic_crtc_set_property - set property on CRTC
* @crtc: the drm CRTC to set a property on
* @state: the state object to update with the new property value
* @property: the property to set
* @val: the new property value
*
* Use this instead of calling crtc->atomic_set_property directly.
* This function handles generic/core properties and calls out to
* driver's ->atomic_set_property() for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
*
* RETURNS:
* Zero on success, error code on failure
*/
int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
struct drm_crtc_state *state, struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *config = &dev->mode_config;
int ret;
 
if (property == config->prop_active)
state->active = val;
else if (property == config->prop_mode_id) {
struct drm_property_blob *mode =
drm_property_lookup_blob(dev, val);
ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
if (mode)
drm_property_unreference_blob(mode);
return ret;
}
else if (crtc->funcs->atomic_set_property)
return crtc->funcs->atomic_set_property(crtc, state, property, val);
else
return -EINVAL;
 
return 0;
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);
 
/*
* This function handles generic/core properties and calls out to
* driver's ->atomic_get_property() for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
*/
static int
drm_atomic_crtc_get_property(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property, uint64_t *val)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *config = &dev->mode_config;
 
if (property == config->prop_active)
*val = state->active;
else if (property == config->prop_mode_id)
*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
else
return -EINVAL;
 
return 0;
}
 
/**
* drm_atomic_crtc_check - check crtc state
* @crtc: crtc to check
* @state: crtc state to check
*
* Provides core sanity checks for crtc state.
*
* RETURNS:
* Zero on success, error code on failure
*/
static int drm_atomic_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
/* NOTE: we explicitly don't enforce constraints such as primary
* layer covering entire screen, since that is something we want
* to allow (on hw that supports it). For hw that does not, it
* should be checked in driver's crtc->atomic_check() vfunc.
*
* TODO: Add generic modeset state checks once we support those.
*/
 
if (state->active && !state->enable) {
DRM_DEBUG_ATOMIC("[CRTC:%d] active without enabled\n",
crtc->base.id);
return -EINVAL;
}
 
/* The state->enable vs. state->mode_blob checks can be WARN_ON,
* as this is a kernel-internal detail that userspace should never
* be able to trigger. */
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
WARN_ON(state->enable && !state->mode_blob)) {
DRM_DEBUG_ATOMIC("[CRTC:%d] enabled without mode blob\n",
crtc->base.id);
return -EINVAL;
}
 
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
WARN_ON(!state->enable && state->mode_blob)) {
DRM_DEBUG_ATOMIC("[CRTC:%d] disabled with mode blob\n",
crtc->base.id);
return -EINVAL;
}
 
return 0;
}
 
/**
* drm_atomic_get_plane_state - get plane state
* @state: global atomic state object
* @plane: plane to get state object for
235,14 → 524,13
drm_atomic_get_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane)
{
int ret, index;
int ret, index = drm_plane_index(plane);
struct drm_plane_state *plane_state;
 
index = drm_plane_index(plane);
plane_state = drm_atomic_get_existing_plane_state(state, plane);
if (plane_state)
return plane_state;
 
if (state->plane_states[index])
return state->plane_states[index];
 
ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
if (ret)
return ERR_PTR(ret);
255,7 → 543,7
state->planes[index] = plane;
plane_state->state = state;
 
DRM_DEBUG_KMS("Added [PLANE:%d] %p state to %p\n",
DRM_DEBUG_ATOMIC("Added [PLANE:%d] %p state to %p\n",
plane->base.id, plane_state, state);
 
if (plane_state->crtc) {
272,6 → 560,210
EXPORT_SYMBOL(drm_atomic_get_plane_state);
 
/**
* drm_atomic_plane_set_property - set property on plane
* @plane: the drm plane to set a property on
* @state: the state object to update with the new property value
* @property: the property to set
* @val: the new property value
*
* Use this instead of calling plane->atomic_set_property directly.
* This function handles generic/core properties and calls out to
* driver's ->atomic_set_property() for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
*
* RETURNS:
* Zero on success, error code on failure
*/
int drm_atomic_plane_set_property(struct drm_plane *plane,
struct drm_plane_state *state, struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = plane->dev;
struct drm_mode_config *config = &dev->mode_config;
 
if (property == config->prop_fb_id) {
struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, val);
drm_atomic_set_fb_for_plane(state, fb);
if (fb)
drm_framebuffer_unreference(fb);
} else if (property == config->prop_crtc_id) {
struct drm_crtc *crtc = drm_crtc_find(dev, val);
return drm_atomic_set_crtc_for_plane(state, crtc);
} else if (property == config->prop_crtc_x) {
state->crtc_x = U642I64(val);
} else if (property == config->prop_crtc_y) {
state->crtc_y = U642I64(val);
} else if (property == config->prop_crtc_w) {
state->crtc_w = val;
} else if (property == config->prop_crtc_h) {
state->crtc_h = val;
} else if (property == config->prop_src_x) {
state->src_x = val;
} else if (property == config->prop_src_y) {
state->src_y = val;
} else if (property == config->prop_src_w) {
state->src_w = val;
} else if (property == config->prop_src_h) {
state->src_h = val;
} else if (property == config->rotation_property) {
state->rotation = val;
} else if (plane->funcs->atomic_set_property) {
return plane->funcs->atomic_set_property(plane, state,
property, val);
} else {
return -EINVAL;
}
 
return 0;
}
EXPORT_SYMBOL(drm_atomic_plane_set_property);
 
/*
* This function handles generic/core properties and calls out to
* driver's ->atomic_get_property() for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
*/
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property, uint64_t *val)
{
struct drm_device *dev = plane->dev;
struct drm_mode_config *config = &dev->mode_config;
 
if (property == config->prop_fb_id) {
*val = (state->fb) ? state->fb->base.id : 0;
} else if (property == config->prop_crtc_id) {
*val = (state->crtc) ? state->crtc->base.id : 0;
} else if (property == config->prop_crtc_x) {
*val = I642U64(state->crtc_x);
} else if (property == config->prop_crtc_y) {
*val = I642U64(state->crtc_y);
} else if (property == config->prop_crtc_w) {
*val = state->crtc_w;
} else if (property == config->prop_crtc_h) {
*val = state->crtc_h;
} else if (property == config->prop_src_x) {
*val = state->src_x;
} else if (property == config->prop_src_y) {
*val = state->src_y;
} else if (property == config->prop_src_w) {
*val = state->src_w;
} else if (property == config->prop_src_h) {
*val = state->src_h;
} else if (property == config->rotation_property) {
*val = state->rotation;
} else if (plane->funcs->atomic_get_property) {
return plane->funcs->atomic_get_property(plane, state, property, val);
} else {
return -EINVAL;
}
 
return 0;
}
 
static bool
plane_switching_crtc(struct drm_atomic_state *state,
struct drm_plane *plane,
struct drm_plane_state *plane_state)
{
if (!plane->state->crtc || !plane_state->crtc)
return false;
 
if (plane->state->crtc == plane_state->crtc)
return false;
 
/* This could be refined, but currently there's no helper or driver code
* to implement direct switching of active planes nor userspace to take
* advantage of more direct plane switching without the intermediate
* full OFF state.
*/
return true;
}
 
/**
* drm_atomic_plane_check - check plane state
* @plane: plane to check
* @state: plane state to check
*
* Provides core sanity checks for plane state.
*
* RETURNS:
* Zero on success, error code on failure
*/
static int drm_atomic_plane_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
unsigned int fb_width, fb_height;
int ret;
 
/* either *both* CRTC and FB must be set, or neither */
if (WARN_ON(state->crtc && !state->fb)) {
DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
return -EINVAL;
} else if (WARN_ON(state->fb && !state->crtc)) {
DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
return -EINVAL;
}
 
/* if disabled, we don't care about the rest of the state: */
if (!state->crtc)
return 0;
 
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
return -EINVAL;
}
 
/* Check whether this plane supports the fb pixel format. */
ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
if (ret) {
DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
drm_get_format_name(state->fb->pixel_format));
return ret;
}
 
/* Give drivers some help against integer overflows */
if (state->crtc_w > INT_MAX ||
state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
state->crtc_h > INT_MAX ||
state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
state->crtc_w, state->crtc_h,
state->crtc_x, state->crtc_y);
return -ERANGE;
}
 
fb_width = state->fb->width << 16;
fb_height = state->fb->height << 16;
 
/* Make sure source coordinates are inside the fb. */
if (state->src_w > fb_width ||
state->src_x > fb_width - state->src_w ||
state->src_h > fb_height ||
state->src_y > fb_height - state->src_h) {
DRM_DEBUG_ATOMIC("Invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
return -ENOSPC;
}
 
if (plane_switching_crtc(state->state, plane, state)) {
DRM_DEBUG_ATOMIC("[PLANE:%d] switching CRTC directly\n",
plane->base.id);
return -EINVAL;
}
 
return 0;
}
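
For reference, the src_* rectangle values checked above are in 16.16 fixed point; the debug message converts the fractional part to six decimal digits via (frac * 15625) >> 10, which equals frac * 10^6 / 2^16. A standalone sketch of that conversion (hypothetical helper):

/* Split a 16.16 fixed-point value into integer part and six decimal
 * digits, e.g. 0x18000 (1.5) yields ipart = 1, frac6 = 500000.
 */
static void fixed16_to_parts(uint32_t v, uint32_t *ipart, uint32_t *frac6)
{
        *ipart = v >> 16;
        *frac6 = ((v & 0xffff) * 15625) >> 10;
}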
 
/**
* drm_atomic_get_connector_state - get connector state
* @state: global atomic state object
* @connector: connector to get state object for
311,7 → 803,7
* at most the array is a bit too large.
*/
if (index >= state->num_connector) {
DRM_DEBUG_KMS("Hot-added connector would overflow state array, restarting\n");
DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n");
return ERR_PTR(-EAGAIN);
}
 
326,7 → 818,7
state->connectors[index] = connector;
connector_state->state = state;
 
DRM_DEBUG_KMS("Added [CONNECTOR:%d] %p state to %p\n",
DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
connector->base.id, connector_state, state);
 
if (connector_state->crtc) {
343,9 → 835,113
EXPORT_SYMBOL(drm_atomic_get_connector_state);
 
/**
* drm_atomic_connector_set_property - set property on connector.
* @connector: the drm connector to set a property on
* @state: the state object to update with the new property value
* @property: the property to set
* @val: the new property value
*
* Use this instead of calling connector->atomic_set_property directly.
* This function handles generic/core properties and calls out to
* driver's ->atomic_set_property() for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
*
* RETURNS:
* Zero on success, error code on failure
*/
int drm_atomic_connector_set_property(struct drm_connector *connector,
struct drm_connector_state *state, struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *config = &dev->mode_config;
 
if (property == config->prop_crtc_id) {
struct drm_crtc *crtc = drm_crtc_find(dev, val);
return drm_atomic_set_crtc_for_connector(state, crtc);
} else if (property == config->dpms_property) {
/* setting DPMS property requires special handling, which
* is done in legacy setprop path for us. Disallow (for
* now?) atomic writes to DPMS property:
*/
return -EINVAL;
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
} else {
return -EINVAL;
}
}
EXPORT_SYMBOL(drm_atomic_connector_set_property);
 
/*
* This function handles generic/core properties and calls out to
* driver's ->atomic_get_property() for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
*/
static int
drm_atomic_connector_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property, uint64_t *val)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *config = &dev->mode_config;
 
if (property == config->prop_crtc_id) {
*val = (state->crtc) ? state->crtc->base.id : 0;
} else if (property == config->dpms_property) {
*val = connector->dpms;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
} else {
return -EINVAL;
}
 
return 0;
}
 
int drm_atomic_get_property(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
struct drm_device *dev = property->dev;
int ret;
 
switch (obj->type) {
case DRM_MODE_OBJECT_CONNECTOR: {
struct drm_connector *connector = obj_to_connector(obj);
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
ret = drm_atomic_connector_get_property(connector,
connector->state, property, val);
break;
}
case DRM_MODE_OBJECT_CRTC: {
struct drm_crtc *crtc = obj_to_crtc(obj);
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
ret = drm_atomic_crtc_get_property(crtc,
crtc->state, property, val);
break;
}
case DRM_MODE_OBJECT_PLANE: {
struct drm_plane *plane = obj_to_plane(obj);
WARN_ON(!drm_modeset_is_locked(&plane->mutex));
ret = drm_atomic_plane_get_property(plane,
plane->state, property, val);
break;
}
default:
ret = -EINVAL;
break;
}
 
return ret;
}
 
/**
* drm_atomic_set_crtc_for_plane - set crtc for plane
* @state: the incoming atomic state
* @plane: the plane whose incoming state to update
* @plane_state: the plane whose incoming state to update
* @crtc: crtc to use for the plane
*
* Changing the assigned crtc for a plane requires us to grab the lock and state
358,16 → 954,12
* sequence must be restarted. All other errors are fatal.
*/
int
drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
struct drm_plane *plane, struct drm_crtc *crtc)
drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
struct drm_crtc *crtc)
{
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
struct drm_plane *plane = plane_state->plane;
struct drm_crtc_state *crtc_state;
 
if (WARN_ON(IS_ERR(plane_state)))
return PTR_ERR(plane_state);
 
if (plane_state->crtc) {
crtc_state = drm_atomic_get_crtc_state(plane_state->state,
plane_state->crtc);
388,10 → 980,11
}
 
if (crtc)
DRM_DEBUG_KMS("Link plane state %p to [CRTC:%d]\n",
DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d]\n",
plane_state, crtc->base.id);
else
DRM_DEBUG_KMS("Link plane state %p to [NOCRTC]\n", plane_state);
DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
plane_state);
 
return 0;
}
398,7 → 991,7
EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
 
/**
* drm_atomic_set_fb_for_plane - set crtc for plane
* drm_atomic_set_fb_for_plane - set framebuffer for plane
* @plane_state: atomic state object for the plane
* @fb: fb to use for the plane
*
418,10 → 1011,11
plane_state->fb = fb;
 
if (fb)
DRM_DEBUG_KMS("Set [FB:%d] for plane state %p\n",
DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
fb->base.id, plane_state);
else
DRM_DEBUG_KMS("Set [NOFB] for plane state %p\n", plane_state);
DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
plane_state);
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
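
Taken together with drm_atomic_get_plane_state(), these two setters form the usual plane-update sequence; a sketch (my_update_plane is hypothetical):

static int my_update_plane(struct drm_atomic_state *state,
                           struct drm_plane *plane,
                           struct drm_crtc *crtc,
                           struct drm_framebuffer *fb)
{
        struct drm_plane_state *plane_state;
        int ret;

        plane_state = drm_atomic_get_plane_state(state, plane);
        if (IS_ERR(plane_state))
                return PTR_ERR(plane_state);

        /* May return -EDEADLK, in which case the caller must back off. */
        ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
        if (ret)
                return ret;
        drm_atomic_set_fb_for_plane(plane_state, fb);
        return 0;
}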
 
454,10 → 1048,10
conn_state->crtc = crtc;
 
if (crtc)
DRM_DEBUG_KMS("Link connector state %p to [CRTC:%d]\n",
DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d]\n",
conn_state, crtc->base.id);
else
DRM_DEBUG_KMS("Link connector state %p to [NOCRTC]\n",
DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
conn_state);
 
return 0;
494,7 → 1088,7
if (ret)
return ret;
 
DRM_DEBUG_KMS("Adding all current connectors for [CRTC:%d] to %p\n",
DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d] to %p\n",
crtc->base.id, state);
 
/*
501,7 → 1095,7
* Changed connectors are already in @state, so only need to look at the
* current configuration.
*/
list_for_each_entry(connector, &config->connector_list, head) {
drm_for_each_connector(connector, state->dev) {
if (connector->state->crtc != crtc)
continue;
 
515,6 → 1109,45
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
 
/**
* drm_atomic_add_affected_planes - add planes for crtc
* @state: atomic state
* @crtc: DRM crtc
*
* This function walks the current configuration and adds all planes
* currently used by @crtc to the atomic configuration @state. This is useful
* when an atomic commit also needs to check all currently enabled planes on
* @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
* to avoid special code to force-enable all planes.
*
* Since acquiring a plane state will always also acquire the w/w mutex of the
* current CRTC for that plane (if there is any), adding all the plane states for
* a CRTC will not reduce parallelism of atomic updates.
*
* Returns:
* 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
* then the w/w mutex code has detected a deadlock and the entire atomic
* sequence must be restarted. All other errors are fatal.
*/
int
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
struct drm_plane *plane;
 
WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
 
drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
 
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
}
return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_planes);
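
A sketch of pulling in everything a CRTC touches before forcing a full modeset on it (hypothetical helper; note the WARN_ON above means the CRTC state must already be part of @state):

static int my_add_crtc_dependencies(struct drm_atomic_state *state,
                                    struct drm_crtc *crtc)
{
        struct drm_crtc_state *crtc_state;
        int ret;

        /* Acquire the CRTC state first; add_affected_planes() relies on it. */
        crtc_state = drm_atomic_get_crtc_state(state, crtc);
        if (IS_ERR(crtc_state))
                return PTR_ERR(crtc_state);

        ret = drm_atomic_add_affected_connectors(state, crtc);
        if (ret)
                return ret;

        return drm_atomic_add_affected_planes(state, crtc);
}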
 
/**
* drm_atomic_connectors_for_crtc - count number of connected outputs
* @state: atomic state
* @crtc: DRM crtc
526,18 → 1159,17
drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
int i, num_connected_connectors = 0;
 
for (i = 0; i < state->num_connector; i++) {
struct drm_connector *connector;
struct drm_connector_state *conn_state;
 
conn_state = state->connector_states[i];
int i, num_connected_connectors = 0;
 
if (conn_state && conn_state->crtc == crtc)
for_each_connector_in_state(state, connector, conn_state, i) {
if (conn_state->crtc == crtc)
num_connected_connectors++;
}
 
DRM_DEBUG_KMS("State %p has %i connectors for [CRTC:%d]\n",
DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d]\n",
state, num_connected_connectors, crtc->base.id);
 
return num_connected_connectors;
583,15 → 1215,49
*/
int drm_atomic_check_only(struct drm_atomic_state *state)
{
struct drm_mode_config *config = &state->dev->mode_config;
struct drm_device *dev = state->dev;
struct drm_mode_config *config = &dev->mode_config;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int i, ret = 0;
 
DRM_DEBUG_KMS("checking %p\n", state);
DRM_DEBUG_ATOMIC("checking %p\n", state);
 
for_each_plane_in_state(state, plane, plane_state, i) {
ret = drm_atomic_plane_check(plane, plane_state);
if (ret) {
DRM_DEBUG_ATOMIC("[PLANE:%d] atomic core check failed\n",
plane->base.id);
return ret;
}
}
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
ret = drm_atomic_crtc_check(crtc, crtc_state);
if (ret) {
DRM_DEBUG_ATOMIC("[CRTC:%d] atomic core check failed\n",
crtc->base.id);
return ret;
}
}
 
if (config->funcs->atomic_check)
return config->funcs->atomic_check(state->dev, state);
else
return 0;
ret = config->funcs->atomic_check(state->dev, state);
 
if (!state->allow_modeset) {
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n",
crtc->base.id);
return -EINVAL;
}
}
}
 
return ret;
}
EXPORT_SYMBOL(drm_atomic_check_only);
 
/**
619,7 → 1285,7
if (ret)
return ret;
 
DRM_DEBUG_KMS("commiting %p\n", state);
DRM_DEBUG_ATOMIC("commiting %p\n", state);
 
return config->funcs->atomic_commit(state->dev, state, false);
}
650,66 → 1316,157
if (ret)
return ret;
 
DRM_DEBUG_KMS("commiting %p asynchronously\n", state);
DRM_DEBUG_ATOMIC("commiting %p asynchronously\n", state);
 
return config->funcs->atomic_commit(state->dev, state, true);
}
EXPORT_SYMBOL(drm_atomic_async_commit);
 
/**
* drm_atomic_helper_plane_duplicate_state - default state duplicate hook
* @plane: drm plane
*
* Default plane state duplicate hook for drivers which don't have their own
* subclassed plane state structure.
/*
* The big monster ioctl
*/
struct drm_plane_state *
drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
 
static struct drm_pending_vblank_event *create_vblank_event(
struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data)
{
struct drm_plane_state *state;
struct drm_pending_vblank_event *e = NULL;
unsigned long flags;
 
if (WARN_ON(!plane->state))
return NULL;
spin_lock_irqsave(&dev->event_lock, flags);
if (file_priv->event_space < sizeof e->event) {
spin_unlock_irqrestore(&dev->event_lock, flags);
goto out;
}
file_priv->event_space -= sizeof e->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
 
state = kmemdup(plane->state, sizeof(*plane->state), GFP_KERNEL);
e = kzalloc(sizeof *e, GFP_KERNEL);
if (e == NULL) {
spin_lock_irqsave(&dev->event_lock, flags);
file_priv->event_space += sizeof e->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
goto out;
}
 
if (state && state->fb)
drm_framebuffer_reference(state->fb);
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e->event.base.length = sizeof e->event;
e->event.user_data = user_data;
e->base.event = &e->event.base;
e->base.file_priv = file_priv;
e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
 
return state;
out:
return e;
}
EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
 
static void destroy_vblank_event(struct drm_device *dev,
struct drm_file *file_priv, struct drm_pending_vblank_event *e)
{
unsigned long flags;
 
/**
* drm_atomic_helper_crtc_destroy_state - default state destroy hook
* @crtc: drm CRTC
* @state: CRTC state object to release
*
* Default CRTC state destroy hook for drivers which don't have their own
* subclassed CRTC state structure.
*/
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
spin_lock_irqsave(&dev->event_lock, flags);
file_priv->event_space += sizeof e->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(e);
}
 
static int atomic_set_prop(struct drm_atomic_state *state,
struct drm_mode_object *obj, struct drm_property *prop,
uint64_t prop_value)
{
kfree(state);
struct drm_mode_object *ref;
int ret;
 
if (!drm_property_change_valid_get(prop, prop_value, &ref))
return -EINVAL;
 
switch (obj->type) {
case DRM_MODE_OBJECT_CONNECTOR: {
struct drm_connector *connector = obj_to_connector(obj);
struct drm_connector_state *connector_state;
 
connector_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(connector_state)) {
ret = PTR_ERR(connector_state);
break;
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
 
ret = drm_atomic_connector_set_property(connector,
connector_state, prop, prop_value);
break;
}
case DRM_MODE_OBJECT_CRTC: {
struct drm_crtc *crtc = obj_to_crtc(obj);
struct drm_crtc_state *crtc_state;
 
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
break;
}
 
ret = drm_atomic_crtc_set_property(crtc,
crtc_state, prop, prop_value);
break;
}
case DRM_MODE_OBJECT_PLANE: {
struct drm_plane *plane = obj_to_plane(obj);
struct drm_plane_state *plane_state;
 
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
break;
}
 
ret = drm_atomic_plane_set_property(plane,
plane_state, prop, prop_value);
break;
}
default:
ret = -EINVAL;
break;
}
 
drm_property_change_valid_put(prop, ref);
return ret;
}
 
/**
* drm_atomic_helper_plane_destroy_state - default state destroy hook
* @plane: drm plane
* @state: plane state object to release
* drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
*
* Default plane state destroy hook for drivers which don't have their own
* subclassed plane state structure.
* @dev: drm device to check.
* @plane_mask: plane mask for planes that were updated.
* @ret: return value, can be -EDEADLK for a retry.
*
* Before doing an update plane->old_fb is set to plane->fb,
* but before dropping the locks old_fb needs to be set to NULL
* and plane->fb updated. This is a common operation for each
* atomic update, so this call is split off as a helper.
*/
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
void drm_atomic_clean_old_fb(struct drm_device *dev,
unsigned plane_mask,
int ret)
{
if (state->fb)
drm_framebuffer_unreference(state->fb);
struct drm_plane *plane;
 
kfree(state);
/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
* locks (ie. while it is still safe to deref plane->state). We
* need to do this here because the driver entry points cannot
* distinguish between legacy and atomic ioctls.
*/
drm_for_each_plane_mask(plane, dev, plane_mask) {
if (ret == 0) {
struct drm_framebuffer *new_fb = plane->state->fb;
if (new_fb)
drm_framebuffer_reference(new_fb);
plane->fb = new_fb;
plane->crtc = plane->state->crtc;
 
if (plane->old_fb)
drm_framebuffer_unreference(plane->old_fb);
}
EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
plane->old_fb = NULL;
}
}
EXPORT_SYMBOL(drm_atomic_clean_old_fb);
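
A sketch of the legacy-path pattern drm_atomic_clean_old_fb() is meant for (hypothetical driver code; the real legacy entry points also retry on -EDEADLK):

static int my_legacy_plane_update(struct drm_plane *plane,
                                  struct drm_atomic_state *state)
{
        unsigned plane_mask;
        int ret;

        /* Legacy entry points stash the old fb before the update... */
        plane->old_fb = plane->fb;
        plane_mask = 1 << drm_plane_index(plane);

        ret = drm_atomic_commit(state);

        /* ...and must fix up legacy pointers and drop old_fb again
         * before releasing the locks, success or not. */
        drm_atomic_clean_old_fb(plane->dev, plane_mask, ret);
        return ret;
}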
/drivers/video/drm/drm_atomic_helper.c
0,0 → 1,2540
/*
* Copyright (C) 2014 Red Hat
* Copyright (C) 2014 Intel Corp.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rob Clark <robdclark@gmail.com>
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
 
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <linux/fence.h>
 
/**
* DOC: overview
*
* This helper library provides implementations of check and commit functions on
* top of the CRTC modeset helper callbacks and the plane helper callbacks. It
* also provides convenience implementations for the atomic state handling
* callbacks for drivers which don't need to subclass the drm core structures to
* add their own additional internal state.
*
* This library also provides default implementations for the check callback in
* drm_atomic_helper_check() and for the commit callback with
* drm_atomic_helper_commit(). But the individual stages and callbacks are
* exposed to allow drivers to mix and match and e.g. use the plane helpers only
* together with a driver private modeset implementation.
*
* This library also provides implementations for all the legacy driver
* interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
* drm_atomic_helper_update_plane(), drm_atomic_helper_disable_plane() and the
* various functions to implement set_property callbacks. New drivers must not
* implement these functions themselves but must use the provided helpers.
*/
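
As a concrete illustration of the first point, a driver that adopts the helpers wholesale only needs to point its mode_config funcs at the helper entry points. A sketch (my_fb_create and my_mode_config_funcs are hypothetical):

static struct drm_framebuffer *my_fb_create(struct drm_device *dev,
                                            struct drm_file *file_priv,
                                            struct drm_mode_fb_cmd2 *mode_cmd);

static const struct drm_mode_config_funcs my_mode_config_funcs = {
        .fb_create      = my_fb_create,         /* driver-specific */
        .atomic_check   = drm_atomic_helper_check,
        .atomic_commit  = drm_atomic_helper_commit,
};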
static void
drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
struct drm_plane_state *plane_state,
struct drm_plane *plane)
{
struct drm_crtc_state *crtc_state;
 
if (plane->state->crtc) {
crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
 
if (WARN_ON(!crtc_state))
return;
 
crtc_state->planes_changed = true;
}
 
if (plane_state->crtc) {
crtc_state =
state->crtc_states[drm_crtc_index(plane_state->crtc)];
 
if (WARN_ON(!crtc_state))
return;
 
crtc_state->planes_changed = true;
}
}
 
static struct drm_crtc *
get_current_crtc_for_encoder(struct drm_device *dev,
struct drm_encoder *encoder)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_connector *connector;
 
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
drm_for_each_connector(connector, dev) {
if (connector->state->best_encoder != encoder)
continue;
 
return connector->state->crtc;
}
 
return NULL;
}
 
static int
steal_encoder(struct drm_atomic_state *state,
struct drm_encoder *encoder,
struct drm_crtc *encoder_crtc)
{
struct drm_mode_config *config = &state->dev->mode_config;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int ret;
 
/*
* We can only steal an encoder coming from a connector, which means we
* must already hold the connection_mutex.
*/
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
encoder->base.id, encoder->name,
encoder_crtc->base.id);
 
crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
 
crtc_state->connectors_changed = true;
 
list_for_each_entry(connector, &config->connector_list, head) {
if (connector->state->best_encoder != encoder)
continue;
 
DRM_DEBUG_ATOMIC("Stealing encoder from [CONNECTOR:%d:%s]\n",
connector->base.id,
connector->name);
 
connector_state = drm_atomic_get_connector_state(state,
connector);
if (IS_ERR(connector_state))
return PTR_ERR(connector_state);
 
ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
if (ret)
return ret;
connector_state->best_encoder = NULL;
}
 
return 0;
}
 
static int
update_connector_routing(struct drm_atomic_state *state, int conn_idx)
{
const struct drm_connector_helper_funcs *funcs;
struct drm_encoder *new_encoder;
struct drm_crtc *encoder_crtc;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct drm_crtc_state *crtc_state;
int idx, ret;
 
connector = state->connectors[conn_idx];
connector_state = state->connector_states[conn_idx];
 
if (!connector)
return 0;
 
DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
connector->base.id,
connector->name);
 
if (connector->state->crtc != connector_state->crtc) {
if (connector->state->crtc) {
idx = drm_crtc_index(connector->state->crtc);
 
crtc_state = state->crtc_states[idx];
crtc_state->connectors_changed = true;
}
 
if (connector_state->crtc) {
idx = drm_crtc_index(connector_state->crtc);
 
crtc_state = state->crtc_states[idx];
crtc_state->connectors_changed = true;
}
}
 
if (!connector_state->crtc) {
DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
connector->base.id,
connector->name);
 
connector_state->best_encoder = NULL;
 
return 0;
}
 
funcs = connector->helper_private;
 
if (funcs->atomic_best_encoder)
new_encoder = funcs->atomic_best_encoder(connector,
connector_state);
else
new_encoder = funcs->best_encoder(connector);
 
if (!new_encoder) {
DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
connector->base.id,
connector->name);
return -EINVAL;
}
 
if (!drm_encoder_crtc_ok(new_encoder, connector_state->crtc)) {
DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d]\n",
new_encoder->base.id,
new_encoder->name,
connector_state->crtc->base.id);
return -EINVAL;
}
 
if (new_encoder == connector_state->best_encoder) {
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
connector->base.id,
connector->name,
new_encoder->base.id,
new_encoder->name,
connector_state->crtc->base.id);
 
return 0;
}
 
encoder_crtc = get_current_crtc_for_encoder(state->dev,
new_encoder);
 
if (encoder_crtc) {
ret = steal_encoder(state, new_encoder, encoder_crtc);
if (ret) {
DRM_DEBUG_ATOMIC("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
connector->base.id,
connector->name);
return ret;
}
}
 
if (WARN_ON(!connector_state->crtc))
return -EINVAL;
 
connector_state->best_encoder = new_encoder;
idx = drm_crtc_index(connector_state->crtc);
 
crtc_state = state->crtc_states[idx];
crtc_state->connectors_changed = true;
 
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
connector->base.id,
connector->name,
new_encoder->base.id,
new_encoder->name,
connector_state->crtc->base.id);
 
return 0;
}
 
static int
mode_fixup(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
int i;
bool ret;
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (!crtc_state->mode_changed &&
!crtc_state->connectors_changed)
continue;
 
drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode);
}
 
for_each_connector_in_state(state, connector, conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
 
WARN_ON(!!conn_state->best_encoder != !!conn_state->crtc);
 
if (!conn_state->crtc || !conn_state->best_encoder)
continue;
 
crtc_state =
state->crtc_states[drm_crtc_index(conn_state->crtc)];
 
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call ->mode_fixup twice.
*/
encoder = conn_state->best_encoder;
funcs = encoder->helper_private;
if (!funcs)
continue;
 
ret = drm_bridge_mode_fixup(encoder->bridge, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
return -EINVAL;
}
 
if (funcs->atomic_check) {
ret = funcs->atomic_check(encoder, crtc_state,
conn_state);
if (ret) {
DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
encoder->base.id, encoder->name);
return ret;
}
} else if (funcs->mode_fixup) {
ret = funcs->mode_fixup(encoder, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
encoder->base.id, encoder->name);
return -EINVAL;
}
}
}
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
 
if (!crtc_state->mode_changed &&
!crtc_state->connectors_changed)
continue;
 
funcs = crtc->helper_private;
if (!funcs->mode_fixup)
continue;
 
ret = funcs->mode_fixup(crtc, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
DRM_DEBUG_ATOMIC("[CRTC:%d] fixup failed\n",
crtc->base.id);
return -EINVAL;
}
}
 
return 0;
}
 
/**
* drm_atomic_helper_check_modeset - validate state object for modeset changes
* @dev: DRM device
* @state: the driver state object
*
* Check the state object to see if the requested state is physically possible.
* This does all the crtc and connector related computations for an atomic
* update and adds any additional connectors needed for full modesets and calls
* down into ->mode_fixup functions of the driver backend.
*
* crtc_state->mode_changed is set when the input mode is changed.
* crtc_state->connectors_changed is set when a connector is added or
* removed from the crtc.
* crtc_state->active_changed is set when crtc_state->active changes,
* which is used for dpms.
*
* IMPORTANT:
*
* Drivers which update ->mode_changed (e.g. in their ->atomic_check hooks if a
* plane update can't be done without a full modeset) _must_ call this function
* again after that change. It is permitted to call this function multiple
* times for the same update, e.g. when the ->atomic_check functions depend upon
* the adjusted dotclock for fifo space allocation and watermark computation.
*
* RETURNS
* Zero for success or -errno
*/
int
drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int i, ret;
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) {
DRM_DEBUG_ATOMIC("[CRTC:%d] mode changed\n",
crtc->base.id);
crtc_state->mode_changed = true;
}
 
if (crtc->state->enable != crtc_state->enable) {
DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n",
crtc->base.id);
 
/*
* For clarity this assignment is done here, but
* enable == 0 is only true when there are no
* connectors and a NULL mode.
*
* The other way around is true as well. enable != 0
* iff connectors are attached and a mode is set.
*/
crtc_state->mode_changed = true;
crtc_state->connectors_changed = true;
}
}
 
for_each_connector_in_state(state, connector, connector_state, i) {
/*
* This only sets crtc->mode_changed for routing changes,
* drivers must set crtc->mode_changed themselves when connector
* properties need to be updated.
*/
ret = update_connector_routing(state, i);
if (ret)
return ret;
}
 
/*
* After all the routing has been prepared we need to add in any
* connector which is itself unchanged, but whose crtc changes its
* configuration. This must be done before calling mode_fixup in case a
* crtc only changed its mode but has the same set of connectors.
*/
for_each_crtc_in_state(state, crtc, crtc_state, i) {
int num_connectors;
 
/*
* We must set ->active_changed after walking connectors; otherwise
* an update that only changes active would result in a full modeset,
* because update_connector_routing forces that.
*/
if (crtc->state->active != crtc_state->active) {
DRM_DEBUG_ATOMIC("[CRTC:%d] active changed\n",
crtc->base.id);
crtc_state->active_changed = true;
}
 
if (!drm_atomic_crtc_needs_modeset(crtc_state))
continue;
 
DRM_DEBUG_ATOMIC("[CRTC:%d] needs all connectors, enable: %c, active: %c\n",
crtc->base.id,
crtc_state->enable ? 'y' : 'n',
crtc_state->active ? 'y' : 'n');
 
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret != 0)
return ret;
 
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret != 0)
return ret;
 
num_connectors = drm_atomic_connectors_for_crtc(state,
crtc);
 
if (crtc_state->enable != !!num_connectors) {
DRM_DEBUG_ATOMIC("[CRTC:%d] enabled/connectors mismatch\n",
crtc->base.id);
 
return -EINVAL;
}
}
 
return mode_fixup(state);
}
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
 
/**
* drm_atomic_helper_check_planes - validate state object for planes changes
* @dev: DRM device
* @state: the driver state object
*
* Check the state object to see if the requested state is physically possible.
* This does all the plane update related checks by calling into the
* ->atomic_check hooks provided by the driver.
*
* It also sets crtc_state->planes_changed to indicate that a crtc has
* updated planes.
*
* RETURNS
* Zero for success or -errno
*/
int
drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
int i, ret = 0;
 
for_each_plane_in_state(state, plane, plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
 
funcs = plane->helper_private;
 
drm_atomic_helper_plane_changed(state, plane_state, plane);
 
if (!funcs || !funcs->atomic_check)
continue;
 
ret = funcs->atomic_check(plane, plane_state);
if (ret) {
DRM_DEBUG_ATOMIC("[PLANE:%d] atomic driver check failed\n",
plane->base.id);
return ret;
}
}
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
 
funcs = crtc->helper_private;
 
if (!funcs || !funcs->atomic_check)
continue;
 
ret = funcs->atomic_check(crtc, state->crtc_states[i]);
if (ret) {
DRM_DEBUG_ATOMIC("[CRTC:%d] atomic driver check failed\n",
crtc->base.id);
return ret;
}
}
 
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check_planes);
 
/**
* drm_atomic_helper_check - validate state object
* @dev: DRM device
* @state: the driver state object
*
* Check the state object to see if the requested state is physically possible.
* Only crtcs and planes have check callbacks, so for any additional (global)
* checking that a driver needs it can simply wrap that around this function.
* Drivers without such needs can directly use this as their ->atomic_check()
* callback.
*
* This just wraps the two parts of the state checking for planes and modeset
* state in the default order: First it calls drm_atomic_helper_check_modeset()
* and then drm_atomic_helper_check_planes(). The assumption is that the
* ->atomic_check functions depend upon an updated adjusted_mode.clock to
* e.g. properly compute watermarks.
*
* RETURNS
* Zero for success or -errno
*/
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
int ret;
 
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
 
ret = drm_atomic_helper_check_planes(dev, state);
if (ret)
return ret;
 
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check);
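
A driver that needs extra global validation can compose the two phases itself instead of using this wrapper; a sketch (my_atomic_check is hypothetical):

static int my_atomic_check(struct drm_device *dev,
                           struct drm_atomic_state *state)
{
        int ret;

        ret = drm_atomic_helper_check_modeset(dev, state);
        if (ret)
                return ret;

        /* Driver-global checks that depend on the adjusted mode go here. */

        return drm_atomic_helper_check_planes(dev, state);
}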
 
static void
disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int i;
 
for_each_connector_in_state(old_state, connector, old_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_crtc_state *old_crtc_state;
 
/* Shut down everything that's in the changeset and currently
* still on, so we need to check the old, saved state. */
if (!old_conn_state->crtc)
continue;
 
old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
 
if (!old_crtc_state->active ||
!drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
continue;
 
encoder = old_conn_state->best_encoder;
 
/* We shouldn't get this far if we didn't previously have
* an encoder.. but WARN_ON() rather than explode.
*/
if (WARN_ON(!encoder))
continue;
 
funcs = encoder->helper_private;
 
DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
 
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call disable hooks twice.
*/
drm_bridge_disable(encoder->bridge);
 
/* Right function depends upon target state. */
if (connector->state->crtc && funcs->prepare)
funcs->prepare(encoder);
else if (funcs->disable)
funcs->disable(encoder);
else
funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
 
drm_bridge_post_disable(encoder->bridge);
}
 
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
 
/* Shut down everything that needs a full modeset. */
if (!drm_atomic_crtc_needs_modeset(crtc->state))
continue;
 
if (!old_crtc_state->active)
continue;
 
funcs = crtc->helper_private;
 
DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
crtc->base.id);
 
 
/* Right function depends upon target state. */
if (crtc->state->enable && funcs->prepare)
funcs->prepare(crtc);
else if (funcs->disable)
funcs->disable(crtc);
else
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
}
 
/**
* drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
* This function updates all the various legacy modeset state pointers in
* connectors, encoders and crtcs. It also updates the timestamping constants
* used for precise vblank timestamps by calling
* drm_calc_timestamping_constants().
*
* Drivers can use this for building their own atomic commit if they don't have
* a pure helper-based modeset implementation.
*/
void
drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int i;
 
/* clear out existing links and update dpms */
for_each_connector_in_state(old_state, connector, old_conn_state, i) {
if (connector->encoder) {
WARN_ON(!connector->encoder->crtc);
 
connector->encoder->crtc = NULL;
connector->encoder = NULL;
}
 
crtc = connector->state->crtc;
if ((!crtc && old_conn_state->crtc) ||
(crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
struct drm_property *dpms_prop =
dev->mode_config.dpms_property;
int mode = DRM_MODE_DPMS_OFF;
 
if (crtc && crtc->state->active)
mode = DRM_MODE_DPMS_ON;
 
connector->dpms = mode;
drm_object_property_set_value(&connector->base,
dpms_prop, mode);
}
}
 
/* set new links */
for_each_connector_in_state(old_state, connector, old_conn_state, i) {
if (!connector->state->crtc)
continue;
 
if (WARN_ON(!connector->state->best_encoder))
continue;
 
connector->encoder = connector->state->best_encoder;
connector->encoder->crtc = connector->state->crtc;
}
 
/* set legacy state in the crtc structure */
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
struct drm_plane *primary = crtc->primary;
 
crtc->mode = crtc->state->mode;
crtc->enabled = crtc->state->enable;
 
if (drm_atomic_get_existing_plane_state(old_state, primary) &&
primary->state->crtc == crtc) {
crtc->x = primary->state->src_x >> 16;
crtc->y = primary->state->src_y >> 16;
}
 
if (crtc->state->enable)
drm_calc_timestamping_constants(crtc,
&crtc->state->adjusted_mode);
}
}
EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
 
static void
crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_connector *connector;
struct drm_connector_state *old_conn_state;
int i;
 
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
 
if (!crtc->state->mode_changed)
continue;
 
funcs = crtc->helper_private;
 
if (crtc->state->enable && funcs->mode_set_nofb) {
DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
crtc->base.id);
 
funcs->mode_set_nofb(crtc);
}
}
 
for_each_connector_in_state(old_state, connector, old_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_crtc_state *new_crtc_state;
struct drm_encoder *encoder;
struct drm_display_mode *mode, *adjusted_mode;
 
if (!connector->state->best_encoder)
continue;
 
encoder = connector->state->best_encoder;
funcs = encoder->helper_private;
new_crtc_state = connector->state->crtc->state;
mode = &new_crtc_state->mode;
adjusted_mode = &new_crtc_state->adjusted_mode;
 
if (!new_crtc_state->mode_changed)
continue;
 
DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
 
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call mode_set hooks twice.
*/
if (funcs->mode_set)
funcs->mode_set(encoder, mode, adjusted_mode);
 
drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
}
}
 
/**
* drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
* This function shuts down all the outputs that need to be shut down and
* prepares them (if required) with the new mode.
*
* For compatibility with legacy crtc helpers this should be called before
* drm_atomic_helper_commit_planes(), which is what the default commit function
* does. But drivers with different needs can group the modeset commits together
* and do the plane commits at the end. This is useful for drivers doing runtime
PM, since plane updates then only happen when the CRTC is actually enabled.
*/
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
disable_outputs(dev, old_state);
 
drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
 
crtc_set_mode(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
 
/**
* drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
* This function enables all the outputs with the new configuration which had to
* be turned off for the update.
*
* For compatibility with legacy crtc helpers this should be called after
* drm_atomic_helper_commit_planes(), which is what the default commit function
* does. But drivers with different needs can group the modeset commits together
* and do the plane commits at the end. This is useful for drivers doing runtime
PM, since plane updates then only happen when the CRTC is actually enabled.
*/
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_connector *connector;
struct drm_connector_state *old_conn_state;
int i;
 
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
 
/* Need to filter out CRTCs where only planes change. */
if (!drm_atomic_crtc_needs_modeset(crtc->state))
continue;
 
if (!crtc->state->active)
continue;
 
funcs = crtc->helper_private;
 
if (crtc->state->enable) {
DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
crtc->base.id);
 
if (funcs->enable)
funcs->enable(crtc);
else
funcs->commit(crtc);
}
}
 
for_each_connector_in_state(old_state, connector, old_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
 
if (!connector->state->best_encoder)
continue;
 
if (!connector->state->crtc->state->active ||
!drm_atomic_crtc_needs_modeset(connector->state->crtc->state))
continue;
 
encoder = connector->state->best_encoder;
funcs = encoder->helper_private;
 
DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
 
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call enable hooks twice.
*/
drm_bridge_pre_enable(encoder->bridge);
 
if (funcs->enable)
funcs->enable(encoder);
else
funcs->commit(encoder);
 
drm_bridge_enable(encoder->bridge);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
 
static void wait_for_fences(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_plane *plane;
struct drm_plane_state *plane_state;
int i;
 
for_each_plane_in_state(state, plane, plane_state, i) {
if (!plane->state->fence)
continue;
 
WARN_ON(!plane->state->fb);
 
fence_wait(plane->state->fence, false);
fence_put(plane->state->fence);
plane->state->fence = NULL;
}
}
 
static bool framebuffer_changed(struct drm_device *dev,
struct drm_atomic_state *old_state,
struct drm_crtc *crtc)
{
struct drm_plane *plane;
struct drm_plane_state *old_plane_state;
int i;
 
for_each_plane_in_state(old_state, plane, old_plane_state, i) {
if (plane->state->crtc != crtc &&
old_plane_state->crtc != crtc)
continue;
 
if (plane->state->fb != old_plane_state->fb)
return true;
}
 
return false;
}
 
/**
* drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
* Helper to, after atomic commit, wait for vblanks on all affected
* crtcs (i.e. before cleaning up old framebuffers using
* drm_atomic_helper_cleanup_planes()). It will only wait on crtcs where the
* framebuffers have actually changed to optimize for the legacy cursor and
* plane update use-case.
*/
void
drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int i, ret;
 
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
/* No one cares about the old state, so abuse it for tracking
* and store whether we hold a vblank reference (and should do a
* vblank wait) in the ->enable boolean. */
old_crtc_state->enable = false;
 
if (!crtc->state->enable)
continue;
 
/* Legacy cursor ioctls are completely unsynced, and userspace
* relies on that (by doing tons of cursor updates). */
if (old_state->legacy_cursor_update)
continue;
 
if (!framebuffer_changed(dev, old_state, crtc))
continue;
 
ret = drm_crtc_vblank_get(crtc);
if (ret != 0)
continue;
 
old_crtc_state->enable = true;
old_crtc_state->last_vblank_count = drm_crtc_vblank_count(crtc);
}
 
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
if (!old_crtc_state->enable)
continue;
 
ret = wait_event_timeout(dev->vblank[i].queue,
old_crtc_state->last_vblank_count !=
drm_crtc_vblank_count(crtc),
msecs_to_jiffies(50));
 
drm_crtc_vblank_put(crtc);
}
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
 
/**
* drm_atomic_helper_commit - commit validated state object
* @dev: DRM device
* @state: the driver state object
* @async: asynchronous commit
*
* This function commits a state object that has been pre-validated with
* drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
* reservation fails. For
* now this doesn't implement asynchronous commits.
*
* Note that right now this function does not support async commits, and hence
* driver writers must implement their own version for now. Also note that the
* default ordering of how the various stages are called is to match the legacy
* modeset helper library closest. One peculiarity of that is that it doesn't
* mesh well with runtime PM at all.
*
* For drivers supporting runtime PM the recommended sequence is
*
* drm_atomic_helper_commit_modeset_disables(dev, state);
*
* drm_atomic_helper_commit_modeset_enables(dev, state);
*
* drm_atomic_helper_commit_planes(dev, state, true);
*
* See the kerneldoc entries for these three functions for more details.
*
* RETURNS
* Zero for success or -errno.
*/
int drm_atomic_helper_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool async)
{
int ret;
 
if (async)
return -EBUSY;
 
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
return ret;
 
/*
* This is the point of no return - everything below never fails except
* when the hw goes bonghits. Which means we can commit the new state on
* the software side now.
*/
 
drm_atomic_helper_swap_state(dev, state);
 
/*
* Everything below can be run asynchronously without the need to grab
* any modeset locks at all under one condition: It must be guaranteed
* that the asynchronous work has either been cancelled (if the driver
* supports it, which at least requires that the framebuffers get
* cleaned up with drm_atomic_helper_cleanup_planes()) or completed
* before the new state gets committed on the software side with
* drm_atomic_helper_swap_state().
*
* This scheme allows new atomic state updates to be prepared and
* checked in parallel to the asynchronous completion of the previous
* update. Which is important since compositors need to figure out the
* composition of the next frame right after having submitted the
* current layout.
*/
 
wait_for_fences(dev, state);
 
drm_atomic_helper_commit_modeset_disables(dev, state);
 
drm_atomic_helper_commit_planes(dev, state, false);
 
drm_atomic_helper_commit_modeset_enables(dev, state);
 
// drm_atomic_helper_wait_for_vblanks(dev, state);
 
drm_atomic_helper_cleanup_planes(dev, state);
 
drm_atomic_state_free(state);
 
return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_commit);
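
/*
 * Illustrative sketch, not part of the helper library: a driver commit
 * implementation following the runtime PM friendly ordering recommended in
 * the kerneldoc above. All foo_* names are hypothetical; only the
 * drm_atomic_helper_* calls are real API.
 */
#if 0
static int foo_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state, bool async)
{
	int ret;

	if (async)
		return -EBUSY;	/* async not wired up in this sketch */

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	drm_atomic_helper_swap_state(dev, state);

	/* Disable first, then enable, and only then touch planes, so that
	 * plane updates never land on a powered-down CRTC. */
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_modeset_enables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, true);

	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_state_free(state);

	return 0;
}
#endif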
 
/**
* DOC: implementing async commit
*
* For now the atomic helpers don't support async commit directly. If there is
* real need it could be added though, using the dma-buf fence infrastructure
* for generic synchronization with outstanding rendering.
*
* For now drivers have to implement async commit themselves, with the following
* sequence being the recommended one:
*
* 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
* which commit needs to call which can fail, so we want to run it first and
* synchronously.
*
* 2. Synchronize with any outstanding asynchronous commit worker threads which
* might be affected by the new state update. This can be done by either cancelling
* or flushing the work items, depending upon whether the driver can deal with
* cancelled updates. Note that it is important to ensure that the framebuffer
* cleanup is still done when cancelling.
*
* For sufficient parallelism it is recommended to have a work item per crtc
* (for updates which don't touch global state) and a global one. Then we only
* need to synchronize with the crtc work items for changed crtcs and the global
* work item, which allows nice concurrent updates on disjoint sets of crtcs.
*
* 3. The software state is updated synchronously with
* drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
* locks means concurrent callers never see inconsistent state. And doing this
* while it's guaranteed that no relevant async worker runs means that async
* workers do not need to grab any locks. Actually they must not grab locks, for
* otherwise the work flushing will deadlock.
*
* 4. Schedule a work item to do all subsequent steps, using the split-out
* commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
* then cleaning up the framebuffers after the old framebuffer is no longer
* being displayed.
*/
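
/*
 * Illustrative sketch of the recipe above, assuming a single per-device
 * work item; all foo_* names are hypothetical. Steps 1 and 3 run
 * synchronously, step 4 runs from the worker without taking any modeset
 * locks, and step 2 (flushing or cancelling conflicting workers) is only
 * hinted at in a comment.
 */
#if 0
struct foo_commit {
	struct work_struct work;
	struct drm_device *dev;
	struct drm_atomic_state *state;
};

static void foo_commit_work(struct work_struct *work)
{
	struct foo_commit *commit = container_of(work, struct foo_commit, work);

	/* a) pre-plane, b) plane, c) post-plane commit, then fb cleanup */
	drm_atomic_helper_commit_modeset_disables(commit->dev, commit->state);
	drm_atomic_helper_commit_planes(commit->dev, commit->state, false);
	drm_atomic_helper_commit_modeset_enables(commit->dev, commit->state);
	drm_atomic_helper_wait_for_vblanks(commit->dev, commit->state);
	drm_atomic_helper_cleanup_planes(commit->dev, commit->state);
	drm_atomic_state_free(commit->state);
	kfree(commit);
}

static int foo_atomic_commit_async(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct foo_commit *commit;
	int ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);	/* step 1 */
	if (ret)
		return ret;

	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
	if (!commit) {
		drm_atomic_helper_cleanup_planes(dev, state);
		return -ENOMEM;
	}

	/* step 2: flush or cancel workers touching the same CRTCs here,
	 * before the software state is swapped below. */

	drm_atomic_helper_swap_state(dev, state);		/* step 3 */

	INIT_WORK(&commit->work, foo_commit_work);
	commit->dev = dev;
	commit->state = state;
	schedule_work(&commit->work);				/* step 4 */

	return 0;
}
#endif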
 
/**
* drm_atomic_helper_prepare_planes - prepare plane resources before commit
* @dev: DRM device
* @state: atomic state object with new state structures
*
* This function prepares plane state, specifically framebuffers, for the new
* configuration. If any failure is encountered this function will call
* ->cleanup_fb on any already successfully prepared framebuffer.
*
* Returns:
* 0 on success, negative error code on failure.
*/
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
int nplanes = dev->mode_config.num_total_plane;
int ret, i;
 
for (i = 0; i < nplanes; i++) {
const struct drm_plane_helper_funcs *funcs;
struct drm_plane *plane = state->planes[i];
struct drm_plane_state *plane_state = state->plane_states[i];
 
if (!plane)
continue;
 
funcs = plane->helper_private;
 
if (funcs->prepare_fb) {
ret = funcs->prepare_fb(plane, plane_state);
if (ret)
goto fail;
}
}
 
return 0;
 
fail:
for (i--; i >= 0; i--) {
const struct drm_plane_helper_funcs *funcs;
struct drm_plane *plane = state->planes[i];
struct drm_plane_state *plane_state = state->plane_states[i];
 
if (!plane)
continue;
 
funcs = plane->helper_private;
 
if (funcs->cleanup_fb)
funcs->cleanup_fb(plane, plane_state);
 
}
 
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
 
static bool plane_crtc_active(struct drm_plane_state *state)
{
return state->crtc && state->crtc->state->active;
}
 
/**
* drm_atomic_helper_commit_planes - commit plane state
* @dev: DRM device
* @old_state: atomic state object with old state structures
* @active_only: Only commit on active CRTC if set
*
* This function commits the new plane state using the plane and atomic helper
* functions for planes and crtcs. It assumes that the atomic state has already
* been pushed into the relevant object state pointers, since this step can no
* longer fail.
*
* It still requires the global state object @old_state to know which planes and
* crtcs need to be updated though.
*
* Note that this function does all plane updates across all CRTCs in one step.
* If the hardware can't support this approach look at
* drm_atomic_helper_commit_planes_on_crtc() instead.
*
* Plane parameters can be updated by applications while the associated CRTC is
* disabled. The DRM/KMS core will store the parameters in the plane state,
* which will be available to the driver when the CRTC is turned on. As a result
* most drivers don't need to be immediately notified of plane updates for a
* disabled CRTC.
*
* Unless otherwise needed, drivers are advised to set the @active_only
* parameter to true in order not to receive plane update notifications related
* to a disabled CRTC. This avoids the need to manually ignore plane updates in
* driver code when the driver and/or hardware can't or just don't need to deal
* with updates on disabled CRTCs, for example when supporting runtime PM.
*
* The drm_atomic_helper_commit() default implementation only sets @active_only
* to false to most closely match the behaviour of the legacy helpers. This should
* not be copied blindly by drivers.
*/
void drm_atomic_helper_commit_planes(struct drm_device *dev,
struct drm_atomic_state *old_state,
bool active_only)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state;
int i;
 
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
 
funcs = crtc->helper_private;
 
if (!funcs || !funcs->atomic_begin)
continue;
 
if (active_only && !crtc->state->active)
continue;
 
funcs->atomic_begin(crtc, old_crtc_state);
}
 
for_each_plane_in_state(old_state, plane, old_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
bool disabling;
 
funcs = plane->helper_private;
 
if (!funcs)
continue;
 
disabling = drm_atomic_plane_disabling(plane, old_plane_state);
 
if (active_only) {
/*
* Skip planes related to inactive CRTCs. If the plane
* is enabled use the state of the current CRTC. If the
* plane is being disabled use the state of the old
* CRTC to avoid skipping planes being disabled on an
* active CRTC.
*/
if (!disabling && !plane_crtc_active(plane->state))
continue;
if (disabling && !plane_crtc_active(old_plane_state))
continue;
}
 
/*
* Special-case disabling the plane if drivers support it.
*/
if (disabling && funcs->atomic_disable)
funcs->atomic_disable(plane, old_plane_state);
else if (plane->state->crtc || disabling)
funcs->atomic_update(plane, old_plane_state);
}
 
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
 
funcs = crtc->helper_private;
 
if (!funcs || !funcs->atomic_flush)
continue;
 
if (active_only && !crtc->state->active)
continue;
 
funcs->atomic_flush(crtc, old_crtc_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
 
/**
* drm_atomic_helper_commit_planes_on_crtc - commit plane state for a crtc
* @old_crtc_state: atomic state object with the old crtc state
*
* This function commits the new plane state using the plane and atomic helper
* functions for planes on the specific crtc. It assumes that the atomic state
* has already been pushed into the relevant object state pointers, since this
* step can no longer fail.
*
* This function is useful when plane updates should be done crtc-by-crtc
* instead of one global step like drm_atomic_helper_commit_planes() does.
*
* This function can only be safely used when planes are not allowed to move
* between different CRTCs because this function doesn't handle inter-CRTC
* dependencies. Callers need to ensure that no such dependencies exist, or
* resolve them through the ordering of commit calls or some other means.
*/
void
drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
{
const struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_crtc *crtc = old_crtc_state->crtc;
struct drm_atomic_state *old_state = old_crtc_state->state;
struct drm_plane *plane;
unsigned plane_mask;
 
plane_mask = old_crtc_state->plane_mask;
plane_mask |= crtc->state->plane_mask;
 
crtc_funcs = crtc->helper_private;
if (crtc_funcs && crtc_funcs->atomic_begin)
crtc_funcs->atomic_begin(crtc, old_crtc_state);
 
drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
struct drm_plane_state *old_plane_state =
drm_atomic_get_existing_plane_state(old_state, plane);
const struct drm_plane_helper_funcs *plane_funcs;
 
plane_funcs = plane->helper_private;
 
if (!old_plane_state || !plane_funcs)
continue;
 
WARN_ON(plane->state->crtc && plane->state->crtc != crtc);
 
if (drm_atomic_plane_disabling(plane, old_plane_state) &&
plane_funcs->atomic_disable)
plane_funcs->atomic_disable(plane, old_plane_state);
else if (plane->state->crtc ||
drm_atomic_plane_disabling(plane, old_plane_state))
plane_funcs->atomic_update(plane, old_plane_state);
}
 
if (crtc_funcs && crtc_funcs->atomic_flush)
crtc_funcs->atomic_flush(crtc, old_crtc_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
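
/*
 * Illustrative sketch, with hypothetical foo_* names: a driver commit tail
 * that updates planes crtc-by-crtc instead of in one global step, e.g. to
 * interleave per-CRTC synchronization with the plane updates.
 */
#if 0
static void foo_commit_planes_per_crtc(struct drm_device *dev,
				       struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		foo_wait_for_pipe_idle(crtc);	/* hypothetical per-CRTC sync */
		drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
	}
}
#endif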
 
/**
* drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
* This function cleans up plane state, specifically framebuffers, from the old
* configuration. Hence the old configuration must be preserved in @old_state to
* be able to call this function.
*
* This function must also be called on the new state when the atomic update
* fails at any point after calling drm_atomic_helper_prepare_planes().
*/
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_plane *plane;
struct drm_plane_state *plane_state;
int i;
 
for_each_plane_in_state(old_state, plane, plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
 
funcs = plane->helper_private;
 
if (funcs->cleanup_fb)
funcs->cleanup_fb(plane, plane_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
 
/**
* drm_atomic_helper_swap_state - store atomic state into current sw state
* @dev: DRM device
* @state: atomic state
*
* This function stores the atomic state into the current state pointers in all
* driver objects. It should be called after all failing steps have been done
* and succeeded, but before the actual hardware state is committed.
*
* For cleanup and error recovery the current state for all changed objects will
* be swapped into @state.
*
* With that sequence it fits perfectly into the plane prepare/cleanup sequence:
*
* 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
*
* 2. Do any other steps that might fail.
*
* 3. Put the staged state into the current state pointers with this function.
*
* 4. Actually commit the hardware state.
*
* 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
* contains the old state. Also do any other cleanup required with that state.
*/
void drm_atomic_helper_swap_state(struct drm_device *dev,
struct drm_atomic_state *state)
{
int i;
 
for (i = 0; i < dev->mode_config.num_connector; i++) {
struct drm_connector *connector = state->connectors[i];
 
if (!connector)
continue;
 
connector->state->state = state;
swap(state->connector_states[i], connector->state);
connector->state->state = NULL;
}
 
for (i = 0; i < dev->mode_config.num_crtc; i++) {
struct drm_crtc *crtc = state->crtcs[i];
 
if (!crtc)
continue;
 
crtc->state->state = state;
swap(state->crtc_states[i], crtc->state);
crtc->state->state = NULL;
}
 
for (i = 0; i < dev->mode_config.num_total_plane; i++) {
struct drm_plane *plane = state->planes[i];
 
if (!plane)
continue;
 
plane->state->state = state;
swap(state->plane_states[i], plane->state);
plane->state->state = NULL;
}
}
EXPORT_SYMBOL(drm_atomic_helper_swap_state);
 
/**
* drm_atomic_helper_update_plane - Helper for primary plane update using atomic
* @plane: plane object to update
* @crtc: owning CRTC of the plane
* @fb: framebuffer to flip onto plane
* @crtc_x: x offset of primary plane on crtc
* @crtc_y: y offset of primary plane on crtc
* @crtc_w: width of primary plane rectangle on crtc
* @crtc_h: height of primary plane rectangle on crtc
* @src_x: x offset of @fb for panning
* @src_y: y offset of @fb for panning
* @src_w: width of source rectangle in @fb
* @src_h: height of source rectangle in @fb
*
* Provides a default plane update handler using the atomic driver interface.
*
* RETURNS:
* Zero on success, error code on failure
*/
int drm_atomic_helper_update_plane(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct drm_atomic_state *state;
struct drm_plane_state *plane_state;
int ret = 0;
 
state = drm_atomic_state_alloc(plane->dev);
if (!state)
return -ENOMEM;
 
state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
retry:
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto fail;
}
 
ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
if (ret != 0)
goto fail;
drm_atomic_set_fb_for_plane(plane_state, fb);
plane_state->crtc_x = crtc_x;
plane_state->crtc_y = crtc_y;
plane_state->crtc_h = crtc_h;
plane_state->crtc_w = crtc_w;
plane_state->src_x = src_x;
plane_state->src_y = src_y;
plane_state->src_h = src_h;
plane_state->src_w = src_w;
 
if (plane == crtc->cursor)
state->legacy_cursor_update = true;
 
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
 
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
 
drm_atomic_state_free(state);
 
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
 
/*
* Someone might have exchanged the framebuffer while we dropped locks
* in the backoff code. We need to fix up the fb refcount tracking the
* core does for us.
*/
plane->old_fb = plane->fb;
 
goto retry;
}
EXPORT_SYMBOL(drm_atomic_helper_update_plane);
 
/**
* drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
* @plane: plane to disable
*
* Provides a default plane disable handler using the atomic driver interface.
*
* RETURNS:
* Zero on success, error code on failure
*/
int drm_atomic_helper_disable_plane(struct drm_plane *plane)
{
struct drm_atomic_state *state;
struct drm_plane_state *plane_state;
int ret = 0;
 
/*
* FIXME: Without plane->crtc set we can't get at the implicit legacy
* acquire context. The real fix will be to wire the acquire ctx through
* everywhere we need it, but meanwhile prevent chaos by just skipping
* this noop. The critical case is the cursor ioctls which a) only grab
* crtc/cursor-plane locks (so we need the crtc to get at the right
* acquire context) and b) can try to disable the plane multiple times.
*/
if (!plane->crtc)
return 0;
 
state = drm_atomic_state_alloc(plane->dev);
if (!state)
return -ENOMEM;
 
state->acquire_ctx = drm_modeset_legacy_acquire_ctx(plane->crtc);
retry:
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto fail;
}
 
if (plane_state->crtc && (plane == plane->crtc->cursor))
plane_state->state->legacy_cursor_update = true;
 
ret = __drm_atomic_helper_disable_plane(plane, plane_state);
if (ret != 0)
goto fail;
 
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
 
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
 
drm_atomic_state_free(state);
 
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
 
/*
* Someone might have exchanged the framebuffer while we dropped locks
* in the backoff code. We need to fix up the fb refcount tracking the
* core does for us.
*/
plane->old_fb = plane->fb;
 
goto retry;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
 
/* just used from fb-helper and atomic-helper: */
int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
struct drm_plane_state *plane_state)
{
int ret;
 
ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
if (ret != 0)
return ret;
 
drm_atomic_set_fb_for_plane(plane_state, NULL);
plane_state->crtc_x = 0;
plane_state->crtc_y = 0;
plane_state->crtc_h = 0;
plane_state->crtc_w = 0;
plane_state->src_x = 0;
plane_state->src_y = 0;
plane_state->src_h = 0;
plane_state->src_w = 0;
 
return 0;
}
 
static int update_output_state(struct drm_atomic_state *state,
struct drm_mode_set *set)
{
struct drm_device *dev = set->crtc->dev;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
int ret, i, j;
 
ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
state->acquire_ctx);
if (ret)
return ret;
 
/* First grab all affected connector/crtc states. */
for (i = 0; i < set->num_connectors; i++) {
conn_state = drm_atomic_get_connector_state(state,
set->connectors[i]);
if (IS_ERR(conn_state))
return PTR_ERR(conn_state);
}
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret)
return ret;
}
 
/* Then recompute connector->crtc links and crtc enabling state. */
for_each_connector_in_state(state, connector, conn_state, i) {
if (conn_state->crtc == set->crtc) {
ret = drm_atomic_set_crtc_for_connector(conn_state,
NULL);
if (ret)
return ret;
}
 
for (j = 0; j < set->num_connectors; j++) {
if (set->connectors[j] == connector) {
ret = drm_atomic_set_crtc_for_connector(conn_state,
set->crtc);
if (ret)
return ret;
break;
}
}
}
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
/* Don't update ->enable for the CRTC in the set_config request,
* since a mismatch would indicate a bug in the upper layers.
* The actual modeset code later on will catch any
* inconsistencies here. */
if (crtc == set->crtc)
continue;
 
if (!drm_atomic_connectors_for_crtc(state, crtc)) {
ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
NULL);
if (ret < 0)
return ret;
 
crtc_state->active = false;
}
}
 
return 0;
}
 
/**
* drm_atomic_helper_set_config - set a new config from userspace
* @set: mode set configuration
*
* Provides a default crtc set_config handler using the atomic driver interface.
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
*/
int drm_atomic_helper_set_config(struct drm_mode_set *set)
{
struct drm_atomic_state *state;
struct drm_crtc *crtc = set->crtc;
int ret = 0;
 
state = drm_atomic_state_alloc(crtc->dev);
if (!state)
return -ENOMEM;
 
state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
retry:
ret = __drm_atomic_helper_set_config(set, state);
if (ret != 0)
goto fail;
 
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
 
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
 
drm_atomic_state_free(state);
 
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
 
/*
* Someone might have exchanged the framebuffer while we dropped locks
* in the backoff code. We need to fix up the fb refcount tracking the
* core does for us.
*/
crtc->primary->old_fb = crtc->primary->fb;
 
goto retry;
}
EXPORT_SYMBOL(drm_atomic_helper_set_config);
 
/* just used from fb-helper and atomic-helper: */
int __drm_atomic_helper_set_config(struct drm_mode_set *set,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state;
struct drm_plane_state *primary_state;
struct drm_crtc *crtc = set->crtc;
int hdisplay, vdisplay;
int ret;
 
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
 
primary_state = drm_atomic_get_plane_state(state, crtc->primary);
if (IS_ERR(primary_state))
return PTR_ERR(primary_state);
 
if (!set->mode) {
WARN_ON(set->fb);
WARN_ON(set->num_connectors);
 
ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
if (ret != 0)
return ret;
 
crtc_state->active = false;
 
ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
if (ret != 0)
return ret;
 
drm_atomic_set_fb_for_plane(primary_state, NULL);
 
goto commit;
}
 
WARN_ON(!set->fb);
WARN_ON(!set->num_connectors);
 
ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
if (ret != 0)
return ret;
 
crtc_state->active = true;
 
ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
if (ret != 0)
return ret;
 
drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
 
drm_atomic_set_fb_for_plane(primary_state, set->fb);
primary_state->crtc_x = 0;
primary_state->crtc_y = 0;
primary_state->crtc_h = vdisplay;
primary_state->crtc_w = hdisplay;
primary_state->src_x = set->x << 16;
primary_state->src_y = set->y << 16;
if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
primary_state->src_h = hdisplay << 16;
primary_state->src_w = vdisplay << 16;
} else {
primary_state->src_h = vdisplay << 16;
primary_state->src_w = hdisplay << 16;
}
 
commit:
ret = update_output_state(state, set);
if (ret)
return ret;
 
return 0;
}
 
/**
* drm_atomic_helper_crtc_set_property - helper for crtc properties
* @crtc: DRM crtc
* @property: DRM property
* @val: value of property
*
* Provides a default crtc set_property handler using the atomic driver
* interface.
*
* RETURNS:
* Zero on success, error code on failure
*/
int
drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t val)
{
struct drm_atomic_state *state;
struct drm_crtc_state *crtc_state;
int ret = 0;
 
state = drm_atomic_state_alloc(crtc->dev);
if (!state)
return -ENOMEM;
 
/* ->set_property is always called with all locks held. */
state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
retry:
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto fail;
}
 
ret = drm_atomic_crtc_set_property(crtc, crtc_state,
property, val);
if (ret)
goto fail;
 
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
 
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
 
drm_atomic_state_free(state);
 
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
 
goto retry;
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_set_property);
 
/**
* drm_atomic_helper_plane_set_property - helper for plane properties
* @plane: DRM plane
* @property: DRM property
* @val: value of property
*
* Provides a default plane set_property handler using the atomic driver
* interface.
*
* RETURNS:
* Zero on success, error code on failure
*/
int
drm_atomic_helper_plane_set_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t val)
{
struct drm_atomic_state *state;
struct drm_plane_state *plane_state;
int ret = 0;
 
state = drm_atomic_state_alloc(plane->dev);
if (!state)
return -ENOMEM;
 
/* ->set_property is always called with all locks held. */
state->acquire_ctx = plane->dev->mode_config.acquire_ctx;
retry:
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto fail;
}
 
ret = drm_atomic_plane_set_property(plane, plane_state,
property, val);
if (ret)
goto fail;
 
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
 
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
 
drm_atomic_state_free(state);
 
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
 
goto retry;
}
EXPORT_SYMBOL(drm_atomic_helper_plane_set_property);
 
/**
* drm_atomic_helper_connector_set_property - helper for connector properties
* @connector: DRM connector
* @property: DRM property
* @val: value of property
*
* Provides a default connector set_property handler using the atomic driver
* interface.
*
* RETURNS:
* Zero on success, error code on failure
*/
int
drm_atomic_helper_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
struct drm_atomic_state *state;
struct drm_connector_state *connector_state;
int ret = 0;
 
state = drm_atomic_state_alloc(connector->dev);
if (!state)
return -ENOMEM;
 
/* ->set_property is always called with all locks held. */
state->acquire_ctx = connector->dev->mode_config.acquire_ctx;
retry:
connector_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(connector_state)) {
ret = PTR_ERR(connector_state);
goto fail;
}
 
ret = drm_atomic_connector_set_property(connector, connector_state,
property, val);
if (ret)
goto fail;
 
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
 
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
 
drm_atomic_state_free(state);
 
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
 
goto retry;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_set_property);
 
/**
* drm_atomic_helper_page_flip - execute a legacy page flip
* @crtc: DRM crtc
* @fb: DRM framebuffer
* @event: optional DRM event to signal upon completion
* @flags: flip flags for non-vblank sync'ed updates
*
* Provides a default page flip implementation using the atomic driver interface.
*
* Note that for now so-called async page flips (i.e. updates which are not
* synchronized to vblank) are not supported, since the atomic interfaces have
* no provisions for this yet.
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
*/
int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags)
{
struct drm_plane *plane = crtc->primary;
struct drm_atomic_state *state;
struct drm_plane_state *plane_state;
struct drm_crtc_state *crtc_state;
int ret = 0;
 
if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
return -EINVAL;
 
state = drm_atomic_state_alloc(plane->dev);
if (!state)
return -ENOMEM;
 
state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
retry:
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto fail;
}
crtc_state->event = event;
 
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto fail;
}
 
ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
if (ret != 0)
goto fail;
drm_atomic_set_fb_for_plane(plane_state, fb);
 
ret = drm_atomic_async_commit(state);
if (ret != 0)
goto fail;
 
/* Driver takes ownership of state on successful async commit. */
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
 
drm_atomic_state_free(state);
 
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
 
/*
* Someone might have exchanged the framebuffer while we dropped locks
* in the backoff code. We need to fix up the fb refcount tracking the
* core does for us.
*/
plane->old_fb = plane->fb;
 
goto retry;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip);
 
/**
* drm_atomic_helper_connector_dpms() - connector dpms helper implementation
* @connector: affected connector
* @mode: DPMS mode
*
* This is the main helper function provided by the atomic helper framework for
* implementing the legacy DPMS connector interface. It computes the new desired
* ->active state for the corresponding CRTC (if the connector is enabled) and
* updates it.
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
*/
int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
int mode)
{
struct drm_mode_config *config = &connector->dev->mode_config;
struct drm_atomic_state *state;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
struct drm_connector *tmp_connector;
int ret;
bool active = false;
int old_mode = connector->dpms;
 
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
 
connector->dpms = mode;
crtc = connector->state->crtc;
 
if (!crtc)
return 0;
 
state = drm_atomic_state_alloc(connector->dev);
if (!state)
return -ENOMEM;
 
state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
retry:
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto fail;
}
 
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
drm_for_each_connector(tmp_connector, connector->dev) {
if (tmp_connector->state->crtc != crtc)
continue;
 
if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
active = true;
break;
}
}
crtc_state->active = active;
 
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
 
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
 
connector->dpms = old_mode;
drm_atomic_state_free(state);
 
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
 
goto retry;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
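
/*
 * Illustrative sketch: how the legacy entry point helpers above are
 * typically plugged into the funcs tables of an atomic driver. Only the
 * relevant hooks of the hypothetical foo_* tables are shown.
 */
#if 0
static const struct drm_crtc_funcs foo_crtc_funcs = {
	.set_config	= drm_atomic_helper_set_config,
	.page_flip	= drm_atomic_helper_page_flip,
	.set_property	= drm_atomic_helper_crtc_set_property,
};

static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.set_property	= drm_atomic_helper_plane_set_property,
};

static const struct drm_connector_funcs foo_connector_funcs = {
	.dpms		= drm_atomic_helper_connector_dpms,
	.set_property	= drm_atomic_helper_connector_set_property,
};
#endif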
 
/**
* DOC: atomic state reset and initialization
*
* Both the drm core and the atomic helpers assume that there is always the full
* and correct atomic software state for all connectors, CRTCs and planes
* available. This is a bit of a problem on driver load and also after system
* suspend. One way to solve this is to have a hardware state read-out
* infrastructure which reconstructs the full software state (e.g. the i915
* driver).
*
* The simpler solution is to just reset the software state to everything off,
* which is easiest to do by calling drm_mode_config_reset(). To facilitate this
* the atomic helpers provide default reset implementations for all hooks.
*/
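
/*
 * Illustrative sketch: with the default reset helpers below wired into the
 * funcs tables, driver load only needs one extra call. foo_load and
 * foo_create_mode_objects are hypothetical.
 */
#if 0
static int foo_load(struct drm_device *dev)
{
	foo_create_mode_objects(dev);	/* planes, CRTCs, connectors, ... */

	/* Invokes the ->reset hook of every plane, CRTC and connector,
	 * giving each object a zeroed-out software state. */
	drm_mode_config_reset(dev);

	return 0;
}
#endif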
 
/**
* drm_atomic_helper_crtc_reset - default ->reset hook for CRTCs
* @crtc: drm CRTC
*
* Resets the atomic state for @crtc by freeing the state pointer (which might
* be NULL, e.g. at driver load time) and allocating a new empty state object.
*/
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
{
if (crtc->state && crtc->state->mode_blob)
drm_property_unreference_blob(crtc->state->mode_blob);
kfree(crtc->state);
crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
 
if (crtc->state)
crtc->state->crtc = crtc;
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
 
/**
* __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
* @crtc: CRTC object
* @state: atomic CRTC state
*
* Copies atomic state from a CRTC's current state and resets inferred values.
* This is useful for drivers that subclass the CRTC state.
*/
void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
memcpy(state, crtc->state, sizeof(*state));
 
if (state->mode_blob)
drm_property_reference_blob(state->mode_blob);
state->mode_changed = false;
state->active_changed = false;
state->planes_changed = false;
state->connectors_changed = false;
state->event = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
 
/**
* drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
* @crtc: drm CRTC
*
* Default CRTC state duplicate hook for drivers which don't have their own
* subclassed CRTC state structure.
*/
struct drm_crtc_state *
drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct drm_crtc_state *state;
 
if (WARN_ON(!crtc->state))
return NULL;
 
state = kmalloc(sizeof(*state), GFP_KERNEL);
if (state)
__drm_atomic_helper_crtc_duplicate_state(crtc, state);
 
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
 
/**
* __drm_atomic_helper_crtc_destroy_state - release CRTC state
* @crtc: CRTC object
* @state: CRTC state object to release
*
* Releases all resources stored in the CRTC state without actually freeing
* the memory of the CRTC state. This is useful for drivers that subclass the
* CRTC state.
*/
void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
if (state->mode_blob)
drm_property_unreference_blob(state->mode_blob);
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
 
/**
* drm_atomic_helper_crtc_destroy_state - default state destroy hook
* @crtc: drm CRTC
* @state: CRTC state object to release
*
* Default CRTC state destroy hook for drivers which don't have their own
* subclassed CRTC state structure.
*/
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
__drm_atomic_helper_crtc_destroy_state(crtc, state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
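
/*
 * Illustrative sketch: a driver subclassing drm_crtc_state and reusing the
 * __drm_atomic_helper_crtc_* helpers above for the common part. The
 * foo_crtc_state wrapper and its extra member are hypothetical.
 */
#if 0
struct foo_crtc_state {
	struct drm_crtc_state base;	/* must be the first member */
	u32 dither_mode;		/* driver private state */
};

#define to_foo_crtc_state(s) container_of(s, struct foo_crtc_state, base)

static struct drm_crtc_state *
foo_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct foo_crtc_state *state;

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
	state->dither_mode = to_foo_crtc_state(crtc->state)->dither_mode;

	return &state->base;
}

static void foo_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(crtc, state);
	kfree(to_foo_crtc_state(state));
}
#endif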
 
/**
* drm_atomic_helper_plane_reset - default ->reset hook for planes
* @plane: drm plane
*
* Resets the atomic state for @plane by freeing the state pointer (which might
* be NULL, e.g. at driver load time) and allocating a new empty state object.
*/
void drm_atomic_helper_plane_reset(struct drm_plane *plane)
{
if (plane->state && plane->state->fb)
drm_framebuffer_unreference(plane->state->fb);
 
kfree(plane->state);
plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
 
if (plane->state)
plane->state->plane = plane;
}
EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
 
/**
* __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
* @plane: plane object
* @state: atomic plane state
*
* Copies atomic state from a plane's current state. This is useful for
* drivers that subclass the plane state.
*/
void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
memcpy(state, plane->state, sizeof(*state));
 
if (state->fb)
drm_framebuffer_reference(state->fb);
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
 
/**
* drm_atomic_helper_plane_duplicate_state - default state duplicate hook
* @plane: drm plane
*
* Default plane state duplicate hook for drivers which don't have their own
* subclassed plane state structure.
*/
struct drm_plane_state *
drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
{
struct drm_plane_state *state;
 
if (WARN_ON(!plane->state))
return NULL;
 
state = kmalloc(sizeof(*state), GFP_KERNEL);
if (state)
__drm_atomic_helper_plane_duplicate_state(plane, state);
 
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
 
/**
* __drm_atomic_helper_plane_destroy_state - release plane state
* @plane: plane object
* @state: plane state object to release
*
* Releases all resources stored in the plane state without actually freeing
* the memory of the plane state. This is useful for drivers that subclass the
* plane state.
*/
void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
if (state->fb)
drm_framebuffer_unreference(state->fb);
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
 
/**
* drm_atomic_helper_plane_destroy_state - default state destroy hook
* @plane: drm plane
* @state: plane state object to release
*
* Default plane state destroy hook for drivers which don't have their own
* subclassed plane state structure.
*/
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
__drm_atomic_helper_plane_destroy_state(plane, state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
 
/**
* drm_atomic_helper_connector_reset - default ->reset hook for connectors
* @connector: drm connector
*
* Resets the atomic state for @connector by freeing the state pointer (which
* might be NULL, e.g. at driver load time) and allocating a new empty state
* object.
*/
void drm_atomic_helper_connector_reset(struct drm_connector *connector)
{
kfree(connector->state);
connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL);
 
if (connector->state)
connector->state->connector = connector;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
 
/**
* __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
* @connector: connector object
* @state: atomic connector state
*
* Copies atomic state from a connector's current state. This is useful for
* drivers that subclass the connector state.
*/
void
__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
struct drm_connector_state *state)
{
memcpy(state, connector->state, sizeof(*state));
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
 
/**
* drm_atomic_helper_connector_duplicate_state - default state duplicate hook
* @connector: drm connector
*
* Default connector state duplicate hook for drivers which don't have their own
* subclassed connector state structure.
*/
struct drm_connector_state *
drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
{
struct drm_connector_state *state;
 
if (WARN_ON(!connector->state))
return NULL;
 
state = kmalloc(sizeof(*state), GFP_KERNEL);
if (state)
__drm_atomic_helper_connector_duplicate_state(connector, state);
 
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
 
/**
* drm_atomic_helper_duplicate_state - duplicate an atomic state object
* @dev: DRM device
* @ctx: lock acquisition context
*
* Makes a copy of the current atomic state by looping over all objects and
* duplicating their respective states.
*
* Note that this treats atomic state as persistent between save and restore.
* Drivers must make sure that this is possible and won't result in confusion
* or erroneous behaviour.
*
* Note that if callers haven't already acquired all modeset locks this might
* return -EDEADLK, which must be handled by calling drm_modeset_backoff().
*
* Returns:
* A pointer to the copy of the atomic state object on success or an
* ERR_PTR()-encoded error code on failure.
*/
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_atomic_state *state;
struct drm_connector *conn;
struct drm_plane *plane;
struct drm_crtc *crtc;
int err = 0;
 
state = drm_atomic_state_alloc(dev);
if (!state)
return ERR_PTR(-ENOMEM);
 
state->acquire_ctx = ctx;
 
drm_for_each_crtc(crtc, dev) {
struct drm_crtc_state *crtc_state;
 
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
err = PTR_ERR(crtc_state);
goto free;
}
}
 
drm_for_each_plane(plane, dev) {
struct drm_plane_state *plane_state;
 
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
err = PTR_ERR(plane_state);
goto free;
}
}
 
drm_for_each_connector(conn, dev) {
struct drm_connector_state *conn_state;
 
conn_state = drm_atomic_get_connector_state(state, conn);
if (IS_ERR(conn_state)) {
err = PTR_ERR(conn_state);
goto free;
}
}
 
/* clear the acquire context so that it isn't accidentally reused */
state->acquire_ctx = NULL;
 
free:
if (err < 0) {
drm_atomic_state_free(state);
state = ERR_PTR(err);
}
 
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
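
/*
 * Illustrative sketch of the save/restore use case mentioned above:
 * duplicate the complete state before suspend so it can be committed again
 * on resume. foo_suspend, foo_private and the suspend_state member are
 * hypothetical, and -EDEADLK handling is glossed over by taking all locks
 * up front.
 */
#if 0
static int foo_suspend(struct drm_device *dev)
{
	struct foo_private *priv = dev->dev_private;	/* hypothetical */

	drm_modeset_lock_all(dev);
	priv->suspend_state =
		drm_atomic_helper_duplicate_state(dev,
						  dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	return PTR_ERR_OR_ZERO(priv->suspend_state);
}
#endif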
 
/**
* __drm_atomic_helper_connector_destroy_state - release connector state
* @connector: connector object
* @state: connector state object to release
*
* Releases all resources stored in the connector state without actually
* freeing the memory of the connector state. This is useful for drivers that
* subclass the connector state.
*/
void
__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state)
{
/*
* This is currently a placeholder so that drivers that subclass the
* state will automatically do the right thing if code is ever added
* to this function.
*/
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
 
/**
* drm_atomic_helper_connector_destroy_state - default state destroy hook
* @connector: drm connector
* @state: connector state object to release
*
* Default connector state destroy hook for drivers which don't have their own
* subclassed connector state structure.
*/
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state)
{
__drm_atomic_helper_connector_destroy_state(connector, state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
/drivers/video/drm/drm_bridge.c
0,0 → 1,333
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
#include <linux/err.h>
#include <linux/module.h>
 
#include <drm/drm_crtc.h>
 
#include "drm/drmP.h"
 
/**
* DOC: overview
*
* drm_bridge represents a device that hangs on to an encoder. These are handy
* when a regular drm_encoder entity isn't enough to represent the entire
* encoder chain.
*
* A bridge is always associated to a single drm_encoder at a time, but can be
* either connected to it directly, or through an intermediate bridge:
*
* encoder ---> bridge B ---> bridge A
*
* Here, the output of the encoder feeds to bridge B, and that in turn feeds to
* bridge A.
*
* The driver using the bridge is responsible for making the associations between
* the encoder and bridges. Once these links are made, the bridges will
* participate along with encoder functions to perform mode_set/enable/disable
* through the ops provided in drm_bridge_funcs.
*
* A drm_bridge, like a drm_panel, isn't a drm_mode_object entity like planes,
* crtcs, encoders or connectors. It just provides additional hooks to get the
* desired output at the end of the encoder chain.
*/
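
/*
 * Illustrative sketch: wiring up the chain from the comment above, with
 * hypothetical encoder/bridge variables owned by the KMS driver.
 */
#if 0
	/* encoder ---> bridge B ---> bridge A */
	encoder->bridge = bridge_b;
	bridge_b->encoder = encoder;
	bridge_b->next = bridge_a;

	ret = drm_bridge_attach(dev, bridge_b);
	if (!ret)
		ret = drm_bridge_attach(dev, bridge_a);
#endif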
 
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
 
/**
* drm_bridge_add - add the given bridge to the global bridge list
*
* @bridge: bridge control structure
*
* RETURNS:
* Unconditionally returns Zero.
*/
int drm_bridge_add(struct drm_bridge *bridge)
{
mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);
 
return 0;
}
EXPORT_SYMBOL(drm_bridge_add);
 
/**
* drm_bridge_remove - remove the given bridge from the global bridge list
*
* @bridge: bridge control structure
*/
void drm_bridge_remove(struct drm_bridge *bridge)
{
mutex_lock(&bridge_lock);
list_del_init(&bridge->list);
mutex_unlock(&bridge_lock);
}
EXPORT_SYMBOL(drm_bridge_remove);
 
/**
* drm_bridge_attach - associate given bridge to our DRM device
*
* @dev: DRM device
* @bridge: bridge control structure
*
* Called by a KMS driver to link one of its encoders or bridges to the given
* bridge.
*
* Note that setting up links between the bridge and the driver's encoder/bridge
* objects needs to be handled by the KMS driver itself.
*
* RETURNS:
* Zero on success, error code on failure
*/
int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge)
{
if (!dev || !bridge)
return -EINVAL;
 
if (bridge->dev)
return -EBUSY;
 
bridge->dev = dev;
 
if (bridge->funcs->attach)
return bridge->funcs->attach(bridge);
 
return 0;
}
EXPORT_SYMBOL(drm_bridge_attach);
 
/**
* DOC: bridge callbacks
*
* The drm_bridge_funcs ops are populated by the bridge driver. The DRM
* internals (atomic and CRTC helpers) use the helpers defined in drm_bridge.c.
* These helpers call a specific drm_bridge_funcs op for all the bridges
* during encoder configuration.
*
* When creating a bridge driver, one can implement the drm_bridge_funcs ops
* with the help of these rough rules:
*
* pre_enable: this contains the operations that need to be performed on the
* bridge before its clock and timings are enabled by its source. For a bridge,
* its source is generally the encoder or bridge just before it in the encoder
* chain.
*
* enable: this contains the operations that need to be performed on the bridge
* once its source is enabled. In other words, enable is called once the source
* is ready with the clock and timings the bridge needs.
*
* disable: this contains the operations that need to be performed on the
* bridge assuming that its source is still enabled, i.e. clock and timings are
* still on.
*
* post_disable: this contains the operations that need to be performed on the
* bridge once its source is disabled, i.e. once clocks and timings are off.
*
* mode_fixup: this should fix up the given mode for the bridge. It is called
* after the encoder's mode fixup. mode_fixup can also reject a mode completely
* if it's unsuitable for the hardware.
*
* mode_set: this sets up the mode for the bridge. It assumes that its source
* (an encoder or a bridge) has set the mode too.
*/
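
/*
 * Illustrative sketch: a drm_bridge_funcs table following the rules above.
 * The foo_bridge_* callbacks are hypothetical.
 */
#if 0
static const struct drm_bridge_funcs foo_bridge_funcs = {
	.pre_enable	= foo_bridge_pre_enable,   /* source clocks still off */
	.enable		= foo_bridge_enable,	   /* source up and running */
	.disable	= foo_bridge_disable,	   /* source still running */
	.post_disable	= foo_bridge_post_disable, /* source switched off */
	.mode_fixup	= foo_bridge_mode_fixup,
	.mode_set	= foo_bridge_mode_set,
};
#endif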
 
/**
* drm_bridge_mode_fixup - fixup proposed mode for all bridges in the
* encoder chain
* @bridge: bridge control structure
* @mode: desired mode to be set for the bridge
* @adjusted_mode: updated mode that works for this bridge
*
* Calls 'mode_fixup' drm_bridge_funcs op for all the bridges in the
* encoder chain, starting from the first bridge to the last.
*
* Note: the bridge passed should be the one closest to the encoder
*
* RETURNS:
* true on success, false on failure
*/
bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
bool ret = true;
 
if (!bridge)
return true;
 
if (bridge->funcs->mode_fixup)
ret = bridge->funcs->mode_fixup(bridge, mode, adjusted_mode);
 
ret = ret && drm_bridge_mode_fixup(bridge->next, mode, adjusted_mode);
 
return ret;
}
EXPORT_SYMBOL(drm_bridge_mode_fixup);
 
/**
* drm_bridge_disable - calls 'disable' drm_bridge_funcs op for all
* bridges in the encoder chain.
* @bridge: bridge control structure
*
* Calls 'disable' drm_bridge_funcs op for all the bridges in the encoder
* chain, starting from the last bridge to the first. These are called before
* calling the encoder's prepare op.
*
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_bridge_disable(struct drm_bridge *bridge)
{
if (!bridge)
return;
 
drm_bridge_disable(bridge->next);
 
bridge->funcs->disable(bridge);
}
EXPORT_SYMBOL(drm_bridge_disable);
 
/**
* drm_bridge_post_disable - calls 'post_disable' drm_bridge_funcs op for
* all bridges in the encoder chain.
* @bridge: bridge control structure
*
* Calls 'post_disable' drm_bridge_funcs op for all the bridges in the
* encoder chain, starting from the first bridge to the last. These are called
* after completing the encoder's prepare op.
*
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_bridge_post_disable(struct drm_bridge *bridge)
{
if (!bridge)
return;
 
bridge->funcs->post_disable(bridge);
 
drm_bridge_post_disable(bridge->next);
}
EXPORT_SYMBOL(drm_bridge_post_disable);
 
/**
* drm_bridge_mode_set - set proposed mode for all bridges in the
* encoder chain
* @bridge: bridge control structure
* @mode: desired mode to be set for the bridge
* @adjusted_mode: updated mode that works for this bridge
*
* Calls 'mode_set' drm_bridge_funcs op for all the bridges in the
* encoder chain, starting from the first bridge to the last.
*
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
if (!bridge)
return;
 
if (bridge->funcs->mode_set)
bridge->funcs->mode_set(bridge, mode, adjusted_mode);
 
drm_bridge_mode_set(bridge->next, mode, adjusted_mode);
}
EXPORT_SYMBOL(drm_bridge_mode_set);
 
/**
* drm_bridge_pre_enable - calls 'pre_enable' drm_bridge_funcs op for all
* bridges in the encoder chain.
* @bridge: bridge control structure
*
* Calls 'pre_enable' drm_bridge_funcs op for all the bridges in the encoder
* chain, starting from the last bridge to the first. These are called
* before calling the encoder's commit op.
*
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_bridge_pre_enable(struct drm_bridge *bridge)
{
if (!bridge)
return;
 
drm_bridge_pre_enable(bridge->next);
 
bridge->funcs->pre_enable(bridge);
}
EXPORT_SYMBOL(drm_bridge_pre_enable);
 
/**
* drm_bridge_enable - calls 'enable' drm_bridge_funcs op for all bridges
* in the encoder chain.
* @bridge: bridge control structure
*
* Calls 'enable' drm_bridge_funcs op for all the bridges in the encoder
* chain, starting from the first bridge to the last. These are called
* after completing the encoder's commit op.
*
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_bridge_enable(struct drm_bridge *bridge)
{
if (!bridge)
return;
 
bridge->funcs->enable(bridge);
 
drm_bridge_enable(bridge->next);
}
EXPORT_SYMBOL(drm_bridge_enable);
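 
/*
 * Editor's sketch (not part of this file), assuming the common pattern
 * where struct drm_encoder carries a pointer to the first bridge in its
 * chain: the helpers above are meant to bracket the encoder's own
 * enable/disable (or prepare/commit) hooks. my_encoder_enable/disable
 * and my_hw_encoder_on/off are hypothetical names.
 */
static void my_encoder_disable(struct drm_encoder *encoder)
{
    drm_bridge_disable(encoder->bridge);      /* bridges, last to first */
    my_hw_encoder_off(encoder);               /* hypothetical hw teardown */
    drm_bridge_post_disable(encoder->bridge); /* bridges, first to last */
}

static void my_encoder_enable(struct drm_encoder *encoder)
{
    drm_bridge_pre_enable(encoder->bridge);   /* bridges, last to first */
    my_hw_encoder_on(encoder);                /* hypothetical hw setup */
    drm_bridge_enable(encoder->bridge);       /* bridges, first to last */
}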
 
#ifdef CONFIG_OF
/**
* of_drm_find_bridge - find the bridge corresponding to the device node in
* the global bridge list
*
* @np: device node
*
* RETURNS:
* drm_bridge control struct on success, NULL on failure
*/
struct drm_bridge *of_drm_find_bridge(struct device_node *np)
{
struct drm_bridge *bridge;
 
mutex_lock(&bridge_lock);
 
list_for_each_entry(bridge, &bridge_list, list) {
if (bridge->of_node == np) {
mutex_unlock(&bridge_lock);
return bridge;
}
}
 
mutex_unlock(&bridge_lock);
return NULL;
}
EXPORT_SYMBOL(of_drm_find_bridge);
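 
/*
 * Editor's usage sketch (not part of this file): a typical probe path
 * resolves a bridge phandle from its own device node and defers until
 * the bridge driver has registered. The "bridge" DT property name is
 * illustrative only.
 */
static int my_attach_bridge(struct device_node *np, struct drm_bridge **out)
{
    struct device_node *remote;
    struct drm_bridge *bridge;

    remote = of_parse_phandle(np, "bridge", 0);
    if (!remote)
        return -ENODEV;

    bridge = of_drm_find_bridge(remote);
    of_node_put(remote);
    if (!bridge)
        return -EPROBE_DEFER; /* bridge driver not bound yet */

    *out = bridge;
    return 0;
}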
#endif
 
MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");
/drivers/video/drm/drm_cache.c
61,12 → 61,6
drm_clflush_page(*pages++);
mb();
}
 
static void
drm_clflush_ipi_handler(void *null)
{
wbinvd();
}
#endif
 
void
125,20 → 119,21
 
#if 0
void
drm_clflush_virt_range(char *addr, unsigned long length)
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
if (cpu_has_clflush) {
char *end = addr + length;
const int size = boot_cpu_data.x86_clflush_size;
void *end = addr + length;
addr = (void *)(((unsigned long)addr) & -size);
mb();
for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
clflush(addr);
clflush(end - 1);
for (; addr < end; addr += size)
clflushopt(addr);
mb();
return;
}
 
if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
if (wbinvd_on_all_cpus())
printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");
/drivers/video/drm/drm_crtc.c
38,11 → 38,13
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_atomic.h>
 
#include "drm_crtc_internal.h"
#include "drm_internal.h"
 
static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
static struct drm_framebuffer *
internal_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r,
struct drm_file *file_priv);
 
61,8 → 63,8
/*
* Global properties
*/
static const struct drm_prop_enum_list drm_dpms_enum_list[] =
{ { DRM_MODE_DPMS_ON, "On" },
static const struct drm_prop_enum_list drm_dpms_enum_list[] = {
{ DRM_MODE_DPMS_ON, "On" },
{ DRM_MODE_DPMS_STANDBY, "Standby" },
{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
{ DRM_MODE_DPMS_OFF, "Off" }
70,8 → 72,7
 
DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
 
static const struct drm_prop_enum_list drm_plane_type_enum_list[] =
{
static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
{ DRM_PLANE_TYPE_OVERLAY, "Overlay" },
{ DRM_PLANE_TYPE_PRIMARY, "Primary" },
{ DRM_PLANE_TYPE_CURSOR, "Cursor" },
80,8 → 81,7
/*
* Optional properties
*/
static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
{
static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = {
{ DRM_MODE_SCALE_NONE, "None" },
{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
{ DRM_MODE_SCALE_CENTER, "Center" },
97,8 → 97,7
/*
* Non-global properties, but "required" for certain connectors.
*/
static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
{
static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
{ DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
106,8 → 105,7
 
DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
 
static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
{
static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
{ DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
116,8 → 114,7
DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
drm_dvi_i_subconnector_enum_list)
 
static const struct drm_prop_enum_list drm_tv_select_enum_list[] =
{
static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
127,8 → 124,7
 
DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
 
static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
{
static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
154,8 → 150,8
/*
* Connector and encoder types.
*/
static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
{ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
{ DRM_MODE_CONNECTOR_Unknown, "Unknown" },
{ DRM_MODE_CONNECTOR_VGA, "VGA" },
{ DRM_MODE_CONNECTOR_DVII, "DVI-I" },
{ DRM_MODE_CONNECTOR_DVID, "DVI-D" },
174,8 → 170,8
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
};
 
static const struct drm_prop_enum_list drm_encoder_enum_list[] =
{ { DRM_MODE_ENCODER_NONE, "None" },
static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
{ DRM_MODE_ENCODER_NONE, "None" },
{ DRM_MODE_ENCODER_DAC, "DAC" },
{ DRM_MODE_ENCODER_TMDS, "TMDS" },
{ DRM_MODE_ENCODER_LVDS, "LVDS" },
185,8 → 181,7
{ DRM_MODE_ENCODER_DPMST, "DP MST" },
};
 
static const struct drm_prop_enum_list drm_subpixel_enum_list[] =
{
static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
{ SubPixelUnknown, "Unknown" },
{ SubPixelHorizontalRGB, "Horizontal RGB" },
{ SubPixelHorizontalBGR, "Horizontal BGR" },
311,8 → 306,7
* reference counted modeset objects like framebuffers.
*
* Returns:
* New unique (relative to other objects in @dev) integer identifier for the
* object.
* Zero on success, error code on failure.
*/
int drm_mode_object_get(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type)
357,7 → 351,9
if (obj && obj->id != id)
obj = NULL;
/* don't leak out unref'd fb's */
if (obj && (obj->type == DRM_MODE_OBJECT_FB))
if (obj &&
(obj->type == DRM_MODE_OBJECT_FB ||
obj->type == DRM_MODE_OBJECT_BLOB))
obj = NULL;
mutex_unlock(&dev->mode_config.idr_mutex);
 
382,7 → 378,7
 
/* Framebuffers are reference counted and need their own lookup
* function.*/
WARN_ON(type == DRM_MODE_OBJECT_FB);
WARN_ON(type == DRM_MODE_OBJECT_FB || type == DRM_MODE_OBJECT_BLOB);
obj = _object_find(dev, id, type);
return obj;
}
426,7 → 422,7
out:
mutex_unlock(&dev->mode_config.fb_lock);
 
return 0;
return ret;
}
EXPORT_SYMBOL(drm_framebuffer_init);
 
494,8 → 490,10
 
mutex_lock(&dev->mode_config.fb_lock);
fb = __drm_framebuffer_lookup(dev, id);
if (fb)
drm_framebuffer_reference(fb);
if (fb) {
if (!kref_get_unless_zero(&fb->refcount))
fb = NULL;
}
mutex_unlock(&dev->mode_config.fb_lock);
 
return fb;
528,17 → 526,6
}
EXPORT_SYMBOL(drm_framebuffer_reference);
 
static void drm_framebuffer_free_bug(struct kref *kref)
{
BUG();
}
 
static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
kref_put(&fb->refcount, drm_framebuffer_free_bug);
}
 
/**
* drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
* @fb: fb to unregister
550,8 → 537,13
*/
void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
struct drm_device *dev;
 
if (!fb)
return;
 
dev = fb->dev;
 
mutex_lock(&dev->mode_config.fb_lock);
/* Mark fb as reaped and drop idr ref. */
__drm_framebuffer_unregister(dev, fb);
601,12 → 593,17
*/
void drm_framebuffer_remove(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
struct drm_device *dev;
struct drm_crtc *crtc;
struct drm_plane *plane;
struct drm_mode_set set;
int ret;
 
if (!fb)
return;
 
dev = fb->dev;
 
WARN_ON(!list_empty(&fb->filp_head));
 
/*
627,7 → 624,7
if (atomic_read(&fb->refcount.refcount) > 1) {
drm_modeset_lock_all(dev);
/* remove from any CRTC */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
drm_for_each_crtc(crtc, dev) {
if (crtc->primary->fb == fb) {
/* should turn off the crtc */
memset(&set, 0, sizeof(struct drm_mode_set));
639,7 → 636,7
}
}
 
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
drm_for_each_plane(plane, dev) {
if (plane->fb == fb)
drm_plane_force_disable(plane);
}
674,9 → 671,11
struct drm_mode_config *config = &dev->mode_config;
int ret;
 
WARN_ON(primary && primary->type != DRM_PLANE_TYPE_PRIMARY);
WARN_ON(cursor && cursor->type != DRM_PLANE_TYPE_CURSOR);
 
crtc->dev = dev;
crtc->funcs = funcs;
crtc->invert_dimensions = false;
 
drm_modeset_lock_init(&crtc->mutex);
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
695,6 → 694,11
if (cursor)
cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
 
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&crtc->base, config->prop_active, 0);
drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
}
 
return 0;
}
EXPORT_SYMBOL(drm_crtc_init_with_planes);
740,7 → 744,7
unsigned int index = 0;
struct drm_crtc *tmp;
 
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
drm_for_each_crtc(tmp, crtc->dev) {
if (tmp == crtc)
return index;
 
766,6 → 770,95
}
 
/**
* drm_display_info_set_bus_formats - set the supported bus formats
* @info: display info to store bus formats in
* @formats: array containing the supported bus formats
* @num_formats: the number of entries in the @formats array
*
* Store the supported bus formats in display info structure.
* See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for
* a full list of available formats.
*/
int drm_display_info_set_bus_formats(struct drm_display_info *info,
const u32 *formats,
unsigned int num_formats)
{
u32 *fmts = NULL;
 
if (!formats && num_formats)
return -EINVAL;
 
if (formats && num_formats) {
fmts = kmemdup(formats, sizeof(*formats) * num_formats,
GFP_KERNEL);
if (!fmts)
return -ENOMEM;
}
 
kfree(info->bus_formats);
info->bus_formats = fmts;
info->num_bus_formats = num_formats;
 
return 0;
}
EXPORT_SYMBOL(drm_display_info_set_bus_formats);
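 
/*
 * Editor's usage sketch (not part of this file): a panel or connector
 * driver advertising a single RGB888 media bus format (constant from
 * <linux/media-bus-format.h>). The helper duplicates the array, so a
 * static const table is fine.
 */
static const u32 my_bus_formats[] = { MEDIA_BUS_FMT_RGB888_1X24 };

static int my_connector_set_bus_format(struct drm_connector *connector)
{
    return drm_display_info_set_bus_formats(&connector->display_info,
                                            my_bus_formats,
                                            ARRAY_SIZE(my_bus_formats));
}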
 
/**
* drm_connector_get_cmdline_mode - reads the user's cmdline mode
* @connector: connector to query
*
* The kernel supports per-connector configuration of its consoles through
* use of the video= parameter. This function parses that option and
* extracts the user's specified mode (or enable/disable status) for a
* particular connector. This is typically only used during the early fbdev
* setup.
*/
static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
{
struct drm_cmdline_mode *mode = &connector->cmdline_mode;
char *option = NULL;
 
return; /* cmdline parsing is stubbed out in this port */
#if 0
if (fb_get_options(connector->name, &option))
return;
 
if (!drm_mode_parse_command_line_for_connector(option,
connector,
mode))
return;
 
if (mode->force) {
const char *s;
 
switch (mode->force) {
case DRM_FORCE_OFF:
s = "OFF";
break;
case DRM_FORCE_ON_DIGITAL:
s = "ON - dig";
break;
default:
case DRM_FORCE_ON:
s = "ON";
break;
}
 
DRM_INFO("forcing %s connector %s\n", connector->name, s);
connector->force = mode->force;
}
 
DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
connector->name,
mode->xres, mode->yres,
mode->refresh_specified ? mode->refresh : 60,
mode->rb ? " reduced blanking" : "",
mode->margins ? " with margins" : "",
mode->interlace ? " interlaced" : "");
#endif
}
 
/**
* drm_connector_init - Init a preallocated connector
* @dev: DRM device
* @connector: the connector to init
783,6 → 876,7
const struct drm_connector_funcs *funcs,
int connector_type)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
struct ida *connector_ida =
&drm_connector_enum_list[connector_type].ida;
817,17 → 911,25
connector->edid_blob_ptr = NULL;
connector->status = connector_status_unknown;
 
list_add_tail(&connector->head, &dev->mode_config.connector_list);
dev->mode_config.num_connector++;
drm_connector_get_cmdline_mode(connector);
 
/* We should add connectors at the end to avoid upsetting the connector
* index too much. */
list_add_tail(&connector->head, &config->connector_list);
config->num_connector++;
 
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
drm_object_attach_property(&connector->base,
dev->mode_config.edid_property,
config->edid_property,
0);
 
drm_object_attach_property(&connector->base,
dev->mode_config.dpms_property, 0);
config->dpms_property, 0);
 
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
}
 
connector->debugfs_entry = NULL;
 
out_put:
852,6 → 954,11
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *t;
 
if (connector->tile_group) {
drm_mode_put_tile_group(dev, connector->tile_group);
connector->tile_group = NULL;
}
 
list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
drm_mode_remove(connector, mode);
 
861,6 → 968,7
ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
connector->connector_type_id);
 
kfree(connector->display_info.bus_formats);
drm_mode_object_put(dev, &connector->base);
kfree(connector->name);
connector->name = NULL;
891,7 → 999,7
 
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
drm_for_each_connector(tmp, connector->dev) {
if (tmp == connector)
return index;
 
917,6 → 1025,9
 
drm_mode_object_register(connector->dev, &connector->base);
 
ret = drm_sysfs_connector_add(connector);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL(drm_connector_register);
929,6 → 1040,7
*/
void drm_connector_unregister(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
}
EXPORT_SYMBOL(drm_connector_unregister);
 
945,7 → 1057,7
{
struct drm_connector *connector;
 
/* taking the mode config mutex ends up in a clash with sysfs */
/* FIXME: taking the mode config mutex ends up in a clash with sysfs */
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
drm_connector_unregister(connector);
 
953,61 → 1065,6
EXPORT_SYMBOL(drm_connector_unplug_all);
 
/**
* drm_bridge_init - initialize a drm transcoder/bridge
* @dev: drm device
* @bridge: transcoder/bridge to set up
* @funcs: bridge function table
*
* Initialises a preallocated bridge. Bridges should be
* subclassed as part of driver connector objects.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
const struct drm_bridge_funcs *funcs)
{
int ret;
 
drm_modeset_lock_all(dev);
 
ret = drm_mode_object_get(dev, &bridge->base, DRM_MODE_OBJECT_BRIDGE);
if (ret)
goto out;
 
bridge->dev = dev;
bridge->funcs = funcs;
 
list_add_tail(&bridge->head, &dev->mode_config.bridge_list);
dev->mode_config.num_bridge++;
 
out:
drm_modeset_unlock_all(dev);
return ret;
}
EXPORT_SYMBOL(drm_bridge_init);
 
/**
* drm_bridge_cleanup - cleans up an initialised bridge
* @bridge: bridge to cleanup
*
* Cleans up the bridge but doesn't free the object.
*/
void drm_bridge_cleanup(struct drm_bridge *bridge)
{
struct drm_device *dev = bridge->dev;
 
drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &bridge->base);
list_del(&bridge->head);
dev->mode_config.num_bridge--;
drm_modeset_unlock_all(dev);
 
memset(bridge, 0, sizeof(*bridge));
}
EXPORT_SYMBOL(drm_bridge_cleanup);
 
/**
* drm_encoder_init - Init a preallocated encoder
* @dev: drm device
* @encoder: the encoder to init
1067,6 → 1124,7
void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
 
drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &encoder->base);
kfree(encoder->name);
1096,9 → 1154,10
int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, uint32_t format_count,
const uint32_t *formats, unsigned int format_count,
enum drm_plane_type type)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
 
ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
1110,7 → 1169,7
plane->base.properties = &plane->properties;
plane->dev = dev;
plane->funcs = funcs;
plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
plane->format_types = kmalloc_array(format_count, sizeof(uint32_t),
GFP_KERNEL);
if (!plane->format_types) {
DRM_DEBUG_KMS("out of memory when allocating plane\n");
1123,15 → 1182,28
plane->possible_crtcs = possible_crtcs;
plane->type = type;
 
list_add_tail(&plane->head, &dev->mode_config.plane_list);
dev->mode_config.num_total_plane++;
list_add_tail(&plane->head, &config->plane_list);
config->num_total_plane++;
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
dev->mode_config.num_overlay_plane++;
config->num_overlay_plane++;
 
drm_object_attach_property(&plane->base,
dev->mode_config.plane_type_property,
config->plane_type_property,
plane->type);
 
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_w, 0);
drm_object_attach_property(&plane->base, config->prop_crtc_h, 0);
drm_object_attach_property(&plane->base, config->prop_src_x, 0);
drm_object_attach_property(&plane->base, config->prop_src_y, 0);
drm_object_attach_property(&plane->base, config->prop_src_w, 0);
drm_object_attach_property(&plane->base, config->prop_src_h, 0);
}
 
return 0;
}
EXPORT_SYMBOL(drm_universal_plane_init);
1156,7 → 1228,7
int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, uint32_t format_count,
const uint32_t *formats, unsigned int format_count,
bool is_primary)
{
enum drm_plane_type type;
1211,7 → 1283,7
unsigned int index = 0;
struct drm_plane *tmp;
 
list_for_each_entry(tmp, &plane->dev->mode_config.plane_list, head) {
drm_for_each_plane(tmp, plane->dev) {
if (tmp == plane)
return index;
 
1223,6 → 1295,29
EXPORT_SYMBOL(drm_plane_index);
 
/**
* drm_plane_from_index - find the registered plane at an index
* @dev: DRM device
* @idx: index of the registered plane to find
*
* Given a plane index, return the registered plane from the DRM device's
* list of planes with the matching index.
*/
struct drm_plane *
drm_plane_from_index(struct drm_device *dev, int idx)
{
struct drm_plane *plane;
unsigned int i = 0;
 
drm_for_each_plane(plane, dev) {
if (i == idx)
return plane;
i++;
}
return NULL;
}
EXPORT_SYMBOL(drm_plane_from_index);
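 
/*
 * Editor's usage sketch (not part of this file): drm_plane_from_index
 * pairs with drm_plane_index, e.g. when decoding a bitmask of planes.
 */
static void my_log_plane_mask(struct drm_device *dev, unsigned long mask)
{
    unsigned int idx;

    for_each_set_bit(idx, &mask, BITS_PER_LONG) {
        struct drm_plane *plane = drm_plane_from_index(dev, idx);

        if (plane)
            DRM_DEBUG_KMS("bit %u -> [PLANE:%d]\n", idx, plane->base.id);
    }
}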
 
/**
* drm_plane_force_disable - Forcibly disable a plane
* @plane: plane to disable
*
1246,7 → 1341,7
return;
}
/* disconnect the plane from the fb and crtc: */
__drm_framebuffer_unreference(plane->old_fb);
drm_framebuffer_unreference(plane->old_fb);
plane->old_fb = NULL;
plane->fb = NULL;
plane->crtc = NULL;
1253,51 → 1348,123
}
EXPORT_SYMBOL(drm_plane_force_disable);
 
static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
static int drm_mode_create_standard_properties(struct drm_device *dev)
{
struct drm_property *edid;
struct drm_property *dpms;
struct drm_property *dev_path;
struct drm_property *prop;
 
/*
* Standard properties (apply to all connectors)
*/
edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
DRM_MODE_PROP_IMMUTABLE,
"EDID", 0);
dev->mode_config.edid_property = edid;
if (!prop)
return -ENOMEM;
dev->mode_config.edid_property = prop;
 
dpms = drm_property_create_enum(dev, 0,
prop = drm_property_create_enum(dev, 0,
"DPMS", drm_dpms_enum_list,
ARRAY_SIZE(drm_dpms_enum_list));
dev->mode_config.dpms_property = dpms;
if (!prop)
return -ENOMEM;
dev->mode_config.dpms_property = prop;
 
dev_path = drm_property_create(dev,
prop = drm_property_create(dev,
DRM_MODE_PROP_BLOB |
DRM_MODE_PROP_IMMUTABLE,
"PATH", 0);
dev->mode_config.path_property = dev_path;
if (!prop)
return -ENOMEM;
dev->mode_config.path_property = prop;
 
dev->mode_config.tile_property = drm_property_create(dev,
prop = drm_property_create(dev,
DRM_MODE_PROP_BLOB |
DRM_MODE_PROP_IMMUTABLE,
"TILE", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.tile_property = prop;
 
return 0;
}
 
static int drm_mode_create_standard_plane_properties(struct drm_device *dev)
{
struct drm_property *type;
 
/*
* Standard properties (apply to all planes)
*/
type = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"type", drm_plane_type_enum_list,
ARRAY_SIZE(drm_plane_type_enum_list));
dev->mode_config.plane_type_property = type;
if (!prop)
return -ENOMEM;
dev->mode_config.plane_type_property = prop;
 
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"SRC_X", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_src_x = prop;
 
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"SRC_Y", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_src_y = prop;
 
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"SRC_W", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_src_w = prop;
 
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"SRC_H", 0, UINT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_src_h = prop;
 
prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
"CRTC_X", INT_MIN, INT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_crtc_x = prop;
 
prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
"CRTC_Y", INT_MIN, INT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_crtc_y = prop;
 
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"CRTC_W", 0, INT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_crtc_w = prop;
 
prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
"CRTC_H", 0, INT_MAX);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_crtc_h = prop;
 
prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
"FB_ID", DRM_MODE_OBJECT_FB);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_fb_id = prop;
 
prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
"CRTC_ID", DRM_MODE_OBJECT_CRTC);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_crtc_id = prop;
 
prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
"ACTIVE");
if (!prop)
return -ENOMEM;
dev->mode_config.prop_active = prop;
 
prop = drm_property_create(dev,
DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
"MODE_ID", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_mode_id = prop;
 
return 0;
}
 
1345,7 → 1512,7
*/
int drm_mode_create_tv_properties(struct drm_device *dev,
unsigned int num_modes,
char *modes[])
const char * const modes[])
{
struct drm_property *tv_selector;
struct drm_property *tv_subconnector;
1361,6 → 1528,9
"select subconnector",
drm_tv_select_enum_list,
ARRAY_SIZE(drm_tv_select_enum_list));
if (!tv_selector)
goto nomem;
 
dev->mode_config.tv_select_subconnector_property = tv_selector;
 
tv_subconnector =
1368,6 → 1538,8
"subconnector",
drm_tv_subconnector_enum_list,
ARRAY_SIZE(drm_tv_subconnector_enum_list));
if (!tv_subconnector)
goto nomem;
dev->mode_config.tv_subconnector_property = tv_subconnector;
 
/*
1375,19 → 1547,30
*/
dev->mode_config.tv_left_margin_property =
drm_property_create_range(dev, 0, "left margin", 0, 100);
if (!dev->mode_config.tv_left_margin_property)
goto nomem;
 
dev->mode_config.tv_right_margin_property =
drm_property_create_range(dev, 0, "right margin", 0, 100);
if (!dev->mode_config.tv_right_margin_property)
goto nomem;
 
dev->mode_config.tv_top_margin_property =
drm_property_create_range(dev, 0, "top margin", 0, 100);
if (!dev->mode_config.tv_top_margin_property)
goto nomem;
 
dev->mode_config.tv_bottom_margin_property =
drm_property_create_range(dev, 0, "bottom margin", 0, 100);
if (!dev->mode_config.tv_bottom_margin_property)
goto nomem;
 
dev->mode_config.tv_mode_property =
drm_property_create(dev, DRM_MODE_PROP_ENUM,
"mode", num_modes);
if (!dev->mode_config.tv_mode_property)
goto nomem;
 
for (i = 0; i < num_modes; i++)
drm_property_add_enum(dev->mode_config.tv_mode_property, i,
i, modes[i]);
1394,23 → 1577,37
 
dev->mode_config.tv_brightness_property =
drm_property_create_range(dev, 0, "brightness", 0, 100);
if (!dev->mode_config.tv_brightness_property)
goto nomem;
 
dev->mode_config.tv_contrast_property =
drm_property_create_range(dev, 0, "contrast", 0, 100);
if (!dev->mode_config.tv_contrast_property)
goto nomem;
 
dev->mode_config.tv_flicker_reduction_property =
drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
if (!dev->mode_config.tv_flicker_reduction_property)
goto nomem;
 
dev->mode_config.tv_overscan_property =
drm_property_create_range(dev, 0, "overscan", 0, 100);
if (!dev->mode_config.tv_overscan_property)
goto nomem;
 
dev->mode_config.tv_saturation_property =
drm_property_create_range(dev, 0, "saturation", 0, 100);
if (!dev->mode_config.tv_saturation_property)
goto nomem;
 
dev->mode_config.tv_hue_property =
drm_property_create_range(dev, 0, "hue", 0, 100);
if (!dev->mode_config.tv_hue_property)
goto nomem;
 
return 0;
nomem:
return -ENOMEM;
}
EXPORT_SYMBOL(drm_mode_create_tv_properties);
 
1515,154 → 1712,6
}
EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
 
static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
{
uint32_t total_objects = 0;
 
total_objects += dev->mode_config.num_crtc;
total_objects += dev->mode_config.num_connector;
total_objects += dev->mode_config.num_encoder;
total_objects += dev->mode_config.num_bridge;
 
group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
if (!group->id_list)
return -ENOMEM;
 
group->num_crtcs = 0;
group->num_connectors = 0;
group->num_encoders = 0;
group->num_bridges = 0;
return 0;
}
 
void drm_mode_group_destroy(struct drm_mode_group *group)
{
kfree(group->id_list);
group->id_list = NULL;
}
 
/*
* NOTE: Drivers shouldn't ever call drm_mode_group_init_legacy_group - it is
* the drm core's responsibility to set up mode control groups.
*/
int drm_mode_group_init_legacy_group(struct drm_device *dev,
struct drm_mode_group *group)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_bridge *bridge;
int ret;
 
if ((ret = drm_mode_group_init(dev, group)))
return ret;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
group->id_list[group->num_crtcs++] = crtc->base.id;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
group->id_list[group->num_crtcs + group->num_encoders++] =
encoder->base.id;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
group->id_list[group->num_crtcs + group->num_encoders +
group->num_connectors++] = connector->base.id;
 
list_for_each_entry(bridge, &dev->mode_config.bridge_list, head)
group->id_list[group->num_crtcs + group->num_encoders +
group->num_connectors + group->num_bridges++] =
bridge->base.id;
 
return 0;
}
EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
 
void drm_reinit_primary_mode_group(struct drm_device *dev)
{
drm_modeset_lock_all(dev);
drm_mode_group_destroy(&dev->primary->mode_group);
drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_reinit_primary_mode_group);
 
/**
* drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
* @out: drm_mode_modeinfo struct to return to the user
* @in: drm_display_mode to use
*
* Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
* the user.
*/
static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
const struct drm_display_mode *in)
{
WARN(in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX ||
in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX ||
in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX ||
in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX ||
in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX,
"timing values too large for mode info\n");
 
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
out->hsync_end = in->hsync_end;
out->htotal = in->htotal;
out->hskew = in->hskew;
out->vdisplay = in->vdisplay;
out->vsync_start = in->vsync_start;
out->vsync_end = in->vsync_end;
out->vtotal = in->vtotal;
out->vscan = in->vscan;
out->vrefresh = in->vrefresh;
out->flags = in->flags;
out->type = in->type;
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
}
 
/**
* drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
* @out: drm_display_mode to return to the user
* @in: drm_mode_modeinfo to use
*
* Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
* the caller.
*
* Returns:
* Zero on success, negative errno on failure.
*/
static int drm_crtc_convert_umode(struct drm_display_mode *out,
const struct drm_mode_modeinfo *in)
{
if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
return -ERANGE;
 
if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
return -EINVAL;
 
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
out->hsync_end = in->hsync_end;
out->htotal = in->htotal;
out->hskew = in->hskew;
out->vdisplay = in->vdisplay;
out->vsync_start = in->vsync_start;
out->vsync_end = in->vsync_end;
out->vtotal = in->vtotal;
out->vscan = in->vscan;
out->vrefresh = in->vrefresh;
out->flags = in->flags;
out->type = in->type;
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 
return 0;
}
 
 
#if 0
/**
* drm_mode_getresources - get graphics configuration
1692,12 → 1741,11
int crtc_count = 0;
int fb_count = 0;
int encoder_count = 0;
int copied = 0, i;
int copied = 0;
uint32_t __user *fb_id;
uint32_t __user *crtc_id;
uint32_t __user *connector_id;
uint32_t __user *encoder_id;
struct drm_mode_group *mode_group;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
1730,25 → 1778,15
/* mode_config.mutex protects the connector list against e.g. DP MST
* connector hot-adding. CRTC/Plane lists are invariant. */
mutex_lock(&dev->mode_config.mutex);
if (!drm_is_primary_client(file_priv)) {
 
mode_group = NULL;
list_for_each(lh, &dev->mode_config.crtc_list)
drm_for_each_crtc(crtc, dev)
crtc_count++;
 
list_for_each(lh, &dev->mode_config.connector_list)
drm_for_each_connector(connector, dev)
connector_count++;
 
list_for_each(lh, &dev->mode_config.encoder_list)
drm_for_each_encoder(encoder, dev)
encoder_count++;
} else {
 
mode_group = &file_priv->master->minor->mode_group;
crtc_count = mode_group->num_crtcs;
connector_count = mode_group->num_connectors;
encoder_count = mode_group->num_encoders;
}
 
card_res->max_height = dev->mode_config.max_height;
card_res->min_height = dev->mode_config.min_height;
card_res->max_width = dev->mode_config.max_width;
1758,9 → 1796,7
if (card_res->count_crtcs >= crtc_count) {
copied = 0;
crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
if (!mode_group) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
head) {
drm_for_each_crtc(crtc, dev) {
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (put_user(crtc->base.id, crtc_id + copied)) {
ret = -EFAULT;
1768,17 → 1804,7
}
copied++;
}
} else {
for (i = 0; i < mode_group->num_crtcs; i++) {
if (put_user(mode_group->id_list[i],
crtc_id + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
}
card_res->count_crtcs = crtc_count;
 
/* Encoders */
1785,10 → 1811,7
if (card_res->count_encoders >= encoder_count) {
copied = 0;
encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
if (!mode_group) {
list_for_each_entry(encoder,
&dev->mode_config.encoder_list,
head) {
drm_for_each_encoder(encoder, dev) {
DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
encoder->name);
if (put_user(encoder->base.id, encoder_id +
1798,18 → 1821,7
}
copied++;
}
} else {
for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
if (put_user(mode_group->id_list[i],
encoder_id + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
 
}
}
card_res->count_encoders = encoder_count;
 
/* Connectors */
1816,10 → 1828,7
if (card_res->count_connectors >= connector_count) {
copied = 0;
connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
if (!mode_group) {
list_for_each_entry(connector,
&dev->mode_config.connector_list,
head) {
drm_for_each_connector(connector, dev) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id,
connector->name);
1830,19 → 1839,7
}
copied++;
}
} else {
int start = mode_group->num_crtcs +
mode_group->num_encoders;
for (i = start; i < start + mode_group->num_connectors; i++) {
if (put_user(mode_group->id_list[i],
connector_id + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
}
card_res->count_connectors = connector_count;
 
DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
1880,8 → 1877,6
return -ENOENT;
 
drm_modeset_lock_crtc(crtc, crtc->primary);
crtc_resp->x = crtc->x;
crtc_resp->y = crtc->y;
crtc_resp->gamma_size = crtc->gamma_size;
if (crtc->primary->fb)
crtc_resp->fb_id = crtc->primary->fb->base.id;
1888,14 → 1883,27
else
crtc_resp->fb_id = 0;
 
if (crtc->state) {
crtc_resp->x = crtc->primary->state->src_x >> 16;
crtc_resp->y = crtc->primary->state->src_y >> 16;
if (crtc->state->enable) {
drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->state->mode);
crtc_resp->mode_valid = 1;
 
} else {
crtc_resp->mode_valid = 0;
}
} else {
crtc_resp->x = crtc->x;
crtc_resp->y = crtc->y;
if (crtc->enabled) {
 
drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->mode);
crtc_resp->mode_valid = 1;
 
} else {
crtc_resp->mode_valid = 0;
}
}
drm_modeset_unlock_crtc(crtc);
 
return 0;
1923,6 → 1931,44
return connector->encoder;
}
 
/* helper for getconnector and getproperties ioctls */
static int get_properties(struct drm_mode_object *obj, bool atomic,
uint32_t __user *prop_ptr, uint64_t __user *prop_values,
uint32_t *arg_count_props)
{
int props_count;
int i, ret, copied;
 
props_count = obj->properties->count;
if (!atomic)
props_count -= obj->properties->atomic_count;
 
if ((*arg_count_props >= props_count) && props_count) {
for (i = 0, copied = 0; copied < props_count; i++) {
struct drm_property *prop = obj->properties->properties[i];
uint64_t val;
 
if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
continue;
 
ret = drm_object_property_get_value(obj, prop, &val);
if (ret)
return ret;
 
if (put_user(prop->base.id, prop_ptr + copied))
return -EFAULT;
 
if (put_user(val, prop_values + copied))
return -EFAULT;
 
copied++;
}
}
*arg_count_props = props_count;
 
return 0;
}
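 
/*
 * Editor's note (not part of this file): get_properties() implements the
 * usual two-pass ioctl protocol - if the caller's count is too small, the
 * kernel only writes back the real count. A userspace caller (libdrm
 * style, illustrative only; allocation error handling elided) therefore
 * asks twice:
 */
#if 0
static int my_get_object_properties(int fd, uint32_t obj_id, uint32_t obj_type,
                                    uint32_t **ids, uint64_t **values,
                                    uint32_t *count)
{
    struct drm_mode_obj_get_properties arg = {
        .obj_id = obj_id,
        .obj_type = obj_type,
    };

    /* Pass 1: count_props is 0, so the kernel only reports the count. */
    if (drmIoctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg))
        return -errno;

    *ids = calloc(arg.count_props, sizeof(**ids));
    *values = calloc(arg.count_props, sizeof(**values));
    arg.props_ptr = (uintptr_t)*ids;
    arg.prop_values_ptr = (uintptr_t)*values;

    /* Pass 2: the buffers are large enough, so the kernel fills them. */
    if (drmIoctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg))
        return -errno;

    *count = arg.count_props;
    return 0;
}
#endif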
 
/**
* drm_mode_getconnector - get connector configuration
* @dev: drm device for the ioctl
1944,7 → 1990,6
struct drm_encoder *encoder;
struct drm_display_mode *mode;
int mode_count = 0;
int props_count = 0;
int encoders_count = 0;
int ret = 0;
int copied = 0;
1951,8 → 1996,6
int i;
struct drm_mode_modeinfo u_mode;
struct drm_mode_modeinfo __user *mode_ptr;
uint32_t __user *prop_ptr;
uint64_t __user *prop_values;
uint32_t __user *encoder_ptr;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
1967,16 → 2010,12
connector = drm_connector_find(dev, out_resp->connector_id);
if (!connector) {
ret = -ENOENT;
goto out;
goto out_unlock;
}
 
props_count = connector->properties.count;
 
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
if (connector->encoder_ids[i] != 0)
encoders_count++;
}
}
 
if (out_resp->count_modes == 0) {
connector->funcs->fill_modes(connector,
1996,14 → 2035,13
out_resp->mm_height = connector->display_info.height_mm;
out_resp->subpixel = connector->display_info.subpixel_order;
out_resp->connection = connector->status;
 
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 
encoder = drm_connector_get_encoder(connector);
if (encoder)
out_resp->encoder_id = encoder->base.id;
else
out_resp->encoder_id = 0;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
 
/*
* This ioctl is called twice, once to determine how much space is
2016,7 → 2054,7
if (!drm_mode_expose_to_userspace(mode, file_priv))
continue;
 
drm_crtc_convert_to_umode(&u_mode, mode);
drm_mode_convert_to_umode(&u_mode, mode);
if (copy_to_user(mode_ptr + copied,
&u_mode, sizeof(u_mode))) {
ret = -EFAULT;
2027,27 → 2065,13
}
out_resp->count_modes = mode_count;
 
if ((out_resp->count_props >= props_count) && props_count) {
copied = 0;
prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
for (i = 0; i < connector->properties.count; i++) {
if (put_user(connector->properties.ids[i],
prop_ptr + copied)) {
ret = -EFAULT;
ret = get_properties(&connector->base, file_priv->atomic,
(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
&out_resp->count_props);
if (ret)
goto out;
}
 
if (put_user(connector->properties.values[i],
prop_values + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
out_resp->count_props = props_count;
 
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
2065,6 → 2089,9
out_resp->count_encoders = encoders_count;
 
out:
drm_modeset_unlock(&dev->mode_config.connection_mutex);
 
out_unlock:
mutex_unlock(&dev->mode_config.mutex);
 
return ret;
2078,7 → 2105,7
 
/* For atomic drivers only state objects are synchronously updated and
* protected by modeset locks, so check those first. */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_for_each_connector(connector, dev) {
if (!connector->state)
continue;
 
2128,8 → 2155,6
crtc = drm_encoder_get_crtc(encoder);
if (crtc)
enc_resp->crtc_id = crtc->base.id;
else if (encoder->crtc)
enc_resp->crtc_id = encoder->crtc->base.id;
else
enc_resp->crtc_id = 0;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
2184,7 → 2209,7
plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
 
/* Plane lists are invariant, no locking needed. */
list_for_each_entry(plane, &config->plane_list, head) {
drm_for_each_plane(plane, dev) {
/*
* Unless userspace set the 'universal planes'
* capability bit, only advertise overlays.
2263,7 → 2288,55
 
return 0;
}
#endif
 
/**
* drm_plane_check_pixel_format - Check if the plane supports the pixel format
* @plane: plane to check for format support
* @format: the pixel format
*
* Returns:
* Zero if @plane has @format in its list of supported pixel formats, -EINVAL
* otherwise.
*/
int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
{
unsigned int i;
 
for (i = 0; i < plane->format_count; i++) {
if (format == plane->format_types[i])
return 0;
}
 
return -EINVAL;
}
 
static int check_src_coords(uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
const struct drm_framebuffer *fb)
{
unsigned int fb_width, fb_height;
 
fb_width = fb->width << 16;
fb_height = fb->height << 16;
 
/* Make sure source coordinates are inside the fb. */
if (src_w > fb_width ||
src_x > fb_width - src_w ||
src_h > fb_height ||
src_y > fb_height - src_h) {
DRM_DEBUG_KMS("Invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
return -ENOSPC;
}
 
return 0;
}
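 
/*
 * Editor's note (not part of this file): the src_* parameters above are
 * in 16.16 fixed point, so a full-surface source rectangle on a
 * 1920x1080 framebuffer is src_w = 1920 << 16, src_h = 1080 << 16. The
 * debug print converts the low 16 fractional bits to a decimal fraction
 * via (frac * 15625) >> 10, which equals frac * 10^6 / 2^16, i.e.
 * millionths of a pixel for the %06u field.
 */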
 
/*
* setplane_internal - setplane handler for internal callers
*
2283,8 → 2356,6
uint32_t src_w, uint32_t src_h)
{
int ret = 0;
unsigned int fb_width, fb_height;
unsigned int i;
 
/* No fb means shut it down */
if (!fb) {
2307,34 → 2378,28
}
 
/* Check whether this plane supports the fb pixel format. */
for (i = 0; i < plane->format_count; i++)
if (fb->pixel_format == plane->format_types[i])
break;
if (i == plane->format_count) {
ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
if (ret) {
DRM_DEBUG_KMS("Invalid pixel format %s\n",
drm_get_format_name(fb->pixel_format));
ret = -EINVAL;
goto out;
}
 
fb_width = fb->width << 16;
fb_height = fb->height << 16;
 
/* Make sure source coordinates are inside the fb. */
if (src_w > fb_width ||
src_x > fb_width - src_w ||
src_h > fb_height ||
src_y > fb_height - src_h) {
DRM_DEBUG_KMS("Invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
ret = -ENOSPC;
/* Give drivers some help against integer overflows */
if (crtc_w > INT_MAX ||
crtc_x > INT_MAX - (int32_t) crtc_w ||
crtc_h > INT_MAX ||
crtc_y > INT_MAX - (int32_t) crtc_h) {
DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
crtc_w, crtc_h, crtc_x, crtc_y);
ret = -ERANGE;
goto out;
}
 
ret = check_src_coords(src_x, src_y, src_w, src_h, fb);
if (ret)
goto out;
 
plane->old_fb = plane->fb;
ret = plane->funcs->update_plane(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
2401,17 → 2466,6
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
/* Give drivers some help against integer overflows */
if (plane_req->crtc_w > INT_MAX ||
plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
plane_req->crtc_h > INT_MAX ||
plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
plane_req->crtc_w, plane_req->crtc_h,
plane_req->crtc_x, plane_req->crtc_y);
return -ERANGE;
}
 
/*
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
2449,7 → 2503,6
plane_req->src_x, plane_req->src_y,
plane_req->src_w, plane_req->src_h);
}
#endif
 
/**
* drm_mode_set_config_internal - helper to call ->set_config
2473,7 → 2526,7
* connectors from it), hence we need to refcount the fbs across all
* crtcs. Atomic modeset will have saner semantics ...
*/
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
drm_for_each_crtc(tmp, crtc->dev)
tmp->primary->old_fb = tmp->primary->fb;
 
fb = set->fb;
2484,7 → 2537,7
crtc->primary->fb = fb;
}
 
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
drm_for_each_crtc(tmp, crtc->dev) {
if (tmp->primary->fb)
drm_framebuffer_reference(tmp->primary->fb);
// if (tmp->old_fb)
2495,8 → 2548,28
}
EXPORT_SYMBOL(drm_mode_set_config_internal);
 
#if 0
/**
* drm_crtc_get_hv_timing - Fetches hdisplay/vdisplay for given mode
* @mode: mode to query
* @hdisplay: hdisplay value to fill in
* @vdisplay: vdisplay value to fill in
*
* The vdisplay value will be doubled if the specified mode is a stereo mode of
* the appropriate layout.
*/
void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
int *hdisplay, int *vdisplay)
{
struct drm_display_mode adjusted;
 
drm_mode_copy(&adjusted, mode);
drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
*hdisplay = adjusted.crtc_hdisplay;
*vdisplay = adjusted.crtc_vdisplay;
}
EXPORT_SYMBOL(drm_crtc_get_hv_timing);
 
/**
* drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
* CRTC viewport
* @crtc: CRTC that framebuffer will be displayed on
2513,34 → 2586,19
{
int hdisplay, vdisplay;
 
hdisplay = mode->hdisplay;
vdisplay = mode->vdisplay;
drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
 
if (drm_mode_is_stereo(mode)) {
struct drm_display_mode adjusted = *mode;
 
drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
hdisplay = adjusted.crtc_hdisplay;
vdisplay = adjusted.crtc_vdisplay;
}
 
if (crtc->invert_dimensions)
if (crtc->state &&
crtc->primary->state->rotation & (BIT(DRM_ROTATE_90) |
BIT(DRM_ROTATE_270)))
swap(hdisplay, vdisplay);
 
if (hdisplay > fb->width ||
vdisplay > fb->height ||
x > fb->width - hdisplay ||
y > fb->height - vdisplay) {
DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
fb->width, fb->height, hdisplay, vdisplay, x, y,
crtc->invert_dimensions ? " (inverted)" : "");
return -ENOSPC;
return check_src_coords(x << 16, y << 16,
hdisplay << 16, vdisplay << 16, fb);
}
 
return 0;
}
EXPORT_SYMBOL(drm_crtc_check_viewport);
 
#if 0
/**
* drm_mode_setcrtc - set CRTC configuration
* @dev: drm device for the ioctl
2571,8 → 2629,11
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
/* For some reason crtc x/y offsets are signed internally. */
if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
/*
* Universal plane src offsets are only 16.16, prevent havoc for
* drivers using universal plane code internally.
*/
if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
return -ERANGE;
 
drm_modeset_lock_all(dev);
2612,7 → 2673,7
goto out;
}
 
ret = drm_crtc_convert_umode(mode, &crtc_req->mode);
ret = drm_mode_convert_umode(mode, &crtc_req->mode);
if (ret) {
DRM_DEBUG_KMS("Invalid mode\n");
goto out;
2620,6 → 2681,23
 
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
/*
* Check whether the primary plane supports the fb pixel format.
* Drivers not implementing the universal planes API use a
* default formats list provided by the DRM core which doesn't
* match real hardware capabilities. Skip the check in that
* case.
*/
if (!crtc->primary->format_default) {
ret = drm_plane_check_pixel_format(crtc->primary,
fb->pixel_format);
if (ret) {
DRM_DEBUG_KMS("Invalid pixel format %s\n",
drm_get_format_name(fb->pixel_format));
goto out;
}
}
 
ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
mode, fb);
if (ret)
2649,7 → 2727,7
goto out;
}
 
connector_set = kmalloc(crtc_req->count_connectors *
connector_set = kmalloc_array(crtc_req->count_connectors,
sizeof(struct drm_connector *),
GFP_KERNEL);
if (!connector_set) {
2659,10 → 2737,10
 
for (i = 0; i < crtc_req->count_connectors; i++) {
set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
if (get_user(out_id, &set_connectors_ptr[i])) {
ret = -EFAULT;
goto out;
}
// if (get_user(out_id, &set_connectors_ptr[i])) {
// ret = -EFAULT;
// goto out;
// }
 
connector = drm_connector_find(dev, out_id);
if (!connector) {
2801,6 → 2879,7
void *data, struct drm_file *file_priv)
{
struct drm_mode_cursor2 *req = data;
 
return drm_mode_cursor_common(dev, req, file_priv);
}
#endif
3004,12 → 3083,63
DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
 
if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
r->modifier[i], i);
return -EINVAL;
}
 
/* modifier specific checks: */
switch (r->modifier[i]) {
case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
/* NOTE: the pitch restriction may be lifted later if it turns
* out that no hw has this restriction:
*/
if (r->pixel_format != DRM_FORMAT_NV12 ||
width % 128 || height % 32 ||
r->pitches[i] % 128) {
DRM_DEBUG_KMS("bad modifier data for plane %d\n", i);
return -EINVAL;
}
break;
 
default:
break;
}
}
 
for (i = num_planes; i < 4; i++) {
if (r->modifier[i]) {
DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
return -EINVAL;
}
 
/* Pre-FB_MODIFIERS userspace didn't clear the structs properly. */
if (!(r->flags & DRM_MODE_FB_MODIFIERS))
continue;
 
if (r->handles[i]) {
DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i);
return -EINVAL;
}
 
if (r->pitches[i]) {
DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i);
return -EINVAL;
}
 
if (r->offsets[i]) {
DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i);
return -EINVAL;
}
}
 
return 0;
}
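 
/*
 * Editor's sketch (not part of this file): filling a drm_mode_fb_cmd2
 * that passes the Samsung 64x32 tile checks above. NV12 has two planes
 * (Y, then interleaved CbCr); the single BO handle and the exact
 * dimensions are placeholders.
 */
static void my_fill_nv12_tiled_fb(struct drm_mode_fb_cmd2 *r, uint32_t handle)
{
    memset(r, 0, sizeof(*r));
    r->width = 1920;                 /* multiple of 128, as checked above */
    r->height = 1088;                /* multiple of 32 */
    r->pixel_format = DRM_FORMAT_NV12;
    r->flags = DRM_MODE_FB_MODIFIERS;
    r->handles[0] = handle;          /* Y plane */
    r->handles[1] = handle;          /* CbCr plane in the same BO */
    r->pitches[0] = 1920;            /* multiple of 128 */
    r->pitches[1] = 1920;
    r->offsets[1] = 1920 * 1088;     /* CbCr follows Y */
    r->modifier[0] = DRM_FORMAT_MOD_SAMSUNG_64_32_TILE;
    r->modifier[1] = DRM_FORMAT_MOD_SAMSUNG_64_32_TILE;
}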
 
static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
static struct drm_framebuffer *
internal_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r,
struct drm_file *file_priv)
{
3017,7 → 3147,7
struct drm_framebuffer *fb;
int ret;
 
if (r->flags & ~DRM_MODE_FB_INTERLACED) {
if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
return ERR_PTR(-EINVAL);
}
3033,6 → 3163,12
return ERR_PTR(-EINVAL);
}
 
if (r->flags & DRM_MODE_FB_MODIFIERS &&
!dev->mode_config.allow_fb_modifiers) {
DRM_DEBUG_KMS("driver does not support fb modifiers\n");
return ERR_PTR(-EINVAL);
}
 
ret = framebuffer_check(r);
if (ret)
return ERR_PTR(ret);
3043,12 → 3179,6
return fb;
}
 
mutex_lock(&file_priv->fbs_lock);
r->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
mutex_unlock(&file_priv->fbs_lock);
 
return fb;
}
 
3070,15 → 3200,24
int drm_mode_addfb2(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd2 *r = data;
struct drm_framebuffer *fb;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
fb = add_framebuffer_internal(dev, data, file_priv);
fb = internal_framebuffer_create(dev, r, file_priv);
if (IS_ERR(fb))
return PTR_ERR(fb);
 
/* Transfer ownership to the filp for reaping on close */
 
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
mutex_lock(&file_priv->fbs_lock);
r->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
mutex_unlock(&file_priv->fbs_lock);
 
return 0;
}
 
3118,14 → 3257,11
if (!found)
goto fail_lookup;
 
/* Mark fb as reaped, we still have a ref from fpriv->fbs. */
__drm_framebuffer_unregister(dev, fb);
 
list_del_init(&fb->filp_head);
mutex_unlock(&dev->mode_config.fb_lock);
mutex_unlock(&file_priv->fbs_lock);
 
drm_framebuffer_remove(fb);
drm_framebuffer_unreference(fb);
 
return 0;
 
3249,7 → 3385,7
ret = -EINVAL;
goto out_err1;
}
clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
if (!clips) {
ret = -ENOMEM;
goto out_err1;
3292,7 → 3428,6
*/
void drm_fb_release(struct drm_file *priv)
{
struct drm_device *dev = priv->minor->dev;
struct drm_framebuffer *fb, *tfb;
 
/*
3306,16 → 3441,10
* at it any more.
*/
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
 
mutex_lock(&dev->mode_config.fb_lock);
/* Mark fb as reaped, we still have a ref from fpriv->fbs. */
__drm_framebuffer_unregister(dev, fb);
mutex_unlock(&dev->mode_config.fb_lock);
 
list_del_init(&fb->filp_head);
 
/* This will also drop the fpriv->fbs reference. */
drm_framebuffer_remove(fb);
/* This drops the fpriv->fbs reference. */
drm_framebuffer_unreference(fb);
}
}
#endif
3352,7 → 3481,8
property->dev = dev;
 
if (num_values) {
property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
property->values = kcalloc(num_values, sizeof(uint64_t),
GFP_KERNEL);
if (!property->values)
goto fail;
}
3501,7 → 3631,7
}
 
/**
* drm_property_create_range - create a new ranged property type
* drm_property_create_range - create a new unsigned ranged property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
3512,8 → 3642,8
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
* Userspace is allowed to set any integer value in the (min, max) range
* inclusive.
* Userspace is allowed to set any unsigned integer value in the (min, max)
* range inclusive.
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
3527,6 → 3657,24
}
EXPORT_SYMBOL(drm_property_create_range);
 
/**
* drm_property_create_signed_range - create a new signed ranged property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
* @min: minimum value of the property
* @max: maximum value of the property
*
* This creates a new generic drm property which can then be attached to a drm
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
* Userspace is allowed to set any signed integer value in the (min, max)
* range inclusive.
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
int flags, const char *name,
int64_t min, int64_t max)
3536,6 → 3684,23
}
EXPORT_SYMBOL(drm_property_create_signed_range);
 
/**
* drm_property_create_object - create a new object property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
* @type: object type from DRM_MODE_OBJECT_* defines
*
* This creates a new generic drm property which can then be attached to a drm
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
* Userspace is only allowed to set this property to object IDs of the given
* @type. This is only useful for atomic properties, which is enforced.
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create_object(struct drm_device *dev,
int flags, const char *name, uint32_t type)
{
3543,6 → 3708,9
 
flags |= DRM_MODE_PROP_OBJECT;
 
if (WARN_ON(!(flags & DRM_MODE_PROP_ATOMIC)))
return NULL;
 
property = drm_property_create(dev, flags, name, 1);
if (!property)
return NULL;
3554,6 → 3722,28
EXPORT_SYMBOL(drm_property_create_object);
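 
/*
 * Editor's usage sketch (not part of this file): object properties are
 * atomic-only, so DRM_MODE_PROP_ATOMIC must be passed explicitly - the
 * WARN_ON above rejects anything else. "MY_CRTC_LINK" is a made-up name.
 */
static struct drm_property *my_create_crtc_link_prop(struct drm_device *dev)
{
    return drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
                                      "MY_CRTC_LINK", DRM_MODE_OBJECT_CRTC);
}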
 
/**
* drm_property_create_bool - create a new boolean property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
*
* This creates a new generic drm property which can then be attached to a drm
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
* This is implemented as a ranged property with only {0, 1} as valid values.
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
const char *name)
{
return drm_property_create_range(dev, flags, name, 0, 1);
}
EXPORT_SYMBOL(drm_property_create_bool);
 
/**
* drm_property_add_enum - add a possible value to an enumeration property
* @property: enumeration property to change
* @index: index of the new enumeration
3658,9 → 3848,11
return;
}
 
obj->properties->ids[count] = property->base.id;
obj->properties->properties[count] = property;
obj->properties->values[count] = init_val;
obj->properties->count++;
if (property->flags & DRM_MODE_PROP_ATOMIC)
obj->properties->atomic_count++;
}
EXPORT_SYMBOL(drm_object_attach_property);
 
3683,7 → 3875,7
int i;
 
for (i = 0; i < obj->properties->count; i++) {
if (obj->properties->ids[i] == property->base.id) {
if (obj->properties->properties[i] == property) {
obj->properties->values[i] = val;
return 0;
}
3712,8 → 3904,16
{
int i;
 
/* read-only properties bypass atomic mechanism and still store
* their value in obj->properties->values[].. mostly to avoid
* having to deal w/ EDID and similar props in atomic paths:
*/
if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) &&
!(property->flags & DRM_MODE_PROP_IMMUTABLE))
return drm_atomic_get_property(obj, property, val);
 
for (i = 0; i < obj->properties->count; i++) {
if (obj->properties->ids[i] == property->base.id) {
if (obj->properties->properties[i] == property) {
*val = obj->properties->values[i];
return 0;
}
3825,7 → 4025,21
}
#endif
 
static struct drm_property_blob *
/**
* drm_property_create_blob - Create new blob property
* @dev: DRM device to create property for
* @length: Length to allocate for blob data
* @data: If specified, copies data into blob
*
* Creates a new blob property for a specified DRM device, optionally
* copying data.
*
* Returns:
* New blob property with a single reference on success, or an ERR_PTR
* value on failure.
*/
struct drm_property_blob *
drm_property_create_blob(struct drm_device *dev, size_t length,
const void *data)
{
3832,35 → 4046,267
struct drm_property_blob *blob;
int ret;
 
if (!length || !data)
return NULL;
if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
return ERR_PTR(-EINVAL);
 
blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
if (!blob)
return NULL;
return ERR_PTR(-ENOMEM);
 
/* This must be explicitly initialised, so we can safely call list_del
* on it in the removal handler, even if it isn't in a file list. */
INIT_LIST_HEAD(&blob->head_file);
blob->length = length;
blob->dev = dev;
 
if (data)
memcpy(blob->data, data, length);
 
mutex_lock(&dev->mode_config.blob_lock);
 
ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
if (ret) {
kfree(blob);
mutex_unlock(&dev->mode_config.blob_lock);
return ERR_PTR(-EINVAL);
}
 
kref_init(&blob->refcount);
 
list_add_tail(&blob->head_global,
&dev->mode_config.property_blob_list);
 
mutex_unlock(&dev->mode_config.blob_lock);
 
return blob;
}
EXPORT_SYMBOL(drm_property_create_blob);
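 
/*
* Illustrative sketch (not from the original source): with this revision
* drm_property_create_blob() returns an ERR_PTR value instead of NULL on
* failure, so callers must test with IS_ERR(). The helper below is
* hypothetical.
*/
#if 0
static uint32_t example_create_blob_id(struct drm_device *dev,
const void *payload, size_t len)
{
struct drm_property_blob *blob;
 
blob = drm_property_create_blob(dev, len, payload);
if (IS_ERR(blob))
return 0; /* treat failure as "no blob" */
 
return blob->base.id;
}
#endif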
 
/**
* drm_property_free_blob - Blob property destructor
*
* Internal free function for blob properties; must not be used directly.
*
* @kref: Reference
*/
static void drm_property_free_blob(struct kref *kref)
{
struct drm_property_blob *blob =
container_of(kref, struct drm_property_blob, refcount);
 
WARN_ON(!mutex_is_locked(&blob->dev->mode_config.blob_lock));
 
list_del(&blob->head_global);
list_del(&blob->head_file);
drm_mode_object_put(blob->dev, &blob->base);
 
kfree(blob);
}
 
/**
* drm_property_unreference_blob - Unreference a blob property
*
* Drop a reference on a blob property. May free the object.
*
* @blob: Pointer to blob property
*/
void drm_property_unreference_blob(struct drm_property_blob *blob)
{
struct drm_device *dev;
 
if (!blob)
return;
 
dev = blob->dev;
 
DRM_DEBUG("%p: blob ID: %d (%d)\n", blob, blob->base.id, atomic_read(&blob->refcount.refcount));
 
if (kref_put_mutex(&blob->refcount, drm_property_free_blob,
&dev->mode_config.blob_lock))
mutex_unlock(&dev->mode_config.blob_lock);
else
might_lock(&dev->mode_config.blob_lock);
}
EXPORT_SYMBOL(drm_property_unreference_blob);
 
/**
* drm_property_unreference_blob_locked - Unreference a blob property with blob_lock held
*
* Drop a reference on a blob property. May free the object. This must be
* called with blob_lock held.
*
* @blob: Pointer to blob property
*/
static void drm_property_unreference_blob_locked(struct drm_property_blob *blob)
{
if (!blob)
return;
 
DRM_DEBUG("%p: blob ID: %d (%d)\n", blob, blob->base.id, atomic_read(&blob->refcount.refcount));
 
kref_put(&blob->refcount, drm_property_free_blob);
}
 
/**
* drm_property_destroy_user_blobs - destroy all blobs created by this client
* @dev: DRM device
* @file_priv: destroy all blobs owned by this file handle
*/
void drm_property_destroy_user_blobs(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_property_blob *blob, *bt;
 
mutex_lock(&dev->mode_config.blob_lock);
 
list_for_each_entry_safe(blob, bt, &file_priv->blobs, head_file) {
list_del_init(&blob->head_file);
drm_property_unreference_blob_locked(blob);
}
 
mutex_unlock(&dev->mode_config.blob_lock);
}
 
/**
* drm_property_reference_blob - Take a reference on an existing property
*
* Take a new reference on an existing blob property.
*
* @blob: Pointer to blob property
*/
struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob)
{
DRM_DEBUG("%p: blob ID: %d (%d)\n", blob, blob->base.id, atomic_read(&blob->refcount.refcount));
kref_get(&blob->refcount);
return blob;
}
EXPORT_SYMBOL(drm_property_reference_blob);
 
/*
* Like drm_property_lookup_blob, but does not return an additional reference.
* Must be called with blob_lock held.
*/
static struct drm_property_blob *__drm_property_lookup_blob(struct drm_device *dev,
uint32_t id)
{
struct drm_mode_object *obj = NULL;
struct drm_property_blob *blob;
 
WARN_ON(!mutex_is_locked(&dev->mode_config.blob_lock));
 
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
if (!obj || (obj->type != DRM_MODE_OBJECT_BLOB) || (obj->id != id))
blob = NULL;
else
blob = obj_to_blob(obj);
mutex_unlock(&dev->mode_config.idr_mutex);
 
return blob;
}
 
/**
* drm_property_lookup_blob - look up a blob property and take a reference
* @dev: drm device
* @id: id of the blob property
*
* If successful, this takes an additional reference to the blob property.
* Callers need to make sure to eventually unreference the returned property
* again, using @drm_property_unreference_blob.
*/
struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
uint32_t id)
{
struct drm_property_blob *blob;
 
mutex_lock(&dev->mode_config.blob_lock);
blob = __drm_property_lookup_blob(dev, id);
if (blob) {
if (!kref_get_unless_zero(&blob->refcount))
blob = NULL;
}
mutex_unlock(&dev->mode_config.blob_lock);
 
return blob;
}
EXPORT_SYMBOL(drm_property_lookup_blob);
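 
/*
* Illustrative sketch (hypothetical helper, not part of the original
* file) of the lookup/unreference pairing described above; user_blob_id
* would typically come from an ioctl argument.
*/
#if 0
static size_t example_blob_length(struct drm_device *dev,
uint32_t user_blob_id)
{
struct drm_property_blob *blob;
size_t len = 0;
 
blob = drm_property_lookup_blob(dev, user_blob_id);
if (blob) {
len = blob->length;
/* drop the reference the lookup took */
drm_property_unreference_blob(blob);
}
return len;
}
#endif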
 
/**
* drm_property_replace_global_blob - atomically replace existing blob property
* @dev: drm device
* @replace: location of blob property pointer to be replaced
* @length: length of data for new blob, or 0 for no data
* @data: content for new blob, or NULL for no data
* @obj_holds_id: optional object for property holding blob ID
* @prop_holds_id: optional property holding blob ID
* @return 0 on success or error on failure
*
* This function will atomically replace a global property in the blob list,
* optionally updating a property which holds the ID of that property. It is
* guaranteed to be atomic: no caller will be allowed to see intermediate
* results, and either the entire operation will succeed and clean up the
* previous property, or it will fail and the state will be unchanged.
*
* If length is 0 or data is NULL, no new blob will be created, and the holding
* property, if specified, will be set to 0.
*
* Access to the replace pointer is assumed to be protected by the caller, e.g.
* by holding the relevant modesetting object lock for its parent.
*
* For example, a drm_connector has a 'PATH' property, which contains the ID
* of a blob property with the value of the MST path information. Calling this
* function with replace pointing to the connector's path_blob_ptr, length and
* data set for the new path information, obj_holds_id set to the connector's
* base object, and prop_holds_id set to the path property name, will perform
* a completely atomic update. The access to path_blob_ptr is protected by the
* caller holding a lock on the connector.
*/
static int drm_property_replace_global_blob(struct drm_device *dev,
struct drm_property_blob **replace,
size_t length,
const void *data,
struct drm_mode_object *obj_holds_id,
struct drm_property *prop_holds_id)
{
struct drm_property_blob *new_blob = NULL;
struct drm_property_blob *old_blob = NULL;
int ret;
 
WARN_ON(replace == NULL);
 
old_blob = *replace;
 
if (length && data) {
new_blob = drm_property_create_blob(dev, length, data);
if (IS_ERR(new_blob))
return PTR_ERR(new_blob);
}
 
/* This does not need to be synchronised with blob_lock, as the
* get_properties ioctl locks all modesetting objects, and
* obj_holds_id must be locked before calling here, so we cannot
* have its value out of sync with the list membership modified
* below under blob_lock. */
if (obj_holds_id) {
ret = drm_object_property_set_value(obj_holds_id,
prop_holds_id,
new_blob ?
new_blob->base.id : 0);
if (ret != 0)
goto err_created;
}
 
drm_property_unreference_blob(old_blob);
*replace = new_blob;
 
return 0;
 
err_created:
drm_property_unreference_blob(new_blob);
return ret;
}
 
 
#if 0
/**
* drm_mode_getblob_ioctl - get the contents of a blob property value
3888,7 → 4334,8
return -EINVAL;
 
drm_modeset_lock_all(dev);
mutex_lock(&dev->mode_config.blob_lock);
blob = __drm_property_lookup_blob(dev, out_resp->blob_id);
if (!blob) {
ret = -ENOENT;
goto done;
3904,6 → 4351,7
out_resp->length = blob->length;
 
done:
mutex_unlock(&dev->mode_config.blob_lock);
drm_modeset_unlock_all(dev);
return ret;
}
3912,7 → 4360,7
/**
* drm_mode_connector_set_path_property - set path property on connector
* @connector: connector to set property on.
* @path: path to use for property.
* @path: path to use for property; must not be NULL.
*
* This creates a property to expose to userspace to specify a
* connector path. This is mainly used for DisplayPort MST where
3926,17 → 4374,14
const char *path)
{
struct drm_device *dev = connector->dev;
int ret;
 
ret = drm_property_replace_global_blob(dev,
&connector->path_blob_ptr,
strlen(path) + 1,
path,
&connector->base,
dev->mode_config.path_property);
return ret;
}
EXPORT_SYMBOL(drm_mode_connector_set_path_property);
3955,16 → 4400,16
int drm_mode_connector_set_tile_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
char tile[256];
int ret;
 
if (!connector->has_tile) {
ret = drm_property_replace_global_blob(dev,
&connector->tile_blob_ptr,
0,
NULL,
&connector->base,
dev->mode_config.tile_property);
return ret;
}
 
3973,16 → 4418,13
connector->num_h_tile, connector->num_v_tile,
connector->tile_h_loc, connector->tile_v_loc,
connector->tile_h_size, connector->tile_v_size);
 
ret = drm_property_replace_global_blob(dev,
&connector->tile_blob_ptr,
strlen(tile) + 1,
tile,
&connector->base,
dev->mode_config.tile_property);
return ret;
}
EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
4002,7 → 4444,7
const struct edid *edid)
{
struct drm_device *dev = connector->dev;
size_t size = 0;
int ret;
 
/* ignore requests to set edid when overridden */
4009,32 → 4451,108
if (connector->override_edid)
return 0;
 
if (edid)
size = EDID_LENGTH * (1 + edid->extensions);
 
ret = drm_property_replace_global_blob(dev,
&connector->edid_blob_ptr,
size,
edid,
&connector->base,
dev->mode_config.edid_property);
return ret;
}
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
 
/* Some properties could refer to dynamic refcnt'd objects, or things that
* need special locking to handle lifetime issues (ie. to ensure the prop
* value doesn't become invalid part way through the property update due to
* race). The value returned by reference via 'obj' should be passed back
* to drm_property_change_valid_put() after the property is set (and the
* object to which the property is attached has a chance to take its own
* reference).
*/
bool drm_property_change_valid_get(struct drm_property *property,
uint64_t value, struct drm_mode_object **ref)
{
int i;
 
if (property->flags & DRM_MODE_PROP_IMMUTABLE)
return false;
 
*ref = NULL;
 
if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
if (value < property->values[0] || value > property->values[1])
return false;
return true;
} else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
int64_t svalue = U642I64(value);
 
if (svalue < U642I64(property->values[0]) ||
svalue > U642I64(property->values[1]))
return false;
return true;
} else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
uint64_t valid_mask = 0;
 
for (i = 0; i < property->num_values; i++)
valid_mask |= (1ULL << property->values[i]);
return !(value & ~valid_mask);
} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
struct drm_property_blob *blob;
 
if (value == 0)
return true;
 
blob = drm_property_lookup_blob(property->dev, value);
if (blob) {
*ref = &blob->base;
return true;
} else {
return false;
}
} else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
/* a zero value for an object property translates to null: */
if (value == 0)
return true;
 
/* handle refcnt'd objects specially: */
if (property->values[0] == DRM_MODE_OBJECT_FB) {
struct drm_framebuffer *fb;
fb = drm_framebuffer_lookup(property->dev, value);
if (fb) {
*ref = &fb->base;
return true;
} else {
return false;
}
} else {
return _object_find(property->dev, value, property->values[0]) != NULL;
}
}
 
for (i = 0; i < property->num_values; i++)
if (property->values[i] == value)
return true;
return false;
}
 
void drm_property_change_valid_put(struct drm_property *property,
struct drm_mode_object *ref)
{
if (!ref)
return;
 
if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
if (property->values[0] == DRM_MODE_OBJECT_FB)
drm_framebuffer_unreference(obj_to_fb(ref));
} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
drm_property_unreference_blob(obj_to_blob(ref));
}
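 
/*
* Illustrative sketch (not from the original source) of the get/put
* pairing described above drm_property_change_valid_get(): validate the
* value (possibly taking a reference), apply it, then drop the reference
* once the object has had a chance to take its own.
*/
#if 0
static int example_validated_set(struct drm_mode_object *obj,
struct drm_property *prop, uint64_t value)
{
struct drm_mode_object *ref;
int ret;
 
if (!drm_property_change_valid_get(prop, value, &ref))
return -EINVAL;
 
ret = drm_object_property_set_value(obj, prop, value);
 
drm_property_change_valid_put(prop, ref);
return ret;
}
#endif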
 
 
 
static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
4044,9 → 4562,9
 
/* Do DPMS ourselves */
if (property == connector->dev->mode_config.dpms_property) {
ret = 0;
if (connector->funcs->dpms)
ret = (*connector->funcs->dpms)(connector, (int)value);
} else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, value);
 
4122,11 → 4640,6
struct drm_mode_obj_get_properties *arg = data;
struct drm_mode_object *obj;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
4143,30 → 4656,11
goto out;
}
 
ret = get_properties(obj, file_priv->atomic,
(uint32_t __user *)(unsigned long)(arg->props_ptr),
(uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
&arg->count_props);
 
out:
drm_modeset_unlock_all(dev);
return ret;
4195,8 → 4689,8
struct drm_mode_object *arg_obj;
struct drm_mode_object *prop_obj;
struct drm_property *property;
int i, ret = -EINVAL;
struct drm_mode_object *ref;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
4212,7 → 4706,7
goto out;
 
for (i = 0; i < arg_obj->properties->count; i++)
if (arg_obj->properties->properties[i]->base.id == arg->prop_id)
break;
 
if (i == arg_obj->properties->count)
4226,7 → 4720,7
}
property = obj_to_property(prop_obj);
 
if (!drm_property_change_valid_get(property, arg->value, &ref))
goto out;
 
switch (arg_obj->type) {
4243,6 → 4737,8
break;
}
 
drm_property_change_valid_put(property, ref);
 
out:
drm_modeset_unlock_all(dev);
return ret;
4293,7 → 4789,8
{
crtc->gamma_size = gamma_size;
 
crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
GFP_KERNEL);
if (!crtc->gamma_store) {
crtc->gamma_size = 0;
return -ENOMEM;
4456,25 → 4953,24
struct drm_encoder *encoder;
struct drm_connector *connector;
 
drm_for_each_plane(plane, dev)
if (plane->funcs->reset)
plane->funcs->reset(plane);
 
drm_for_each_crtc(crtc, dev)
if (crtc->funcs->reset)
crtc->funcs->reset(crtc);
 
drm_for_each_encoder(encoder, dev)
if (encoder->funcs->reset)
encoder->funcs->reset(encoder);
 
mutex_lock(&dev->mode_config.mutex);
drm_for_each_connector(connector, dev)
if (connector->funcs->reset)
connector->funcs->reset(connector);
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_mode_config_reset);
/*
* Just need to support RGB formats here for compat with code that doesn't
4710,7 → 5206,8
{
if (rotation & ~supported_rotations) {
rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
rotation = (rotation & DRM_REFLECT_MASK) |
BIT((ffs(rotation & DRM_ROTATE_MASK) + 1) % 4);
}
 
return rotation;
4735,10 → 5232,10
drm_modeset_lock_init(&dev->mode_config.connection_mutex);
mutex_init(&dev->mode_config.idr_mutex);
mutex_init(&dev->mode_config.fb_lock);
mutex_init(&dev->mode_config.blob_lock);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
4747,8 → 5244,7
idr_init(&dev->mode_config.tile_idr);
 
drm_modeset_lock_all(dev);
drm_mode_create_standard_properties(dev);
drm_modeset_unlock_all(dev);
 
/* Just to be sure */
4779,7 → 5275,6
struct drm_connector *connector, *ot;
struct drm_crtc *crtc, *ct;
struct drm_encoder *encoder, *enct;
struct drm_framebuffer *fb, *fbt;
struct drm_property *property, *pt;
struct drm_property_blob *blob, *bt;
4790,11 → 5285,6
encoder->funcs->destroy(encoder);
}
 
list_for_each_entry_safe(connector, ot,
&dev->mode_config.connector_list, head) {
connector->funcs->destroy(connector);
4806,8 → 5296,8
}
 
list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
head_global) {
drm_property_unreference_blob(blob);
}
 
/*
4820,7 → 5310,7
*/
WARN_ON(!list_empty(&dev->mode_config.fb_list));
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
drm_framebuffer_free(&fb->refcount);
}
 
list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
4905,8 → 5395,8
mutex_lock(&dev->mode_config.idr_mutex);
idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
if (!memcmp(tg->group_data, topology, 8)) {
if (!kref_get_unless_zero(&tg->refcount))
tg = NULL;
mutex_unlock(&dev->mode_config.idr_mutex);
return tg;
}
4914,6 → 5404,7
mutex_unlock(&dev->mode_config.idr_mutex);
return NULL;
}
EXPORT_SYMBOL(drm_mode_get_tile_group);
 
/**
* drm_mode_create_tile_group - create a tile group from a displayid description
4952,3 → 5443,4
mutex_unlock(&dev->mode_config.idr_mutex);
return tg;
}
EXPORT_SYMBOL(drm_mode_create_tile_group);
/drivers/video/drm/drm_crtc_helper.c
111,7 → 111,17
{
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
 
/*
* We can expect this mutex to be locked if we are not panicking.
* Locking is currently fubar in the panic handler.
*/
if (!oops_in_progress) {
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
}
 
drm_for_each_connector(connector, dev)
if (connector->encoder == encoder)
return true;
return false;
133,8 → 143,15
{
struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
 
/*
* We can expect this mutex to be locked if we are not panicking.
* Locking is currently fubar in the panic handler.
*/
if (!oops_in_progress)
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
drm_for_each_encoder(encoder, dev)
if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
return true;
return false;
144,10 → 161,9
static void
drm_encoder_disable(struct drm_encoder *encoder)
{
const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
 
drm_bridge_disable(encoder->bridge);
 
if (encoder_funcs->disable)
(*encoder_funcs->disable)(encoder);
154,8 → 170,7
else
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
 
drm_bridge_post_disable(encoder->bridge);
}
 
static void __drm_helper_disable_unused_functions(struct drm_device *dev)
165,7 → 180,7
 
drm_warn_on_modeset_not_all_locked(dev);
 
drm_for_each_encoder(encoder, dev) {
if (!drm_helper_encoder_in_use(encoder)) {
drm_encoder_disable(encoder);
/* disconnect encoder from any connector */
173,8 → 188,8
}
}
 
drm_for_each_crtc(crtc, dev) {
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled) {
if (crtc_funcs->disable)
212,10 → 227,10
static void
drm_crtc_prepare_encoders(struct drm_device *dev)
{
const struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_encoder *encoder;
 
drm_for_each_encoder(encoder, dev) {
encoder_funcs = encoder->helper_private;
/* Disable unused encoders */
if (encoder->crtc == NULL)
253,9 → 268,9
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
const struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
bool saved_enabled;
struct drm_encoder *encoder;
275,6 → 290,7
}
 
saved_mode = crtc->mode;
saved_hwmode = crtc->hwmode;
saved_x = crtc->x;
saved_y = crtc->y;
 
289,19 → 305,17
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
drm_for_each_encoder(encoder, dev) {
 
if (encoder->crtc != crtc)
continue;
 
ret = drm_bridge_mode_fixup(encoder->bridge,
mode, adjusted_mode);
if (!ret) {
DRM_DEBUG_KMS("Bridge fixup failed\n");
goto done;
}
 
encoder_funcs = encoder->helper_private;
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
317,21 → 331,21
}
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
crtc->hwmode = *adjusted_mode;
 
/* Prepare the encoders and CRTCs before setting the mode. */
drm_for_each_encoder(encoder, dev) {
 
if (encoder->crtc != crtc)
continue;
 
drm_bridge_disable(encoder->bridge);
 
encoder_funcs = encoder->helper_private;
/* Disable the encoders as the first thing we do. */
encoder_funcs->prepare(encoder);
 
drm_bridge_post_disable(encoder->bridge);
}
 
drm_crtc_prepare_encoders(dev);
345,7 → 359,7
if (!ret)
goto done;
 
drm_for_each_encoder(encoder, dev) {
 
if (encoder->crtc != crtc)
continue;
356,32 → 370,25
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
 
drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
}
 
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
crtc_funcs->commit(crtc);
 
drm_for_each_encoder(encoder, dev) {
 
if (encoder->crtc != crtc)
continue;
 
drm_bridge_pre_enable(encoder->bridge);
 
encoder_funcs = encoder->helper_private;
encoder_funcs->commit(encoder);
 
drm_bridge_enable(encoder->bridge);
}
 
 
/* Calculate and store various constants which
* are later needed by vblank and swap-completion
* timestamping. They are derived from true hwmode.
394,6 → 401,7
if (!ret) {
crtc->enabled = saved_enabled;
crtc->mode = saved_mode;
crtc->hwmode = saved_hwmode;
crtc->x = saved_x;
crtc->y = saved_y;
}
410,11 → 418,11
struct drm_encoder *encoder;
 
/* Decouple all encoders and their attached connectors from this crtc */
drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
continue;
 
drm_for_each_connector(connector, dev) {
if (connector->encoder != encoder)
continue;
 
455,7 → 463,7
bool fb_changed = false; /* if true and !mode_changed just do a flip */
struct drm_connector *save_connectors, *connector;
int count = 0, ro, fail = 0;
const struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_mode_set save_set;
int ret;
int i;
511,12 → 519,12
* restored, not the drivers personal bookkeeping.
*/
count = 0;
drm_for_each_encoder(encoder, dev) {
save_encoders[count++] = *encoder;
}
 
count = 0;
drm_for_each_connector(connector, dev) {
save_connectors[count++] = *connector;
}
 
554,8 → 562,8
 
/* a) traverse passed in connector list and get encoders for them */
count = 0;
drm_for_each_connector(connector, dev) {
const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
new_encoder = connector->encoder;
for (ro = 0; ro < set->num_connectors; ro++) {
594,7 → 602,7
}
 
count = 0;
drm_for_each_connector(connector, dev) {
if (!connector->encoder)
continue;
 
677,12 → 685,12
fail:
/* Restore all previous data. */
count = 0;
drm_for_each_encoder(encoder, dev) {
*encoder = save_encoders[count++];
}
 
count = 0;
drm_for_each_connector(connector, dev) {
*connector = save_connectors[count++];
}
 
704,7 → 712,7
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
 
drm_for_each_connector(connector, dev)
if (connector->encoder == encoder)
if (connector->dpms < dpms)
dpms = connector->dpms;
715,26 → 723,22
static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_bridge *bridge = encoder->bridge;
const struct drm_encoder_helper_funcs *encoder_funcs;
 
if (mode == DRM_MODE_DPMS_ON)
drm_bridge_pre_enable(bridge);
else
drm_bridge_disable(bridge);
 
encoder_funcs = encoder->helper_private;
if (encoder_funcs->dpms)
encoder_funcs->dpms(encoder, mode);
 
if (mode == DRM_MODE_DPMS_ON)
drm_bridge_enable(bridge);
else
drm_bridge_post_disable(bridge);
}
 
static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
{
742,7 → 746,7
struct drm_connector *connector;
struct drm_device *dev = crtc->dev;
 
drm_for_each_connector(connector, dev)
if (connector->encoder && connector->encoder->crtc == crtc)
if (connector->dpms < dpms)
dpms = connector->dpms;
758,8 → 762,11
* implementing the DPMS connector attribute. It computes the new desired DPMS
* state for all encoders and crtcs in the output mesh and calls the ->dpms()
* callback provided by the driver appropriately.
*
* Returns:
* Always returns 0.
*/
int drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
struct drm_encoder *encoder = connector->encoder;
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
766,7 → 773,7
int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
 
if (mode == connector->dpms)
return 0;
 
old_dpms = connector->dpms;
connector->dpms = mode;
777,7 → 784,7
/* from off to on, do crtc then encoder */
if (mode < old_dpms) {
if (crtc) {
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
791,7 → 798,7
if (encoder)
drm_helper_encoder_dpms(encoder, encoder_dpms);
if (crtc) {
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
798,7 → 805,7
}
}
 
return 0;
}
EXPORT_SYMBOL(drm_helper_connector_dpms);
 
820,6 → 827,7
for (i = 0; i < 4; i++) {
fb->pitches[i] = mode_cmd->pitches[i];
fb->offsets[i] = mode_cmd->offsets[i];
fb->modifier[i] = mode_cmd->modifier[i];
}
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
&fb->bits_per_pixel);
852,12 → 860,12
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
const struct drm_crtc_helper_funcs *crtc_funcs;
int encoder_dpms;
bool ret;
 
drm_modeset_lock_all(dev);
drm_for_each_crtc(crtc, dev) {
 
if (!crtc->enabled)
continue;
871,7 → 879,7
 
/* Turn off outputs that were already powered off */
if (drm_helper_choose_crtc_dpms(crtc)) {
drm_for_each_encoder(encoder, dev) {
 
if(encoder->crtc != crtc)
continue;
917,46 → 925,49
struct drm_framebuffer *old_fb)
{
struct drm_crtc_state *crtc_state;
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
int ret;
 
if (crtc->funcs->atomic_duplicate_state)
crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
else {
if (!crtc->state)
drm_atomic_helper_crtc_reset(crtc);
 
crtc_state = drm_atomic_helper_crtc_duplicate_state(crtc);
}
 
if (!crtc_state)
return -ENOMEM;
 
crtc_state->planes_changed = true;
crtc_state->mode_changed = true;
ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
if (ret)
goto out;
drm_mode_copy(&crtc_state->adjusted_mode, adjusted_mode);
 
if (crtc_funcs->atomic_check) {
ret = crtc_funcs->atomic_check(crtc, crtc_state);
if (ret)
goto out;
}
 
swap(crtc->state, crtc_state);
 
crtc_funcs->mode_set_nofb(crtc);
 
ret = drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
 
out:
if (crtc_state) {
if (crtc->funcs->atomic_destroy_state)
crtc->funcs->atomic_destroy_state(crtc, crtc_state);
else
drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
}
 
return ret;
}
EXPORT_SYMBOL(drm_helper_crtc_mode_set);
 
988,6 → 999,7
plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
if (!plane_state)
return -ENOMEM;
plane_state->plane = plane;
 
plane_state->crtc = crtc;
drm_atomic_set_fb_for_plane(plane_state, crtc->primary->fb);
/drivers/video/drm/drm_crtc_internal.h
36,3 → 36,9
void drm_mode_object_put(struct drm_device *dev,
struct drm_mode_object *object);
 
/* drm_atomic.c */
int drm_atomic_get_property(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val);
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
 
/drivers/video/drm/drm_dp_helper.c
159,6 → 159,8
}
EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
 
#define AUX_RETRY_INTERVAL 500 /* us */
 
/**
* DOC: dp helpers
*
354,6 → 356,37
EXPORT_SYMBOL(drm_dp_link_power_up);
 
/**
* drm_dp_link_power_down() - power down a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
{
u8 value;
int err;
 
/* DP_SET_POWER register is only available on DPCD v1.1 and later */
if (link->revision < 0x11)
return 0;
 
err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
if (err < 0)
return err;
 
value &= ~DP_SET_POWER_MASK;
value |= DP_SET_POWER_D3;
 
err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
if (err < 0)
return err;
 
return 0;
}
EXPORT_SYMBOL(drm_dp_link_power_down);
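 
/*
* Illustrative sketch (assumed driver code, not part of this file):
* probing a DisplayPort link and bracketing its use with the power
* helpers above; error handling is reduced to the bare minimum.
*/
#if 0
static int example_dp_link_cycle(struct drm_dp_aux *aux)
{
struct drm_dp_link link;
int err;
 
err = drm_dp_link_probe(aux, &link);
if (err < 0)
return err;
 
err = drm_dp_link_power_up(aux, &link);
if (err < 0)
return err;
 
/* ... link training and scanout would happen here ... */
 
return drm_dp_link_power_down(aux, &link);
}
#endif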
 
/**
* drm_dp_link_configure() - configure a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
391,32 → 424,134
I2C_FUNC_10BIT_ADDR;
}
 
static void drm_dp_i2c_msg_write_status_update(struct drm_dp_aux_msg *msg)
{
/*
* In case of i2c defer or short i2c ack reply to a write,
* we need to switch to WRITE_STATUS_UPDATE to drain the
* rest of the message
*/
if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_WRITE) {
msg->request &= DP_AUX_I2C_MOT;
msg->request |= DP_AUX_I2C_WRITE_STATUS_UPDATE;
}
}
 
#define AUX_PRECHARGE_LEN 10 /* 10 to 16 */
#define AUX_SYNC_LEN (16 + 4) /* preamble + AUX_SYNC_END */
#define AUX_STOP_LEN 4
#define AUX_CMD_LEN 4
#define AUX_ADDRESS_LEN 20
#define AUX_REPLY_PAD_LEN 4
#define AUX_LENGTH_LEN 8
 
/*
* Calculate the duration of the AUX request/reply in usec. Gives the
* "best" case estimate, ie. successful while as short as possible.
*/
static int drm_dp_aux_req_duration(const struct drm_dp_aux_msg *msg)
{
int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN +
AUX_CMD_LEN + AUX_ADDRESS_LEN + AUX_LENGTH_LEN;
 
if ((msg->request & DP_AUX_I2C_READ) == 0)
len += msg->size * 8;
 
return len;
}
 
static int drm_dp_aux_reply_duration(const struct drm_dp_aux_msg *msg)
{
int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN +
AUX_CMD_LEN + AUX_REPLY_PAD_LEN;
 
/*
* For read we expect what was asked. For writes there will
* be 0 or 1 data bytes. Assume 0 for the "best" case.
*/
if (msg->request & DP_AUX_I2C_READ)
len += msg->size * 8;
 
return len;
}
 
#define I2C_START_LEN 1
#define I2C_STOP_LEN 1
#define I2C_ADDR_LEN 9 /* ADDRESS + R/W + ACK/NACK */
#define I2C_DATA_LEN 9 /* DATA + ACK/NACK */
 
/*
* Calculate the length of the i2c transfer in usec, assuming
* the i2c bus speed is as specified. Gives the "worst"
* case estimate, ie. successful while as long as possible.
* Doesn't account for the "MOT" bit, and instead assumes each
* message includes a START, ADDRESS and STOP. Neither does it
* account for additional random variables such as clock stretching.
*/
static int drm_dp_i2c_msg_duration(const struct drm_dp_aux_msg *msg,
int i2c_speed_khz)
{
/* AUX bitrate is 1MHz, i2c bitrate as specified */
return DIV_ROUND_UP((I2C_START_LEN + I2C_ADDR_LEN +
msg->size * I2C_DATA_LEN +
I2C_STOP_LEN) * 1000, i2c_speed_khz);
}
 
/*
* Determine how many retries should be attempted to successfully transfer
* the specified message, based on the estimated durations of the
* i2c and AUX transfers.
*/
static int drm_dp_i2c_retry_count(const struct drm_dp_aux_msg *msg,
int i2c_speed_khz)
{
int aux_time_us = drm_dp_aux_req_duration(msg) +
drm_dp_aux_reply_duration(msg);
int i2c_time_us = drm_dp_i2c_msg_duration(msg, i2c_speed_khz);
 
return DIV_ROUND_UP(i2c_time_us, aux_time_us + AUX_RETRY_INTERVAL);
}
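 
/*
* Worked example (added for illustration): a 16-byte I2C read at the
* default 10 kHz assumed bus speed costs
* i2c_time_us = (1 + 9 + 16*9 + 1) * 1000 / 10 = 15500 us,
* while one AUX request/reply pair costs 66 + 170 = 236 us, so
* drm_dp_i2c_retry_count() returns DIV_ROUND_UP(15500, 236 + 500) = 22
* retries rather than the bare minimum of 7.
*/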
 
/*
* FIXME currently assumes 10 kHz as some real world devices seem
* to require it. We should query/set the speed via DPCD if supported.
*/
static int dp_aux_i2c_speed_khz __read_mostly = 10;
module_param_unsafe(dp_aux_i2c_speed_khz, int, 0644);
MODULE_PARM_DESC(dp_aux_i2c_speed_khz,
"Assumed speed of the i2c bus in kHz, (1-400, default 10)");
 
/*
* Transfer a single I2C-over-AUX message and handle various error conditions,
* retrying the transaction as appropriate. It is assumed that the
* aux->transfer function does not modify anything in the msg other than the
* reply field.
*
* Returns bytes transferred on success, or a negative error code on failure.
*/
static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
unsigned int retry, defer_i2c;
int ret;
/*
* DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device
* is required to retry at least seven times upon receiving AUX_DEFER
* before giving up the AUX transaction.
*
* We also try to account for the i2c bus speed.
*/
int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
 
for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
mutex_lock(&aux->hw_mutex);
ret = aux->transfer(aux, msg);
mutex_unlock(&aux->hw_mutex);
if (ret < 0) {
if (ret == -EBUSY)
continue;
 
DRM_DEBUG_KMS("transaction failed: %d\n", ret);
return ret;
}
 
 
429,11 → 564,11
break;
 
case DP_AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("native nack (result=%d, size=%zu)\n", ret, msg->size);
return -EREMOTEIO;
 
case DP_AUX_NATIVE_REPLY_DEFER:
DRM_DEBUG_KMS("native defer\n");
/*
* We could check for I2C bit rate capabilities and if
* available adjust this interval. We could also be
457,17 → 592,27
* Both native ACK and I2C ACK replies received. We
* can assume the transfer was successful.
*/
if (ret != msg->size)
drm_dp_i2c_msg_write_status_update(msg);
return ret;
 
case DP_AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("I2C nack (result=%d, size=%zu)\n", ret, msg->size);
aux->i2c_nack_count++;
return -EREMOTEIO;
 
case DP_AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("I2C defer\n");
/* DP Compliance Test 4.2.2.5 Requirement:
* Must have at least 7 retries for I2C defers on the
* transaction to pass this test
*/
aux->i2c_defer_count++;
if (defer_i2c < 7)
defer_i2c++;
usleep_range(400, 500);
drm_dp_i2c_msg_write_status_update(msg);
 
continue;
 
default:
480,22 → 625,68
return -EREMOTEIO;
}
 
static void drm_dp_i2c_msg_set_request(struct drm_dp_aux_msg *msg,
const struct i2c_msg *i2c_msg)
{
msg->request = (i2c_msg->flags & I2C_M_RD) ?
DP_AUX_I2C_READ : DP_AUX_I2C_WRITE;
msg->request |= DP_AUX_I2C_MOT;
}
 
/*
* Keep retrying drm_dp_i2c_do_msg until all data has been transferred.
*
* Returns an error code on failure, or a recommended transfer size on success.
*/
static int drm_dp_i2c_drain_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *orig_msg)
{
int err, ret = orig_msg->size;
struct drm_dp_aux_msg msg = *orig_msg;
 
while (msg.size > 0) {
err = drm_dp_i2c_do_msg(aux, &msg);
if (err <= 0)
return err == 0 ? -EPROTO : err;
 
if (err < msg.size && err < ret) {
DRM_DEBUG_KMS("Partial I2C reply: requested %zu bytes got %d bytes\n",
msg.size, err);
ret = err;
}
 
msg.size -= err;
msg.buffer += err;
}
 
return ret;
}
 
/*
* Bizlink designed DP->DVI-D Dual Link adapters require the I2C over AUX
* packets to be as large as possible. If not, the I2C transactions never
* succeed. Hence the default is maximum.
*/
static int dp_aux_i2c_transfer_size __read_mostly = DP_AUX_MAX_PAYLOAD_BYTES;
module_param_unsafe(dp_aux_i2c_transfer_size, int, 0644);
MODULE_PARM_DESC(dp_aux_i2c_transfer_size,
"Number of bytes to transfer in a single I2C over DP AUX CH message, (1-16, default 16)");
 
static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
int num)
{
struct drm_dp_aux *aux = adapter->algo_data;
unsigned int i, j;
unsigned transfer_size;
struct drm_dp_aux_msg msg;
int err = 0;
 
dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES);
 
memset(&msg, 0, sizeof(msg));
 
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
msg.request = (msgs[i].flags & I2C_M_RD) ?
DP_AUX_I2C_READ :
DP_AUX_I2C_WRITE;
msg.request |= DP_AUX_I2C_MOT;
drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
/* Send a bare address packet to start the transaction.
* Zero sized messages specify an address only (bare
* address) transaction.
503,22 → 694,35
msg.buffer = NULL;
msg.size = 0;
err = drm_dp_i2c_do_msg(aux, &msg);
 
/*
* Reset msg.request in case it got
* changed into a WRITE_STATUS_UPDATE.
*/
drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
 
if (err < 0)
break;
/* We want each transaction to be as large as possible, but
* we'll go to smaller sizes if the hardware gives us a
* short reply.
*/
transfer_size = dp_aux_i2c_transfer_size;
for (j = 0; j < msgs[i].len; j += msg.size) {
msg.buffer = msgs[i].buf + j;
msg.size = min(transfer_size, msgs[i].len - j);
 
err = drm_dp_i2c_drain_msg(aux, &msg);
 
/*
* Reset msg.request in case it got
* changed into a WRITE_STATUS_UPDATE.
*/
drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
 
if (err < 0)
break;
transfer_size = err;
}
if (err < 0)
break;
/drivers/video/drm/drm_dp_mst_topology.c
60,7 → 60,7
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
 
static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
740,10 → 740,14
struct drm_dp_sideband_msg_tx *txmsg)
{
bool ret;
 
/*
* All updates to txmsg->state are protected by mgr->qlock, and the two
* cases we check here are terminal states. For those the barriers
* provided by the wake_up/wait_event pair are enough.
*/
ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
return ret;
}
 
807,8 → 811,6
struct drm_dp_mst_port *port, *tmp;
bool wake_tx = false;
 
/*
* destroy all ports - don't need lock
* as there are no more references to the mst branch
866,20 → 868,33
{
struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
 
if (!port->input) {
port->vcpi.num_slots = 0;
 
kfree(port->cached_edid);
/*
* The only time we don't have a connector
* on an output port is if the connector init
* fails.
*/
if (port->connector) {
/* we can't destroy the connector here, as
* we might be holding the mode_config.mutex
* from an EDID retrieval */
 
mutex_lock(&mgr->destroy_connector_lock);
list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
// schedule_work(&mgr->destroy_connector_work);
return;
}
/* no need to clean up vcpi
* as if we have no connector we never setup a vcpi */
drm_dp_port_teardown_pdt(port, port->pdt);
}
kfree(port);
}
 
static void drm_dp_put_port(struct drm_dp_mst_port *port)
1021,8 → 1036,8
}
}
 
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
int pnum,
char *proppath,
size_t proppath_size)
{
1035,7 → 1050,7
snprintf(temp, sizeof(temp), "-%d", port_num);
strlcat(proppath, temp, proppath_size);
}
snprintf(temp, sizeof(temp), "-%d", pnum);
strlcat(proppath, temp, proppath_size);
}
 
1099,22 → 1114,32
drm_dp_port_teardown_pdt(port, old_pdt);
 
ret = drm_dp_port_setup_pdt(port);
if (ret == true)
drm_dp_send_link_address(mstb->mgr, port->mstb);
}
 
if (created && !port->input) {
char proppath[255];
 
build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
 
if (!port->connector) {
/* remove it from the port list */
mutex_lock(&mstb->mgr->lock);
list_del(&port->next);
mutex_unlock(&mstb->mgr->lock);
/* drop port list reference */
drm_dp_put_port(port);
goto out;
}
if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
drm_mode_connector_set_tile_property(port->connector);
}
(*mstb->mgr->cbs->register_connector)(port->connector);
}
 
out:
/* put reference to this port */
drm_dp_put_port(port);
}
1166,6 → 1191,8
struct drm_dp_mst_port *port;
int i;
/* find the port by iterating down */
 
mutex_lock(&mgr->lock);
mstb = mgr->mst_primary;
 
for (i = 0; i < lct - 1; i++) {
1174,17 → 1201,19
 
list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) {
mstb = port->mstb;
if (!mstb) {
DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
goto out;
}
 
break;
}
}
}
kref_get(&mstb->kref);
out:
mutex_unlock(&mgr->lock);
return mstb;
}
 
1192,11 → 1221,10
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_port *port;
struct drm_dp_mst_branch *mstb_child;
if (!mstb->link_address_sent)
drm_dp_send_link_address(mgr, mstb);
 
list_for_each_entry(port, &mstb->ports, next) {
if (port->input)
continue;
1207,18 → 1235,32
if (!port->available_pbn)
drm_dp_send_enum_path_resources(mgr, mstb, port);
 
if (port->mstb) {
mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
if (mstb_child) {
drm_dp_check_and_send_link_address(mgr, mstb_child);
drm_dp_put_mst_branch_device(mstb_child);
}
}
}
}
 
static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
struct drm_dp_mst_branch *mstb;
 
drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);
 
mutex_lock(&mgr->lock);
mstb = mgr->mst_primary;
if (mstb) {
kref_get(&mstb->kref);
}
mutex_unlock(&mgr->lock);
if (mstb) {
drm_dp_check_and_send_link_address(mgr, mstb);
drm_dp_put_mst_branch_device(mstb);
}
}
 
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
u8 *guid)
1272,7 → 1314,6
goto retry;
}
DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
 
return -EIO;
}
1370,12 → 1411,13
return 0;
}
 
/* must be called holding qlock */
static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
int ret;
 
WARN_ON(!mutex_is_locked(&mgr->qlock));
 
/* construct a chunk from the first msg in the tx_msg queue */
if (list_empty(&mgr->tx_msg_downq)) {
mgr->tx_down_in_progress = false;
1435,7 → 1477,7
mutex_unlock(&mgr->qlock);
}
 
static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
int len;
1444,11 → 1486,12
 
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
return;
 
txmsg->dst = mstb;
len = build_link_address(txmsg);
 
mstb->link_address_sent = true;
drm_dp_queue_down_tx(mgr, txmsg);
 
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1476,11 → 1519,12
}
(*mgr->cbs->hotplug)(mgr);
}
} else {
mstb->link_address_sent = false;
DRM_DEBUG_KMS("link address failed %d\n", ret);
}
 
kfree(txmsg);
}
 
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
2240,10 → 2284,10
 
if (port->cached_edid)
edid = drm_edid_duplicate(port->cached_edid);
else {
edid = drm_get_edid(connector, &port->aux.ddc);
 
drm_mode_connector_set_tile_property(connector);
}
drm_dp_put_port(port);
return edid;
}
2326,6 → 2370,19
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
 
int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
int slots = 0;
port = drm_dp_get_validated_port_ref(mgr, port);
if (!port)
return slots;
 
slots = port->vcpi.num_slots;
drm_dp_put_port(port);
return slots;
}
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
 
/**
* drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
* @mgr: manager for this port
2588,8 → 2645,10
mutex_init(&mgr->lock);
mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock);
mutex_init(&mgr->destroy_connector_lock);
INIT_LIST_HEAD(&mgr->tx_msg_upq);
INIT_LIST_HEAD(&mgr->tx_msg_downq);
INIT_LIST_HEAD(&mgr->destroy_connector_list);
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
// init_waitqueue_head(&mgr->tx_waitq);
2650,12 → 2709,13
if (msgs[num - 1].flags & I2C_M_RD)
reading = true;
 
if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
ret = -EIO;
goto out;
}
 
memset(&msg, 0, sizeof(msg));
msg.req_type = DP_REMOTE_I2C_READ;
msg.u.i2c_read.num_transactions = num - 1;
msg.u.i2c_read.port_number = port->port_num;
/drivers/video/drm/drm_drv.c
26,11 → 26,14
* DEALINGS IN THE SOFTWARE.
*/
 
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
 
bool drm_atomic = 0;
 
static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/drivers/video/drm/drm_edid.c
146,327 → 146,359
* This table is copied from xfree86/modes/xf86EdidModes.c.
*/
static const struct drm_display_mode drm_dmt_modes[] = {
/* 0x01 - 640x350@85Hz */
{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 350, 382, 385, 445, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x02 - 640x400@85Hz */
{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 400, 401, 404, 445, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x03 - 720x400@85Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
828, 936, 0, 400, 401, 404, 446, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x04 - 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x05 - 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
704, 832, 0, 480, 489, 492, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x06 - 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x07 - 640x480@85Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
752, 832, 0, 480, 481, 484, 509, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x08 - 800x600@56Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
896, 1024, 0, 600, 601, 603, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x09 - 800x600@60Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x0a - 800x600@72Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
976, 1040, 0, 600, 637, 643, 666, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x0b - 800x600@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
896, 1056, 0, 600, 601, 604, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x0c - 800x600@85Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
896, 1048, 0, 600, 601, 604, 631, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x0d - 800x600@120Hz RB */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
880, 960, 0, 600, 603, 607, 636, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x0e - 848x480@60Hz */
{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
976, 1088, 0, 480, 486, 494, 517, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x0f - 1024x768@43Hz, interlace */
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
1208, 1264, 0, 768, 768, 772, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 0x10 - 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x11 - 1024x768@70Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
1184, 1328, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x12 - 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x13 - 1024x768@85Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x14 - 1024x768@120Hz RB */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
1104, 1184, 0, 768, 771, 775, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x15 - 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x55 - 1280x720@60Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x16 - 1280x768@60Hz RB */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
1360, 1440, 0, 768, 771, 778, 790, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x17 - 1280x768@60Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1472, 1664, 0, 768, 771, 778, 798, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x18 - 1280x768@75Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
1488, 1696, 0, 768, 771, 778, 805, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x768@85Hz */
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x19 - 1280x768@85Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
1496, 1712, 0, 768, 771, 778, 809, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@120Hz RB */
/* 0x1a - 1280x768@120Hz RB */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
1360, 1440, 0, 768, 771, 778, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@60Hz RB */
/* 0x1b - 1280x800@60Hz RB */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
1360, 1440, 0, 800, 803, 809, 823, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@60Hz */
/* 0x1c - 1280x800@60Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1480, 1680, 0, 800, 803, 809, 831, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@75Hz */
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x1d - 1280x800@75Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
1488, 1696, 0, 800, 803, 809, 838, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x800@85Hz */
/* 0x1e - 1280x800@85Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
1496, 1712, 0, 800, 803, 809, 843, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x800@120Hz RB */
/* 0x1f - 1280x800@120Hz RB */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
1360, 1440, 0, 800, 803, 809, 847, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x960@60Hz */
/* 0x20 - 1280x960@60Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1488, 1800, 0, 960, 961, 964, 1000, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x960@85Hz */
/* 0x21 - 1280x960@85Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
1504, 1728, 0, 960, 961, 964, 1011, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x960@120Hz RB */
/* 0x22 - 1280x960@120Hz RB */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
1360, 1440, 0, 960, 963, 967, 1017, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x1024@60Hz */
/* 0x23 - 1280x1024@60Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@75Hz */
/* 0x24 - 1280x1024@75Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@85Hz */
/* 0x25 - 1280x1024@85Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@120Hz RB */
/* 0x26 - 1280x1024@120Hz RB */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1360x768@60Hz */
/* 0x27 - 1360x768@60Hz */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1536, 1792, 0, 768, 771, 777, 795, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1360x768@120Hz RB */
/* 0x28 - 1360x768@120Hz RB */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
1440, 1520, 0, 768, 771, 776, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1400x1050@60Hz RB */
/* 0x51 - 1366x768@60Hz */
{ DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 85500, 1366, 1436,
1579, 1792, 0, 768, 771, 774, 798, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x56 - 1366x768@60Hz */
{ DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 72000, 1366, 1380,
1436, 1500, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x29 - 1400x1050@60Hz RB */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1400x1050@60Hz */
/* 0x2a - 1400x1050@60Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@75Hz */
/* 0x2b - 1400x1050@75Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@85Hz */
/* 0x2c - 1400x1050@85Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@120Hz RB */
/* 0x2d - 1400x1050@120Hz RB */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1440x900@60Hz RB */
/* 0x2e - 1440x900@60Hz RB */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
1520, 1600, 0, 900, 903, 909, 926, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1440x900@60Hz */
/* 0x2f - 1440x900@60Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1672, 1904, 0, 900, 903, 909, 934, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@75Hz */
/* 0x30 - 1440x900@75Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
1688, 1936, 0, 900, 903, 909, 942, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@85Hz */
/* 0x31 - 1440x900@85Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
1696, 1952, 0, 900, 903, 909, 948, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@120Hz RB */
/* 0x32 - 1440x900@120Hz RB */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
1520, 1600, 0, 900, 903, 909, 953, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1600x1200@60Hz */
/* 0x53 - 1600x900@60Hz */
{ DRM_MODE("1600x900", DRM_MODE_TYPE_DRIVER, 108000, 1600, 1624,
1704, 1800, 0, 900, 901, 904, 1000, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x33 - 1600x1200@60Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@65Hz */
/* 0x34 - 1600x1200@65Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@70Hz */
/* 0x35 - 1600x1200@70Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@75Hz */
/* 0x36 - 1600x1200@75Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@85Hz */
/* 0x37 - 1600x1200@85Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@120Hz RB */
/* 0x38 - 1600x1200@120Hz RB */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1680x1050@60Hz RB */
/* 0x39 - 1680x1050@60Hz RB */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1680x1050@60Hz */
/* 0x3a - 1680x1050@60Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@75Hz */
/* 0x3b - 1680x1050@75Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@85Hz */
/* 0x3c - 1680x1050@85Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@120Hz RB */
/* 0x3d - 1680x1050@120Hz RB */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1792x1344@60Hz */
/* 0x3e - 1792x1344@60Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1792x1344@75Hz */
/* 0x3f - 1792x1344@75Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1792x1344@120Hz RB */
/* 0x40 - 1792x1344@120Hz RB */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1856x1392@60Hz */
/* 0x41 - 1856x1392@60Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1856x1392@75Hz */
/* 0x42 - 1856x1392@75Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
2208, 2560, 0, 1392, 1393, 1396, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1856x1392@120Hz RB */
/* 0x43 - 1856x1392@120Hz RB */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1200@60Hz RB */
/* 0x52 - 1920x1080@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x44 - 1920x1200@60Hz RB */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1200@60Hz */
/* 0x45 - 1920x1200@60Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@75Hz */
/* 0x46 - 1920x1200@75Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@85Hz */
/* 0x47 - 1920x1200@85Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@120Hz RB */
/* 0x48 - 1920x1200@120Hz RB */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1440@60Hz */
/* 0x49 - 1920x1440@60Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1440@75Hz */
/* 0x4a - 1920x1440@75Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1440@120Hz RB */
/* 0x4b - 1920x1440@120Hz RB */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz RB */
/* 0x54 - 2048x1152@60Hz */
{ DRM_MODE("2048x1152", DRM_MODE_TYPE_DRIVER, 162000, 2048, 2074,
2154, 2250, 0, 1152, 1153, 1156, 1200, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x4c - 2560x1600@60Hz RB */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz */
/* 0x4d - 2560x1600@60Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1600@75HZ */
/* 0x4e - 2560x1600@75Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1600@85HZ */
/* 0x4f - 2560x1600@85Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1600@120Hz RB */
/* 0x50 - 2560x1600@120Hz RB */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x57 - 4096x2160@60Hz RB */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 556744, 4096, 4104,
4136, 4176, 0, 2160, 2208, 2216, 2222, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 0x58 - 4096x2160@59.94Hz RB */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 556188, 4096, 4104,
4136, 4176, 0, 2160, 2208, 2216, 2222, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
};
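
/*
 * Illustrative sketch (not part of the table handling above): how the
 * DRM_MODE() fields relate to the nominal refresh rate. For the 0x10
 * entry, 65000 kHz / (1344 * 806) ~= 60.00 Hz. This is roughly what
 * drm_mode_vrefresh() computes, ignoring interlace/doublescan
 * adjustments; the helper below exists only for illustration.
 */
static inline int dmt_refresh_hz_sketch(const struct drm_display_mode *m)
{
	/* clock is in kHz, so scale by 1000 before dividing */
	return DIV_ROUND_CLOSEST(m->clock * 1000, m->htotal * m->vtotal);
}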
 
/*
1041,6 → 1073,7
* @raw_edid: pointer to raw EDID block
* @block: type of block to validate (0 for base, extension otherwise)
* @print_bad_edid: if true, dump bad EDID blocks to the console
* @edid_corrupt: if non-NULL, set to true when the header or checksum is invalid
*
* Validate a base or extension EDID block and optionally dump bad blocks to
* the console.
1047,7 → 1080,8
*
* Return: True if the block is valid, false otherwise.
*/
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
bool *edid_corrupt)
{
u8 csum;
struct edid *edid = (struct edid *)raw_edid;
1060,11 → 1094,22
 
if (block == 0) {
int score = drm_edid_header_is_valid(raw_edid);
if (score == 8) ;
else if (score >= edid_fixup) {
if (score == 8) {
if (edid_corrupt)
*edid_corrupt = false;
} else if (score >= edid_fixup) {
/* DisplayPort Link CTS Core 1.2 rev1.1 test 4.2.2.6:
* the corrupt flag needs to be set here; otherwise the
* fix-up code below will correct the problem, the
* checksum will be correct and the test will fail.
*/
if (edid_corrupt)
*edid_corrupt = true;
DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
memcpy(raw_edid, edid_header, sizeof(edid_header));
} else {
if (edid_corrupt)
*edid_corrupt = true;
goto bad;
}
}
1075,6 → 1120,9
DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
}
 
if (edid_corrupt)
*edid_corrupt = true;
 
/* allow CEA to slide through, switches mangle this */
if (raw_edid[0] != 0x02)
goto bad;
1129,7 → 1177,7
return false;
 
for (i = 0; i <= edid->extensions; i++)
if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true, NULL))
return false;
 
return true;
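
/* A minimal usage sketch, assuming a caller that only validates the
 * base block and wants the corruption flag (names are illustrative):
 *
 *	bool corrupt = false;
 *	if (!drm_edid_block_valid(raw, 0, true, &corrupt))
 *		return NULL;	// bad base block
 *	// 'corrupt' is also true when a damaged header was fixed up
 */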
1232,7 → 1280,8
for (i = 0; i < 4; i++) {
if (get_edid_block(data, block, 0, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block, 0, print_bad_edid))
if (drm_edid_block_valid(block, 0, print_bad_edid,
&connector->edid_corrupt))
break;
if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
connector->null_edid_counter++;
1257,7 → 1306,10
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
if (drm_edid_block_valid(block + (valid_extensions + 1)
* EDID_LENGTH, j,
print_bad_edid,
NULL)) {
valid_extensions++;
break;
}
1992,7 → 2044,7
static bool valid_inferred_mode(const struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct drm_display_mode *m;
const struct drm_display_mode *m;
bool ok = false;
 
list_for_each_entry(m, &connector->probed_modes, head) {
2366,6 → 2418,8
return closure.modes;
}
 
static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode);
 
static void
do_detailed_mode(struct detailed_timing *timing, void *c)
{
2382,6 → 2436,13
if (closure->preferred)
newmode->type |= DRM_MODE_TYPE_PREFERRED;
 
/*
* Detailed modes are limited to 10kHz pixel clock resolution,
* so fix up anything that looks like a CEA/HDMI mode but whose
* clock is just slightly off.
*/
fixup_detailed_cea_mode_clock(newmode);
 
drm_mode_probed_add(closure->connector, newmode);
closure->modes++;
closure->preferred = 0;
2477,9 → 2538,9
* and the 60Hz variant otherwise.
*/
if (cea_mode->vdisplay == 240 || cea_mode->vdisplay == 480)
clock = clock * 1001 / 1000;
clock = DIV_ROUND_CLOSEST(clock * 1001, 1000);
else
clock = DIV_ROUND_UP(clock * 1000, 1001);
clock = DIV_ROUND_CLOSEST(clock * 1000, 1001);
 
return clock;
}
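
/* Worked example for the alternate clock above: NTSC-friendly variants
 * run at clock * 1000/1001 (and 240/480-line modes the other way), so
 * 74250 kHz (720p@60) -> DIV_ROUND_CLOSEST(74250 * 1000, 1001) = 74176 kHz
 * (59.94 Hz), while 27000 kHz (480p) -> 27000 * 1001/1000 = 27027 kHz.
 */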
3051,6 → 3112,45
return modes;
}
 
static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
{
const struct drm_display_mode *cea_mode;
int clock1, clock2, clock;
u8 mode_idx;
const char *type;
 
mode_idx = drm_match_cea_mode(mode) - 1;
if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
type = "CEA";
cea_mode = &edid_cea_modes[mode_idx];
clock1 = cea_mode->clock;
clock2 = cea_mode_alternate_clock(cea_mode);
} else {
mode_idx = drm_match_hdmi_mode(mode) - 1;
if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
type = "HDMI";
cea_mode = &edid_4k_modes[mode_idx];
clock1 = cea_mode->clock;
clock2 = hdmi_mode_alternate_clock(cea_mode);
} else {
return;
}
}
 
/* pick whichever is closest */
if (abs(mode->clock - clock1) < abs(mode->clock - clock2))
clock = clock1;
else
clock = clock2;
 
if (mode->clock == clock)
return;
 
DRM_DEBUG("detailed mode matches %s VIC %d, adjusting clock %d -> %d\n",
type, mode_idx + 1, mode->clock, clock);
mode->clock = clock;
}
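
/* Worked example for the fixup above, assuming a 1920x1080@60 detailed
 * timing: EDID detailed timings store the clock in 10 kHz units, so the
 * 59.94 Hz variant (148352 kHz) can only be encoded as 148350 kHz. Here
 * |148350 - 148352| = 2 beats |148350 - 148500| = 150, so the clock is
 * snapped to the exact 148352 kHz CEA value.
 */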
 
static void
parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
{
3309,7 → 3409,7
* the sink doesn't support audio or video.
*/
int drm_av_sync_delay(struct drm_connector *connector,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
int a, v;
3344,7 → 3444,6
/**
* drm_select_eld - select one ELD from multiple HDMI/DP sinks
* @encoder: the encoder just changed display mode
* @mode: the adjusted display mode
*
* It's possible for one encoder to be associated with multiple HDMI/DP sinks.
* The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
3352,8 → 3451,7
* Return: The connector associated with the first HDMI/DP sink that has ELD
* attached to it.
*/
struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode)
struct drm_connector *drm_select_eld(struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
3361,7 → 3459,7
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
drm_for_each_connector(connector, dev)
if (connector->encoder == encoder && connector->eld[0])
return connector;
 
3712,10 → 3810,10
num_modes += add_cvt_modes(connector, edid);
num_modes += add_standard_modes(connector, edid);
num_modes += add_established_modes(connector, edid);
num_modes += add_cea_modes(connector, edid);
num_modes += add_alternate_cea_modes(connector, edid);
if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
num_modes += add_inferred_modes(connector, edid);
num_modes += add_cea_modes(connector, edid);
num_modes += add_alternate_cea_modes(connector, edid);
 
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
3750,7 → 3848,7
struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
 
count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
count = ARRAY_SIZE(drm_dmt_modes);
if (hdisplay < 0)
hdisplay = 0;
if (vdisplay < 0)
/drivers/video/drm/drm_fb_helper.c
38,7 → 38,14
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
 
static bool drm_fbdev_emulation = true;
module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600);
MODULE_PARM_DESC(fbdev_emulation,
"Enable legacy fbdev emulation [default=true]");
 
static LIST_HEAD(kernel_fb_helper_list);
 
/**
56,8 → 63,8
* Teardown is done with drm_fb_helper_fini().
*
* At runtime drivers should restore the fbdev console by calling
* drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
* should also notify the fb helper code from updates to the output
* drm_fb_helper_restore_fbdev_mode_unlocked() from their ->lastclose callback.
* They should also notify the fb helper code from updates to the output
* configuration by calling drm_fb_helper_hotplug_event(). For easier
* integration with the output polling code in drm_crtc_helper.c the modeset
* code provides a ->output_poll_changed callback.
89,8 → 96,9
* connectors to the fbdev, e.g. if some are reserved for special purposes or
* not adequate to be used for the fbcon.
*
* Since this is part of the initial setup before the fbdev is published, no
* locking is required.
* This function is protected against concurrent connector hotadds/removals
* using drm_fb_helper_add_one_connector() and
* drm_fb_helper_remove_one_connector().
*/
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
98,7 → 106,11
struct drm_connector *connector;
int i;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!drm_fbdev_emulation)
return 0;
 
mutex_lock(&dev->mode_config.mutex);
drm_for_each_connector(connector, dev) {
struct drm_fb_helper_connector *fb_helper_connector;
 
fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
108,6 → 120,7
fb_helper_connector->connector = connector;
fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
}
mutex_unlock(&dev->mode_config.mutex);
return 0;
fail:
for (i = 0; i < fb_helper->connector_count; i++) {
115,6 → 128,8
fb_helper->connector_info[i] = NULL;
}
fb_helper->connector_count = 0;
mutex_unlock(&dev->mode_config.mutex);
 
return -ENOMEM;
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
124,6 → 139,9
struct drm_fb_helper_connector **temp;
struct drm_fb_helper_connector *fb_helper_connector;
 
if (!drm_fbdev_emulation)
return 0;
 
WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector *) * (fb_helper->connector_count + 1), GFP_KERNEL);
145,6 → 163,34
}
EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
 
static void remove_from_modeset(struct drm_mode_set *set,
struct drm_connector *connector)
{
int i, j;
 
for (i = 0; i < set->num_connectors; i++) {
if (set->connectors[i] == connector)
break;
}
 
if (i == set->num_connectors)
return;
 
for (j = i + 1; j < set->num_connectors; j++) {
set->connectors[j - 1] = set->connectors[j];
}
set->num_connectors--;
 
/*
* TODO maybe need to make sure we set it back to !=NULL somewhere?
*/
if (set->num_connectors == 0) {
set->fb = NULL;
drm_mode_destroy(connector->dev, set->mode);
set->mode = NULL;
}
}
 
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
151,6 → 197,9
struct drm_fb_helper_connector *fb_helper_connector;
int i, j;
 
if (!drm_fbdev_emulation)
return 0;
 
WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
 
for (i = 0; i < fb_helper->connector_count; i++) {
167,6 → 216,11
}
fb_helper->connector_count--;
kfree(fb_helper_connector);
 
/* also cleanup dangling references to the connector: */
for (i = 0; i < fb_helper->crtc_count; i++)
remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
 
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
201,17 → 255,97
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
}
 
/* Find the real fb for a given fb helper CRTC */
static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_crtc *c;
 
static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
drm_for_each_crtc(c, dev) {
if (crtc->base.id == c->base.id)
return c->primary->fb;
}
 
return NULL;
}

static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct drm_plane *plane;
bool error = false;
struct drm_atomic_state *state;
int i, ret;
unsigned plane_mask;
 
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
 
state->acquire_ctx = dev->mode_config.acquire_ctx;
retry:
plane_mask = 0;
drm_for_each_plane(plane, dev) {
struct drm_plane_state *plane_state;
 
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto fail;
}
 
plane_state->rotation = BIT(DRM_ROTATE_0);
 
plane->old_fb = plane->fb;
plane_mask |= 1 << drm_plane_index(plane);
 
/* disable non-primary: */
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
continue;
 
ret = __drm_atomic_helper_disable_plane(plane, plane_state);
if (ret != 0)
goto fail;
}
 
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
 
ret = __drm_atomic_helper_set_config(mode_set, state);
if (ret != 0)
goto fail;
}
 
ret = drm_atomic_commit(state);
 
fail:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
if (ret == -EDEADLK)
goto backoff;
 
if (ret != 0)
drm_atomic_state_free(state);
 
return ret;
 
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
 
goto retry;
}
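
/*
 * The retry/backoff dance above is the standard atomic pattern: on
 * -EDEADLK the w/w mutex code asks us to drop all locks and start over.
 * A minimal self-contained sketch of the same skeleton (illustrative
 * only; real users populate the state before committing):
 */
static int __maybe_unused atomic_commit_backoff_sketch(struct drm_device *dev)
{
	struct drm_atomic_state *state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = dev->mode_config.acquire_ctx;
retry:
	/* ... build up plane/crtc/connector states here ... */
	ret = drm_atomic_commit(state);
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_atomic_legacy_backoff(state);
		goto retry;
	}
	if (ret)
		drm_atomic_state_free(state);
	/* on success the commit machinery owns and frees the state */
	return ret;
}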
 
static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct drm_plane *plane;
int i;
 
drm_warn_on_modeset_not_all_locked(dev);
 
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
if (fb_helper->atomic)
return restore_fbdev_mode_atomic(fb_helper);
 
drm_for_each_plane(plane, dev) {
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
 
227,18 → 361,24
struct drm_crtc *crtc = mode_set->crtc;
int ret;
 
if (crtc->funcs->cursor_set) {
if (crtc->funcs->cursor_set2) {
ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
if (ret)
return ret;
} else if (crtc->funcs->cursor_set) {
ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
if (ret)
error = true;
return ret;
}
 
ret = drm_mode_set_config_internal(mode_set);
if (ret)
error = true;
return ret;
}
return error;
 
return 0;
}
 
/**
* drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
* @fb_helper: fbcon to restore
246,16 → 386,29
* This should be called from driver's drm ->lastclose callback
* when implementing an fbcon on top of kms using this helper. This ensures that
* the user isn't greeted with a black screen when e.g. X dies.
*
* RETURNS:
* Zero if everything went ok, negative error code otherwise.
*/
bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
bool ret;
bool do_delayed = false;
bool do_delayed;
int ret;
 
if (!drm_fbdev_emulation)
return -ENODEV;
 
drm_modeset_lock_all(dev);
ret = restore_fbdev_mode(fb_helper);
 
do_delayed = fb_helper->delayed_hotplug;
if (do_delayed)
fb_helper->delayed_hotplug = false;
drm_modeset_unlock_all(dev);
 
if (do_delayed)
drm_fb_helper_hotplug_event(fb_helper);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
266,7 → 419,12
struct drm_crtc *crtc;
int bound = 0, crtcs_bound = 0;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Sometimes user space wants everything disabled, so don't steal the
* display if there's a master. */
if (dev->primary->master)
return false;
 
drm_for_each_crtc(crtc, dev) {
if (crtc->primary->fb)
crtcs_bound++;
if (crtc->primary->fb == fb_helper->fb)
312,12 → 470,6
int i, j;
 
/*
* fbdev->blank can be called from irq context in case of a panic.
* Since we already have our own special panic handler which will
* restore the fbdev console mode completely, just bail out early.
*/
 
/*
* For each CRTC in this fb, turn the connectors on/off.
*/
drm_modeset_lock_all(dev);
350,6 → 502,9
*/
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
if (oops_in_progress)
return -EBUSY;
 
switch (blank) {
/* Display: On; HSync: On, VSync: On */
case FB_BLANK_UNBLANK:
433,6 → 588,9
struct drm_crtc *crtc;
int i;
 
if (!drm_fbdev_emulation)
return 0;
 
if (!max_conn_count)
return -EINVAL;
 
461,11 → 619,13
}
 
i = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
drm_for_each_crtc(crtc, dev) {
fb_helper->crtc_info[i].mode_set.crtc = crtc;
i++;
}
 
fb_helper->atomic = !!drm_core_check_feature(dev, DRIVER_ATOMIC);
 
return 0;
out_free:
drm_fb_helper_crtc_free(fb_helper);
473,6 → 633,34
}
EXPORT_SYMBOL(drm_fb_helper_init);
 
/**
* drm_fb_helper_alloc_fbi - allocate fb_info and some of its members
* @fb_helper: driver-allocated fbdev helper
*
* A helper to alloc fb_info and the members cmap and apertures. Called
* by the driver within the fb_probe fb_helper callback function.
*
* RETURNS:
* fb_info pointer if things went okay, pointer containing error code
* otherwise
*/
struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
{
struct device *dev = fb_helper->dev->dev;
struct fb_info *info;
int ret;
 
info = framebuffer_alloc(0, dev);
if (!info)
return ERR_PTR(-ENOMEM);
 
 
fb_helper->fbdev = info;
 
return info;
 
}
EXPORT_SYMBOL(drm_fb_helper_alloc_fbi);

static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
{
554,12 → 742,15
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc_helper_funcs *crtc_funcs;
const struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
int i, j, rc = 0;
int start;
 
if (oops_in_progress)
return -EBUSY;
 
drm_modeset_lock_all(dev);
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
709,6 → 900,9
struct drm_fb_helper *fb_helper = info->par;
struct fb_var_screeninfo *var = &info->var;
 
if (oops_in_progress)
return -EBUSY;
 
if (var->pixclock != 0) {
DRM_ERROR("PIXEL CLOCK SET\n");
return -EINVAL;
720,6 → 914,66
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
 
static int pan_display_atomic(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_atomic_state *state;
struct drm_plane *plane;
int i, ret;
unsigned plane_mask;
 
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
 
state->acquire_ctx = dev->mode_config.acquire_ctx;
retry:
plane_mask = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set;
 
mode_set = &fb_helper->crtc_info[i].mode_set;
 
mode_set->x = var->xoffset;
mode_set->y = var->yoffset;
 
ret = __drm_atomic_helper_set_config(mode_set, state);
if (ret != 0)
goto fail;
 
plane = mode_set->crtc->primary;
plane_mask |= 1 << drm_plane_index(plane);
plane->old_fb = plane->fb;
}
 
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
 
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
 
 
fail:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
if (ret == -EDEADLK)
goto backoff;
 
if (ret != 0)
drm_atomic_state_free(state);
 
return ret;
 
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
 
goto retry;
}
 
/**
* drm_fb_helper_pan_display - implementation for ->fb_pan_display
* @var: updated screen information
734,6 → 988,9
int ret = 0;
int i;
 
if (oops_in_progress)
return -EBUSY;
 
drm_modeset_lock_all(dev);
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
740,6 → 997,11
return -EBUSY;
}
 
if (fb_helper->atomic) {
ret = pan_display_atomic(var, info);
goto unlock;
}
 
for (i = 0; i < fb_helper->crtc_count; i++) {
modeset = &fb_helper->crtc_info[i].mode_set;
 
754,6 → 1016,7
}
}
}
unlock:
drm_modeset_unlock_all(dev);
return ret;
}
819,25 → 1082,47
crtc_count = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_display_mode *desired_mode;
int x, y;
struct drm_mode_set *mode_set;
int x, y, j;
/* in case of a tile group, are we the last tile vert or horiz?
* If there is no tile group you are always the last one both
* vertically and horizontally.
*/
bool lastv = true, lasth = true;
 
desired_mode = fb_helper->crtc_info[i].desired_mode;
mode_set = &fb_helper->crtc_info[i].mode_set;
 
if (!desired_mode)
continue;
 
crtc_count++;
 
x = fb_helper->crtc_info[i].x;
y = fb_helper->crtc_info[i].y;
if (desired_mode) {
 
if (gamma_size == 0)
gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
if (desired_mode->hdisplay + x < sizes.fb_width)
sizes.fb_width = desired_mode->hdisplay + x;
if (desired_mode->vdisplay + y < sizes.fb_height)
sizes.fb_height = desired_mode->vdisplay + y;
if (desired_mode->hdisplay + x > sizes.surface_width)
sizes.surface_width = desired_mode->hdisplay + x;
if (desired_mode->vdisplay + y > sizes.surface_height)
sizes.surface_height = desired_mode->vdisplay + y;
crtc_count++;
 
sizes.surface_width = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width);
sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height);
 
for (j = 0; j < mode_set->num_connectors; j++) {
struct drm_connector *connector = mode_set->connectors[j];
if (connector->has_tile) {
lasth = (connector->tile_h_loc == (connector->num_h_tile - 1));
lastv = (connector->tile_v_loc == (connector->num_v_tile - 1));
/* cloning to multiple tiles is just crazy-talk, so: */
break;
}
}
 
if (lasth)
sizes.fb_width = min_t(u32, desired_mode->hdisplay + x, sizes.fb_width);
if (lastv)
sizes.fb_height = min_t(u32, desired_mode->vdisplay + y, sizes.fb_height);
}
 
if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
/* hmm everyone went away - assume VGA cable just fell out
and will come back later. */
866,9 → 1151,11
 
 
info->var.pixclock = 0;
 
dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
info->node, info->fix.id);
 
 
list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
 
return 0;
1034,13 → 1321,14
int width, int height)
{
struct drm_cmdline_mode *cmdline_mode;
struct drm_display_mode *mode = NULL;
struct drm_display_mode *mode;
bool prefer_non_interlace;
 
return NULL;
 
#if 0
cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
if (cmdline_mode->specified == false)
return mode;
return NULL;
 
/* attempt to find a matching mode in the list of modes
* we have gotten so far, if not add a CVT mode that conforms
1081,6 → 1369,7
cmdline_mode);
list_add(&mode->head, &fb_helper_conn->connector->modes);
return mode;
#endif
}
EXPORT_SYMBOL(drm_pick_cmdline_mode);
 
1303,7 → 1592,7
int c, o;
struct drm_device *dev = fb_helper->dev;
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
const struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
int my_score, best_score, score;
struct drm_fb_helper_crtc **crtcs, *crtc;
1496,11 → 1785,14
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
{
struct drm_device *dev = fb_helper->dev;
int count = 0;
 
if (!drm_fbdev_emulation)
return 0;
 
mutex_lock(&dev->mode_config.mutex);
count = drm_fb_helper_probe_connector_modes(fb_helper,
dev->mode_config.max_width,
1544,6 → 1836,9
struct drm_device *dev = fb_helper->dev;
u32 max_width, max_height;
 
if (!drm_fbdev_emulation)
return 0;
 
mutex_lock(&fb_helper->dev->mode_config.mutex);
if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
/drivers/video/drm/drm_gem.c
462,7 → 462,7
* __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
* so shmem can relocate pages during swapin if required.
*/
BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
(page_to_pfn(p) >= 0x00100000UL));
}
 
733,10 → 733,11
void
drm_gem_object_free(struct kref *kref)
{
struct drm_gem_object *obj = (struct drm_gem_object *) kref;
struct drm_gem_object *obj =
container_of(kref, struct drm_gem_object, refcount);
struct drm_device *dev = obj->dev;
 
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
/drivers/video/drm/drm_global.c
31,7 → 31,6
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bug.h>
#include <drm/drm_global.h>
 
struct drm_global_item {
/drivers/video/drm/drm_internal.h
55,7 → 55,6
int drm_name_info(struct seq_file *m, void *data);
int drm_vm_info(struct seq_file *m, void *data);
int drm_bufs_info(struct seq_file *m, void *data);
int drm_vblank_info(struct seq_file *m, void *data);
int drm_clients_info(struct seq_file *m, void* data);
int drm_gem_name_info(struct seq_file *m, void *data);
 
70,12 → 69,11
struct drm_file *file_priv);
int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
 
/* drm_sysfs.c */
extern struct class *drm_class;
 
struct class *drm_sysfs_create(struct module *owner, char *name);
int drm_sysfs_init(void);
void drm_sysfs_destroy(void);
struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
int drm_sysfs_connector_add(struct drm_connector *connector);
/drivers/video/drm/drm_irq.c
42,9 → 42,22
#include <linux/vgaarb.h>
#include <linux/export.h>
 
ktime_t ktime_get(void);
 
static inline ktime_t ktime_get_real(void)
{
return ktime_get();
}
 
static inline ktime_t ktime_mono_to_real(ktime_t mono)
{
return mono;
}
 
 
/* Access macro for slots in vblank timestamp ringbuffer. */
#define vblanktimestamp(dev, crtc, count) \
((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
#define vblanktimestamp(dev, pipe, count) \
((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
 
/* Retry timestamp calculation up to 3 times to satisfy
* drm_timestamp_precision before giving up.
57,21 → 70,280
#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
 
static bool
drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
struct timeval *tvblank, unsigned flags);
 
static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
 
/*
* Clear vblank timestamp buffer for a crtc.
* Default to use monotonic timestamps for wait-for-vblank and page-flip
* complete events.
*/
unsigned int drm_timestamp_monotonic = 1;
 
static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
 
#if 0
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
 
static void store_vblank(struct drm_device *dev, unsigned int pipe,
u32 vblank_count_inc,
struct timeval *t_vblank, u32 last)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u32 tslot;
 
assert_spin_locked(&dev->vblank_time_lock);
 
vblank->last = last;
 
/* All writers hold the spinlock, but readers are serialized by
* the latching of vblank->count below.
*/
tslot = vblank->count + vblank_count_inc;
vblanktimestamp(dev, pipe, tslot) = *t_vblank;
 
/*
* vblank timestamp updates are protected on the write side with
* vblank_time_lock, but on the read side done locklessly using a
* sequence-lock on the vblank counter. Ensure correct ordering using
* memory barriers. We need the barrier both before and after the
* counter update to synchronize with the next timestamp write.
* The read-side barriers for this are in drm_vblank_count_and_time.
*/
smp_wmb();
vblank->count += vblank_count_inc;
smp_wmb();
}
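
/* The write side above pairs with a lockless, seqlock-style read loop
 * (see drm_vblank_count_and_time() below); sketched, the reader is:
 *
 *	do {
 *		cur_vblank = vblank->count;
 *		smp_rmb();
 *		*vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
 *		smp_rmb();
 *	} while (cur_vblank != vblank->count);
 */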
 
/**
* drm_reset_vblank_timestamp - reset the last timestamp to the last vblank
* @dev: DRM device
* @pipe: index of CRTC for which to reset the timestamp
*
* Reset the stored timestamp for the current vblank count to correspond
* to the last vblank occurred.
*
* Only to be called from drm_vblank_on().
*
* Note: caller must hold dev->vbl_lock since this reads & writes
* device vblank fields.
*/
static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe)
{
u32 cur_vblank;
bool rc;
struct timeval t_vblank;
int count = DRM_TIMESTAMP_MAXRETRIES;
 
spin_lock(&dev->vblank_time_lock);
 
/*
* sample the current counter to avoid random jumps
* when drm_vblank_enable() applies the diff
*/
do {
cur_vblank = dev->driver->get_vblank_counter(dev, pipe);
rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, 0);
} while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe) && --count > 0);
 
/*
* Only reinitialize the corresponding vblank timestamp if the
* high-precision query is available and didn't fail. Otherwise delay
* reinitialization until the next vblank interrupt and assign 0 for now,
* to mark the vblank timestamp as invalid.
*/
if (!rc)
t_vblank = (struct timeval) {0, 0};
 
/*
* +1 to make sure user will never see the same
* vblank counter value before and after a modeset
*/
store_vblank(dev, pipe, 1, &t_vblank, cur_vblank);
 
spin_unlock(&dev->vblank_time_lock);
}
 
/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
* @pipe: counter to update
*
* Call back into the driver to update the appropriate vblank counter
* (specified by @pipe). Deal with wraparound, if it occurred, and
* update the last read value so we can deal with wraparound on the next
* call if necessary.
*
* Only necessary when going from off->on, to account for frames we
* didn't get an interrupt for.
*
* Note: caller must hold dev->vbl_lock since this reads & writes
* device vblank fields.
*/
static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
unsigned long flags)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u32 cur_vblank, diff;
bool rc;
struct timeval t_vblank;
int count = DRM_TIMESTAMP_MAXRETRIES;
int framedur_ns = vblank->framedur_ns;
 
/*
* Interrupts were disabled prior to this call, so deal with counter
* wrap if needed.
* NOTE! It's possible we lost a full dev->max_vblank_count + 1 events
* here if the register is small or we had vblank interrupts off for
* a long time.
*
* We repeat the hardware vblank counter & timestamp query until
* we get consistent results. This prevents races where the GPU
* updates its hardware counter while we are retrieving the
* corresponding vblank timestamp.
*/
do {
cur_vblank = dev->driver->get_vblank_counter(dev, pipe);
rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, flags);
} while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe) && --count > 0);
 
if (dev->max_vblank_count != 0) {
/* trust the hw counter when it's around */
diff = (cur_vblank - vblank->last) & dev->max_vblank_count;
} else if (rc && framedur_ns) {
const struct timeval *t_old;
u64 diff_ns;
 
t_old = &vblanktimestamp(dev, pipe, vblank->count);
diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
 
/*
* Figure out how many vblanks we've missed based
* on the difference in the timestamps and the
* frame/field duration.
*/
diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns);
 
if (diff == 0 && flags & DRM_CALLED_FROM_VBLIRQ)
DRM_DEBUG_VBL("crtc %u: Redundant vblirq ignored."
" diff_ns = %lld, framedur_ns = %d)\n",
pipe, (long long) diff_ns, framedur_ns);
} else {
/* some kind of default for drivers w/o accurate vbl timestamping */
diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
}
 
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%u, diff=%u, hw=%u hw_last=%u\n",
pipe, vblank->count, diff, cur_vblank, vblank->last);
 
if (diff == 0) {
WARN_ON_ONCE(cur_vblank != vblank->last);
return;
}
 
/*
* Only reinitialize the corresponding vblank timestamp if the
* high-precision query is available and didn't fail, or if we were
* called from the vblank interrupt. Otherwise delay reinitialization
* until the next vblank interrupt and assign 0 for now, to mark the
* vblank timestamp as invalid.
*/
if (!rc && (flags & DRM_CALLED_FROM_VBLIRQ) == 0)
t_vblank = (struct timeval) {0, 0};
 
store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
}
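
/* Worked example for the timestamp-based path above: with a 60 Hz mode
 * (framedur_ns = 16666666) and vblank irqs off for 50 ms, diff_ns is
 * about 50000000, so DIV_ROUND_CLOSEST_ULL(50000000, 16666666) = 3
 * missed vblanks are added to the software counter in one go.
 */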
 
/*
* Disable vblank irq's on crtc, make sure that last vblank count
* of hardware and corresponding consistent software vblank counter
* are preserved, even if there are any spurious vblank irq's after
* disable.
*/
static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
 
/* Prevent vblank irq processing while disabling vblank irqs,
* so no updates of timestamps or count can happen after we've
* disabled. Needed to prevent races in case of delayed irq's.
*/
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
/*
* Only disable vblank interrupts if they're enabled. This avoids
* calling the ->disable_vblank() operation in atomic context with the
* hardware potentially runtime suspended.
*/
if (vblank->enabled) {
dev->driver->disable_vblank(dev, pipe);
vblank->enabled = false;
}
 
/*
* Always update the count and timestamp to maintain the
* appearance that the counter has been ticking all along until
* this time. This makes the count account for the entire time
* between drm_vblank_on() and drm_vblank_off().
*/
drm_update_vblank_count(dev, pipe, 0);
 
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
 
static void vblank_disable_fn(unsigned long arg)
{
struct drm_vblank_crtc *vblank = (void *)arg;
struct drm_device *dev = vblank->dev;
unsigned int pipe = vblank->pipe;
unsigned long irqflags;
 
if (!dev->vblank_disable_allowed)
return;
 
spin_lock_irqsave(&dev->vbl_lock, irqflags);
if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
DRM_DEBUG("disabling vblank on crtc %u\n", pipe);
vblank_disable_and_save(dev, pipe);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
 
/**
* drm_vblank_cleanup - cleanup vblank support
* @dev: DRM device
*
* This function cleans up any resources allocated in drm_vblank_init.
*/
void drm_vblank_cleanup(struct drm_device *dev)
{
unsigned int pipe;
 
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
 
for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
WARN_ON(vblank->enabled &&
drm_core_check_feature(dev, DRIVER_MODESET));
 
del_timer_sync(&vblank->disable_timer);
}
 
kfree(dev->vblank);
 
dev->num_crtcs = 0;
}
EXPORT_SYMBOL(drm_vblank_cleanup);
 
/**
* drm_vblank_init - initialize vblank support
* @dev: drm_device
* @num_crtcs: number of crtcs supported by @dev
* @dev: DRM device
* @num_crtcs: number of CRTCs supported by @dev
*
* This function initializes vblank support for @num_crtcs display pipelines.
*
78,9 → 350,10
* Returns:
* Zero on success or a negative error code on failure.
*/
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
{
int i, ret = -ENOMEM;
int ret = -ENOMEM;
unsigned int i;
 
spin_lock_init(&dev->vbl_lock);
spin_lock_init(&dev->vblank_time_lock);
95,10 → 368,10
struct drm_vblank_crtc *vblank = &dev->vblank[i];
 
vblank->dev = dev;
vblank->crtc = i;
vblank->pipe = i;
init_waitqueue_head(&vblank->queue);
setup_timer(&vblank->disable_timer, vblank_disable_fn,
(unsigned long)vblank);
// setup_timer(&vblank->disable_timer, vblank_disable_fn,
// (unsigned long)vblank);
}
 
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
109,6 → 382,13
else
DRM_INFO("No driver support for vblank timestamp query.\n");
 
/* Must have precise timestamping for reliable vblank instant disable */
if (dev->vblank_disable_immediate && !dev->driver->get_vblank_timestamp) {
dev->vblank_disable_immediate = false;
DRM_INFO("Setting vblank_disable_immediate to false because "
"get_vblank_timestamp == NULL\n");
}
 
dev->vblank_disable_allowed = false;
 
return 0;
119,8 → 399,8
}
EXPORT_SYMBOL(drm_vblank_init);
 
#endif
 
 
irqreturn_t device_irq_handler(struct drm_device *dev)
{
 
181,7 → 461,6
 
if (ret < 0) {
dev->irq_enabled = false;
DRM_ERROR(__FUNCTION__);
} else {
dev->irq = irq;
}
197,22 → 476,7
 
 
 
u64 div64_u64(u64 dividend, u64 divisor)
{
u32 high, d;
 
high = divisor >> 32;
if (high) {
unsigned int shift = fls(high);
 
d = divisor >> shift;
dividend >>= shift;
} else
d = divisor;
 
return div_u64(dividend, d);
}
 
/**
* drm_calc_timestamping_constants - calculate vblank timestamp constants
* @crtc: drm_crtc whose timestamp constants should be updated.
227,9 → 491,18
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
int linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
int linedur_ns = 0, framedur_ns = 0;
int dotclock = mode->crtc_clock;
 
if (!dev->num_crtcs)
return;
 
if (WARN_ON(pipe >= dev->num_crtcs))
return;
 
/* Valid dotclock? */
if (dotclock > 0) {
int frame_size = mode->crtc_htotal * mode->crtc_vtotal;
236,10 → 509,9
 
/*
* Convert scanline length in pixels and video
* dot clock to line duration, frame duration
* and pixel duration in nanoseconds:
* dot clock to line duration and frame duration
* in nanoseconds:
*/
pixeldur_ns = 1000000 / dotclock;
linedur_ns = div_u64((u64) mode->crtc_htotal * 1000000, dotclock);
framedur_ns = div_u64((u64) frame_size * 1000000, dotclock);
 
249,19 → 521,17
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
framedur_ns /= 2;
} else
DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
DRM_ERROR("crtc %u: Can't calculate constants, dotclock = 0!\n",
crtc->base.id);
 
crtc->pixeldur_ns = pixeldur_ns;
crtc->linedur_ns = linedur_ns;
crtc->framedur_ns = framedur_ns;
vblank->linedur_ns = linedur_ns;
vblank->framedur_ns = framedur_ns;
 
DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
DRM_DEBUG("crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
crtc->base.id, mode->crtc_htotal,
mode->crtc_vtotal, mode->crtc_vdisplay);
DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
crtc->base.id, dotclock, framedur_ns,
linedur_ns, pixeldur_ns);
DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d\n",
crtc->base.id, dotclock, framedur_ns, linedur_ns);
}
EXPORT_SYMBOL(drm_calc_timestamping_constants);
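
/* Worked example, assuming a 1920x1080@60 mode: crtc_clock = 148500 kHz,
 * crtc_htotal = 2200, crtc_vtotal = 1125, so
 * linedur_ns = 2200 * 1000000 / 148500 = 14814 ns and
 * framedur_ns = 2200 * 1125 * 1000000 / 148500 = 16666666 ns (~16.7 ms).
 */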
 
268,7 → 538,7
/**
* drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
* @dev: DRM device
* @crtc: Which CRTC's vblank timestamp to retrieve
* @pipe: index of CRTC whose vblank timestamp to retrieve
* @max_error: Desired maximum allowable error in timestamps (nanosecs)
* On return contains true maximum error of timestamp
* @vblank_time: Pointer to struct timeval which should receive the timestamp
275,7 → 545,6
* @flags: Flags to pass to driver:
* 0 = Default,
* DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
* @refcrtc: CRTC which defines scanout timing
* @mode: mode which defines the scanout timings
*
* Implements calculation of exact vblank timestamps from given drm_display_mode
311,21 → 580,22
* DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
*
*/
int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
unsigned int pipe,
int *max_error,
struct timeval *vblank_time,
unsigned flags,
const struct drm_crtc *refcrtc,
const struct drm_display_mode *mode)
{
struct timeval tv_etime;
int vbl_status;
ktime_t stime, etime;
unsigned int vbl_status;
int ret = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
int vpos, hpos, i;
int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
bool invbl;
int delta_ns, duration_ns;
 
if (crtc < 0 || crtc >= dev->num_crtcs) {
DRM_ERROR("Invalid crtc %d\n", crtc);
if (pipe >= dev->num_crtcs) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
 
335,27 → 605,274
return -EIO;
}
 
/* Durations of frames, lines, pixels in nanoseconds. */
framedur_ns = refcrtc->framedur_ns;
linedur_ns = refcrtc->linedur_ns;
pixeldur_ns = refcrtc->pixeldur_ns;
 
/* If mode timing undefined, just return as no-op:
* Happens during initial modesetting of a crtc.
*/
if (framedur_ns == 0) {
DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
if (mode->crtc_clock == 0) {
DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
return -EAGAIN;
}
 
/* Get current scanout position with system timestamp.
* Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
* if single query takes longer than max_error nanoseconds.
*
* This guarantees a tight bound on maximum error if
* code gets preempted or delayed for some reason.
*/
for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
/*
* Get vertical and horizontal scanout position vpos, hpos,
* and bounding timestamps stime, etime, pre/post query.
*/
vbl_status = dev->driver->get_scanout_position(dev, pipe, flags,
&vpos, &hpos,
&stime, &etime,
mode);
 
/* Return as no-op if scanout query unsupported or failed. */
if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
DRM_DEBUG("crtc %u : scanoutpos query failed [0x%x].\n",
pipe, vbl_status);
return -EIO;
}
 
/* Compute uncertainty in timestamp of scanout position query. */
duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
 
/* Accept result with < max_error nsecs timing uncertainty. */
if (duration_ns <= *max_error)
break;
}
 
/* Noisy system timing? */
if (i == DRM_TIMESTAMP_MAXRETRIES) {
DRM_DEBUG("crtc %u: Noisy timestamp %d us > %d us [%d reps].\n",
pipe, duration_ns/1000, *max_error/1000, i);
}
 
/* Return upper bound of timestamp precision error. */
*max_error = duration_ns;
 
/* Check if in vblank area:
* vpos is >=0 in video scanout area, but negative
* within vblank area, counting down the number of lines until
* start of scanout.
*/
if (vbl_status & DRM_SCANOUTPOS_IN_VBLANK)
ret |= DRM_VBLANKTIME_IN_VBLANK;
 
/* Convert scanout position into elapsed time at raw_time query
* since start of scanout at first display scanline. delta_ns
* can be negative if start of scanout hasn't happened yet.
*/
delta_ns = div_s64(1000000LL * (vpos * mode->crtc_htotal + hpos),
mode->crtc_clock);
 
if (!drm_timestamp_monotonic)
etime = ktime_mono_to_real(etime);
 
/* save this only for debugging purposes */
tv_etime = ktime_to_timeval(etime);
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
if (delta_ns < 0)
etime = ktime_add_ns(etime, -delta_ns);
else
etime = ktime_sub_ns(etime, delta_ns);
*vblank_time = ktime_to_timeval(etime);
 
DRM_DEBUG_VBL("crtc %u : v 0x%x p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
pipe, vbl_status, hpos, vpos,
(long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
(long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
duration_ns/1000, i);
 
return ret;
}
EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
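
/* Worked example for the delta_ns correction above, with the same
 * 1920x1080@60 timings (crtc_htotal = 2200, crtc_clock = 148500): a
 * query returning vpos = -5, hpos = 0 gives
 * delta_ns = 1000000 * (-5 * 2200) / 148500 = -74074 ns, i.e. scanout
 * starts ~74 us in the future, so that much is added back onto etime.
 */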
 
static struct timeval get_drm_timestamp(void)
{
ktime_t now;
 
now = drm_timestamp_monotonic ? ktime_get() : ktime_get_real();
return ktime_to_timeval(now);
}
 
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
* vblank interval
* @dev: DRM device
* @pipe: index of CRTC whose vblank timestamp to retrieve
* @tvblank: Pointer to target struct timeval which should receive the timestamp
* @flags: Flags to pass to driver:
* 0 = Default,
* DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
*
* Fetches the system timestamp corresponding to the time of the most recent
* vblank interval on specified CRTC. May call into kms-driver to
* compute the timestamp with a high-precision GPU specific method.
*
* Returns zero if timestamp originates from uncorrected do_gettimeofday()
* call, i.e., it isn't very precisely locked to the true vblank.
*
* Returns:
* True if timestamp is considered to be very precise, false otherwise.
*/
static bool
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
struct timeval *tvblank, unsigned flags)
{
int ret;
 
/* Define requested maximum error on timestamps (nanoseconds). */
int max_error = (int) drm_timestamp_precision * 1000;
 
/* Query driver if possible and precision timestamping enabled. */
if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error,
tvblank, flags);
if (ret > 0)
return true;
}
 
/* GPU high precision timestamp query unsupported or failed.
* Return current monotonic/gettimeofday timestamp as best estimate.
*/
*tvblank = get_drm_timestamp();
 
return false;
}
 
/**
* drm_vblank_count - retrieve "cooked" vblank counter value
* @dev: DRM device
* @pipe: index of CRTC for which to retrieve the counter
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*
* This is the legacy version of drm_crtc_vblank_count().
*
* Returns:
* The software vblank counter.
*/
u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
if (WARN_ON(pipe >= dev->num_crtcs))
return 0;
 
return vblank->count;
}
EXPORT_SYMBOL(drm_vblank_count);
 
/**
* drm_crtc_vblank_count - retrieve "cooked" vblank counter value
* @crtc: which counter to retrieve
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*
* This is the native KMS version of drm_vblank_count().
*
* Returns:
* The software vblank counter.
*/
u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
{
return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_count);
 
/**
* drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the
* system timestamp corresponding to that vblank counter value.
* @dev: DRM device
* @pipe: index of CRTC whose counter to retrieve
* @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity. Returns corresponding system timestamp of the time
* of the vblank interval that corresponds to the current vblank counter value.
*
* This is the legacy version of drm_crtc_vblank_count_and_time().
*/
u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
struct timeval *vblanktime)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
int count = DRM_TIMESTAMP_MAXRETRIES;
u32 cur_vblank;
 
if (WARN_ON(pipe >= dev->num_crtcs))
return 0;
 
/*
* Vblank timestamps are read lockless. To ensure consistency the vblank
* counter is rechecked and ordering is ensured using memory barriers.
* This works like a seqlock. The write-side barriers are in store_vblank.
*/
do {
cur_vblank = vblank->count;
smp_rmb();
*vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
smp_rmb();
} while (cur_vblank != vblank->count && --count > 0);
 
return cur_vblank;
}
EXPORT_SYMBOL(drm_vblank_count_and_time);
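
/*
 * Sketch of the pairing write side (it lives in store_vblank(), which is
 * not part of this hunk): the counter itself serves as the sequence
 * number, so the lockless readers above simply retry while it moves.
 *
 *	store_vblank():                 drm_vblank_count_and_time():
 *		timestamp slot = ts;            cur = vblank->count;
 *		smp_wmb();                      smp_rmb();
 *		vblank->count += inc;           read timestamp slot;
 *		smp_wmb();                      smp_rmb();
 *		                                retry if cur != vblank->count
 */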
/**
* drm_vblank_enable - enable the vblank interrupt on a CRTC
* @dev: DRM device
* @pipe: CRTC index
*
* Returns:
* Zero on success or a negative error code on failure.
*/
static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
int ret = 0;
 
assert_spin_locked(&dev->vbl_lock);
 
spin_lock(&dev->vblank_time_lock);
 
if (!vblank->enabled) {
/*
* Enable vblank irqs under vblank_time_lock protection.
* All vblank count & timestamp updates are held off
* until we are done reinitializing master counter and
* timestamps. Filter code in drm_handle_vblank() will
* prevent double-accounting of the same vblank interval.
*/
ret = dev->driver->enable_vblank(dev, pipe);
DRM_DEBUG("enabling vblank on crtc %u, ret: %d\n", pipe, ret);
if (ret)
atomic_dec(&vblank->refcount);
else {
vblank->enabled = true;
drm_update_vblank_count(dev, pipe, 0);
}
}
 
spin_unlock(&dev->vblank_time_lock);
 
return ret;
}
 
/**
* drm_vblank_get - get a reference count on vblank events
* @dev: DRM device
* @crtc: which CRTC to own
* @pipe: index of CRTC to own
*
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
363,22 → 880,24
* This is the legacy version of drm_crtc_vblank_get().
*
* Returns:
* Zero on success, nonzero on failure.
* Zero on success or a negative error code on failure.
*/
int drm_vblank_get(struct drm_device *dev, int crtc)
int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
int ret = 0;
#if 0
 
if (WARN_ON(crtc >= dev->num_crtcs))
if (!dev->num_crtcs)
return -EINVAL;
 
if (WARN_ON(pipe >= dev->num_crtcs))
return -EINVAL;
 
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &vblank->refcount) == 1) {
ret = drm_vblank_enable(dev, crtc);
ret = drm_vblank_enable(dev, pipe);
} else {
if (!vblank->enabled) {
atomic_dec(&vblank->refcount);
386,7 → 905,7
}
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
#endif
 
return ret;
}
EXPORT_SYMBOL(drm_vblank_get);
398,10 → 917,10
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
*
* This is the native kms version of drm_vblank_off().
* This is the native kms version of drm_vblank_get().
*
* Returns:
* Zero on success, nonzero on failure.
* Zero on success or a negative error code on failure.
*/
int drm_crtc_vblank_get(struct drm_crtc *crtc)
{
410,9 → 929,9
EXPORT_SYMBOL(drm_crtc_vblank_get);
 
/**
* drm_vblank_put - give up ownership of vblank events
* drm_vblank_put - release ownership of vblank events
* @dev: DRM device
* @crtc: which counter to give up
* @pipe: index of CRTC to release
*
* Release ownership of a given vblank counter, turning off interrupts
* if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
419,15 → 938,14
*
* This is the legacy version of drm_crtc_vblank_put().
*/
void drm_vblank_put(struct drm_device *dev, int crtc)
void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
{
#if 0
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
if (WARN_ON(atomic_read(&vblank->refcount) == 0))
if (WARN_ON(pipe >= dev->num_crtcs))
return;
 
if (WARN_ON(crtc >= dev->num_crtcs))
if (WARN_ON(atomic_read(&vblank->refcount) == 0))
return;
 
/* Last user schedules interrupt disable */
440,7 → 958,6
mod_timer(&vblank->disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
#endif
}
EXPORT_SYMBOL(drm_vblank_put);
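
/*
 * Usage sketch for the get/put pair (pipe is illustrative): hold a
 * reference for as long as vblank state is being observed, so the
 * interrupt cannot be disabled underneath the caller:
 *
 *	if (drm_vblank_get(dev, pipe) == 0) {
 *		... sample drm_vblank_count(dev, pipe) ...
 *		drm_vblank_put(dev, pipe);
 *	}
 */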
 
462,13 → 979,13
/**
* drm_wait_one_vblank - wait for one vblank
* @dev: DRM device
* @crtc: crtc index
* @pipe: CRTC index
*
* This waits for one vblank to pass on @crtc, using the irq driver interfaces.
* It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
* This waits for one vblank to pass on @pipe, using the irq driver interfaces.
* It is a failure to call this when the vblank irq for @pipe is disabled, e.g.
* due to lack of driver support or because the crtc is off.
*/
void drm_wait_one_vblank(struct drm_device *dev, int crtc)
void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
{
#if 0
int ret;
508,7 → 1025,7
/**
* drm_vblank_off - disable vblank events on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
* @pipe: CRTC index
*
* Drivers can use this function to shut down the vblank interrupt handling when
* disabling a crtc. This function ensures that the latest vblank frame count is
519,9 → 1036,9
*
* This is the legacy version of drm_crtc_vblank_off().
*/
void drm_vblank_off(struct drm_device *dev, int crtc)
void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long irqflags;
551,9 → 1068,41
EXPORT_SYMBOL(drm_crtc_vblank_off);
 
/**
* drm_crtc_vblank_reset - reset vblank state to off on a CRTC
* @crtc: CRTC in question
*
* Drivers can use this function to reset the vblank state to off at load time.
* Drivers should use this together with the drm_crtc_vblank_off() and
* drm_crtc_vblank_on() functions. The difference compared to
* drm_crtc_vblank_off() is that this function doesn't save the vblank counter
* and hence doesn't need to call any driver hooks.
*/
void drm_crtc_vblank_reset(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned long irqflags;
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/*
* Prevent subsequent drm_vblank_get() from enabling the vblank
* interrupt by bumping the refcount.
*/
if (!vblank->inmodeset) {
atomic_inc(&vblank->refcount);
vblank->inmodeset = 1;
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
WARN_ON(!list_empty(&dev->vblank_event_list));
}
EXPORT_SYMBOL(drm_crtc_vblank_reset);
 
/**
* drm_vblank_on - enable vblank events on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
* @pipe: CRTC index
*
* This functions restores the vblank interrupt state captured with
* drm_vblank_off() again. Note that calls to drm_vblank_on() and
562,12 → 1111,34
*
* This is the legacy version of drm_crtc_vblank_on().
*/
void drm_vblank_on(struct drm_device *dev, int crtc)
void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
 
dbgprintf("%s: pipe %u dev->num_crtcs %d\n", __func__, pipe, dev->num_crtcs);
 
if (WARN_ON(pipe >= dev->num_crtcs))
return;
 
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Drop our private "prevent drm_vblank_get" refcount */
if (vblank->inmodeset) {
atomic_dec(&vblank->refcount);
vblank->inmodeset = 0;
}
 
drm_reset_vblank_timestamp(dev, pipe);
 
/*
* re-enable interrupts if there are users left, or the
* user wishes vblank interrupts to be enabled all the time.
*/
if (atomic_read(&vblank->refcount) != 0 ||
(!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
WARN_ON(drm_vblank_enable(dev, pipe));
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_on);
 
/**
590,7 → 1161,7
/**
* drm_vblank_pre_modeset - account for vblanks across mode sets
* @dev: DRM device
* @crtc: CRTC in question
* @pipe: CRTC index
*
* Account for vblank events across mode setting events, which will likely
* reset the hardware frame counter.
610,14 → 1181,15
* Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
* again.
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe)
{
#if 0
/* vblank is not initialized (IRQ not installed ?) */
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
/* vblank is not initialized (IRQ not installed ?), or has been freed */
if (!dev->num_crtcs)
return;
 
if (WARN_ON(crtc >= dev->num_crtcs))
if (WARN_ON(pipe >= dev->num_crtcs))
return;
 
/*
629,10 → 1201,9
*/
if (!vblank->inmodeset) {
vblank->inmodeset = 0x1;
if (drm_vblank_get(dev, crtc) == 0)
if (drm_vblank_get(dev, pipe) == 0)
vblank->inmodeset |= 0x2;
}
#endif
}
EXPORT_SYMBOL(drm_vblank_pre_modeset);
 
639,14 → 1210,14
/**
* drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
* @dev: DRM device
* @crtc: CRTC in question
* @pipe: CRTC index
*
* This function again drops the temporary vblank reference acquired in
* drm_vblank_pre_modeset.
*/
void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
{
#if 0
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
 
/* vblank is not initialized (IRQ not installed ?), or has been freed */
653,6 → 1224,9
if (!dev->num_crtcs)
return;
 
if (WARN_ON(pipe >= dev->num_crtcs))
return;
 
if (vblank->inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = true;
659,12 → 1233,26
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
if (vblank->inmodeset & 0x2)
drm_vblank_put(dev, crtc);
drm_vblank_put(dev, pipe);
 
vblank->inmodeset = 0;
}
#endif
}
EXPORT_SYMBOL(drm_vblank_post_modeset);
 
 
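/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: 64bit dividend
 * @divisor: 64bit divisor
 *
 * Appears to be a port of the old approximate lib/div64.c helper: when the
 * divisor does not fit in 32 bits, both operands are shifted right until it
 * does, so the quotient can be off by a small amount for such divisors
 * (e.g. 2^63 / (2^32 + 1) yields 2^31 instead of the exact 2^31 - 1). The
 * upside is that only the 64-by-32 div_u64() is needed, which this 32-bit
 * build has.
 */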
u64 div64_u64(u64 dividend, u64 divisor)
{
u32 high, d;
 
high = divisor >> 32;
if (high) {
unsigned int shift = fls(high);
 
d = divisor >> shift;
dividend >>= shift;
} else
d = divisor;
 
return div_u64(dividend, d);
}
/drivers/video/drm/drm_legacy.h
42,7 → 42,7
#define DRM_KERNEL_CONTEXT 0
#define DRM_RESERVED_CONTEXTS 1
 
int drm_legacy_ctxbitmap_init(struct drm_device *dev);
void drm_legacy_ctxbitmap_init(struct drm_device *dev);
void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
/drivers/video/drm/drm_mipi_dsi.c
0,0 → 1,688
/*
* MIPI DSI Bus
*
* Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd.
* Andrzej Hajda <a.hajda@samsung.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
 
#include <drm/drmP.h>
#include <drm/drm_mipi_dsi.h>
 
#include <linux/device.h>
#include <linux/module.h>
//#include <linux/of_device.h>
#include <linux/pm_runtime.h>
//#include <linux/slab.h>
 
#include <video/mipi_display.h>
 
 
/**
* mipi_dsi_attach - attach a DSI device to its DSI host
* @dsi: DSI peripheral
*/
int mipi_dsi_attach(struct mipi_dsi_device *dsi)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
 
if (!ops || !ops->attach)
return -ENOSYS;
 
return ops->attach(dsi->host, dsi);
}
EXPORT_SYMBOL(mipi_dsi_attach);
 
static ssize_t mipi_dsi_device_transfer(struct mipi_dsi_device *dsi,
struct mipi_dsi_msg *msg)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
 
if (!ops || !ops->transfer)
return -ENOSYS;
 
if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
msg->flags |= MIPI_DSI_MSG_USE_LPM;
 
return ops->transfer(dsi->host, msg);
}
 
/**
* mipi_dsi_packet_format_is_short - check if a packet is of the short format
* @type: MIPI DSI data type of the packet
*
* Return: true if the packet for the given data type is a short packet, false
* otherwise.
*/
bool mipi_dsi_packet_format_is_short(u8 type)
{
switch (type) {
case MIPI_DSI_V_SYNC_START:
case MIPI_DSI_V_SYNC_END:
case MIPI_DSI_H_SYNC_START:
case MIPI_DSI_H_SYNC_END:
case MIPI_DSI_END_OF_TRANSMISSION:
case MIPI_DSI_COLOR_MODE_OFF:
case MIPI_DSI_COLOR_MODE_ON:
case MIPI_DSI_SHUTDOWN_PERIPHERAL:
case MIPI_DSI_TURN_ON_PERIPHERAL:
case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
case MIPI_DSI_DCS_SHORT_WRITE:
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
case MIPI_DSI_DCS_READ:
case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
return true;
}
 
return false;
}
EXPORT_SYMBOL(mipi_dsi_packet_format_is_short);
 
/**
* mipi_dsi_packet_format_is_long - check if a packet is of the long format
* @type: MIPI DSI data type of the packet
*
* Return: true if the packet for the given data type is a long packet, false
* otherwise.
*/
bool mipi_dsi_packet_format_is_long(u8 type)
{
switch (type) {
case MIPI_DSI_NULL_PACKET:
case MIPI_DSI_BLANKING_PACKET:
case MIPI_DSI_GENERIC_LONG_WRITE:
case MIPI_DSI_DCS_LONG_WRITE:
case MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20:
case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24:
case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16:
case MIPI_DSI_PACKED_PIXEL_STREAM_30:
case MIPI_DSI_PACKED_PIXEL_STREAM_36:
case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12:
case MIPI_DSI_PACKED_PIXEL_STREAM_16:
case MIPI_DSI_PACKED_PIXEL_STREAM_18:
case MIPI_DSI_PIXEL_STREAM_3BYTE_18:
case MIPI_DSI_PACKED_PIXEL_STREAM_24:
return true;
}
 
return false;
}
EXPORT_SYMBOL(mipi_dsi_packet_format_is_long);
 
/**
* mipi_dsi_create_packet - create a packet from a message according to the
* DSI protocol
* @packet: pointer to a DSI packet structure
* @msg: message to translate into a packet
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
const struct mipi_dsi_msg *msg)
{
if (!packet || !msg)
return -EINVAL;
 
/* do some minimum sanity checking */
if (!mipi_dsi_packet_format_is_short(msg->type) &&
!mipi_dsi_packet_format_is_long(msg->type))
return -EINVAL;
 
if (msg->channel > 3)
return -EINVAL;
 
memset(packet, 0, sizeof(*packet));
packet->header[0] = ((msg->channel & 0x3) << 6) | (msg->type & 0x3f);
 
/* TODO: compute ECC if hardware support is not available */
 
/*
* Long write packets contain the word count in header bytes 1 and 2.
* The payload follows the header and is word count bytes long.
*
* Short write packets encode up to two parameters in header bytes 1
* and 2.
*/
if (mipi_dsi_packet_format_is_long(msg->type)) {
packet->header[1] = (msg->tx_len >> 0) & 0xff;
packet->header[2] = (msg->tx_len >> 8) & 0xff;
 
packet->payload_length = msg->tx_len;
packet->payload = msg->tx_buf;
} else {
const u8 *tx = msg->tx_buf;
 
packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0;
packet->header[2] = (msg->tx_len > 1) ? tx[1] : 0;
}
 
packet->size = sizeof(packet->header) + packet->payload_length;
 
return 0;
}
EXPORT_SYMBOL(mipi_dsi_create_packet);
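
/*
 * Usage sketch (illustrative, not part of the original file): packing a
 * three-byte DCS long write on virtual channel 0; the payload bytes are
 * hypothetical.
 */
#if 0
static int example_pack_long_write(void)
{
	static const u8 payload[3] = { 0xb0, 0x11, 0x22 };
	struct mipi_dsi_msg msg = {
		.channel = 0,
		.type = MIPI_DSI_DCS_LONG_WRITE,
		.tx_buf = payload,
		.tx_len = sizeof(payload),
	};
	struct mipi_dsi_packet packet;
	int ret;

	ret = mipi_dsi_create_packet(&packet, &msg);
	/* on success: header = { 0x39, 0x03, 0x00 }, payload_length = 3 */
	return ret;
}
#endif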
 
/**
* mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of
* the payload in a long packet transmitted from the peripheral back to the
* host processor
* @dsi: DSI peripheral device
* @value: the maximum size of the payload
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
u16 value)
{
u8 tx[2] = { value & 0xff, value >> 8 };
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
.tx_len = sizeof(tx),
.tx_buf = tx,
};
 
return mipi_dsi_device_transfer(dsi, &msg);
}
EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
 
/**
* mipi_dsi_generic_write() - transmit data using a generic write packet
* @dsi: DSI peripheral device
* @payload: buffer containing the payload
* @size: size of payload buffer
*
* This function will automatically choose the right data type depending on
* the payload length.
*
* Return: The number of bytes transmitted on success or a negative error code
* on failure.
*/
ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
size_t size)
{
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.tx_buf = payload,
.tx_len = size
};
 
switch (size) {
case 0:
msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM;
break;
 
case 1:
msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM;
break;
 
case 2:
msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM;
break;
 
default:
msg.type = MIPI_DSI_GENERIC_LONG_WRITE;
break;
}
 
return mipi_dsi_device_transfer(dsi, &msg);
}
EXPORT_SYMBOL(mipi_dsi_generic_write);
 
/**
* mipi_dsi_generic_read() - receive data using a generic read packet
* @dsi: DSI peripheral device
* @params: buffer containing the request parameters
* @num_params: number of request parameters
* @data: buffer in which to return the received data
* @size: size of receive buffer
*
* This function will automatically choose the right data type depending on
* the number of parameters passed in.
*
* Return: The number of bytes successfully read or a negative error code on
* failure.
*/
ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
size_t num_params, void *data, size_t size)
{
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.tx_len = num_params,
.tx_buf = params,
.rx_len = size,
.rx_buf = data
};
 
switch (num_params) {
case 0:
msg.type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
break;
 
case 1:
msg.type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
break;
 
case 2:
msg.type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
break;
 
default:
return -EINVAL;
}
 
return mipi_dsi_device_transfer(dsi, &msg);
}
EXPORT_SYMBOL(mipi_dsi_generic_read);
 
/**
* mipi_dsi_dcs_write_buffer() - transmit a DCS command with payload
* @dsi: DSI peripheral device
* @data: buffer containing data to be transmitted
* @len: size of transmission buffer
*
* This function will automatically choose the right data type depending on
* the command payload length.
*
* Return: The number of bytes successfully transmitted or a negative error
* code on failure.
*/
ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
const void *data, size_t len)
{
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.tx_buf = data,
.tx_len = len
};
 
switch (len) {
case 0:
return -EINVAL;
 
case 1:
msg.type = MIPI_DSI_DCS_SHORT_WRITE;
break;
 
case 2:
msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
break;
 
default:
msg.type = MIPI_DSI_DCS_LONG_WRITE;
break;
}
 
return mipi_dsi_device_transfer(dsi, &msg);
}
EXPORT_SYMBOL(mipi_dsi_dcs_write_buffer);
 
/**
* mipi_dsi_dcs_write() - send DCS write command
* @dsi: DSI peripheral device
* @cmd: DCS command
* @data: buffer containing the command payload
* @len: command payload length
*
* This function will automatically choose the right data type depending on
* the command payload length.
*
* Return: The number of bytes successfully transmitted or a negative error
* code on failure.
*/
ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
const void *data, size_t len)
{
ssize_t err;
size_t size;
u8 *tx;
 
if (len > 0) {
size = 1 + len;
 
tx = kmalloc(size, GFP_KERNEL);
if (!tx)
return -ENOMEM;
 
/* concatenate the DCS command byte and the payload */
tx[0] = cmd;
memcpy(&tx[1], data, len);
} else {
tx = &cmd;
size = 1;
}
 
err = mipi_dsi_dcs_write_buffer(dsi, tx, size);
 
if (len > 0)
kfree(tx);
 
return err;
}
EXPORT_SYMBOL(mipi_dsi_dcs_write);
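
/*
 * Usage sketch: command byte and parameters are hypothetical,
 * panel-specific values. With a two-byte payload the helper concatenates
 * command and payload into three bytes and sends a DCS long write.
 */
#if 0
static int example_vendor_unlock(struct mipi_dsi_device *dsi)
{
	static const u8 params[2] = { 0x5a, 0x5a };
	ssize_t err;

	err = mipi_dsi_dcs_write(dsi, 0xf0, params, sizeof(params));
	return err < 0 ? err : 0;
}
#endif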
 
/**
* mipi_dsi_dcs_read() - send DCS read request command
* @dsi: DSI peripheral device
* @cmd: DCS command
* @data: buffer in which to receive data
* @len: size of receive buffer
*
* Return: The number of bytes read or a negative error code on failure.
*/
ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
size_t len)
{
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
.type = MIPI_DSI_DCS_READ,
.tx_buf = &cmd,
.tx_len = 1,
.rx_buf = data,
.rx_len = len
};
 
return mipi_dsi_device_transfer(dsi, &msg);
}
EXPORT_SYMBOL(mipi_dsi_dcs_read);
 
/**
* mipi_dsi_dcs_nop() - send DCS nop packet
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi)
{
ssize_t err;
 
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_NOP, NULL, 0);
if (err < 0)
return err;
 
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_nop);
 
/**
* mipi_dsi_dcs_soft_reset() - perform a software reset of the display module
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi)
{
ssize_t err;
 
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SOFT_RESET, NULL, 0);
if (err < 0)
return err;
 
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_soft_reset);
 
/**
* mipi_dsi_dcs_get_power_mode() - query the display module's current power
* mode
* @dsi: DSI peripheral device
* @mode: return location for the current power mode
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode)
{
ssize_t err;
 
err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_POWER_MODE, mode,
sizeof(*mode));
if (err <= 0) {
if (err == 0)
err = -ENODATA;
 
return err;
}
 
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_get_power_mode);
 
/**
* mipi_dsi_dcs_get_pixel_format() - gets the pixel format for the RGB image
* data used by the interface
* @dsi: DSI peripheral device
* @format: return location for the pixel format
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format)
{
ssize_t err;
 
err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_PIXEL_FORMAT, format,
sizeof(*format));
if (err <= 0) {
if (err == 0)
err = -ENODATA;
 
return err;
}
 
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_get_pixel_format);
 
/**
* mipi_dsi_dcs_enter_sleep_mode() - disable all unnecessary blocks inside the
* display module except interface communication
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi)
{
ssize_t err;
 
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
if (err < 0)
return err;
 
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_enter_sleep_mode);
 
/**
* mipi_dsi_dcs_exit_sleep_mode() - enable all blocks inside the display
* module
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi)
{
ssize_t err;
 
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
if (err < 0)
return err;
 
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_exit_sleep_mode);
 
/**
* mipi_dsi_dcs_set_display_off() - stop displaying the image data on the
* display device
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi)
{
ssize_t err;
 
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
if (err < 0)
return err;
 
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_display_off);
 
/**
* mipi_dsi_dcs_set_display_on() - start displaying the image data on the
* display device
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure
*/
int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi)
{
ssize_t err;
 
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
if (err < 0)
return err;
 
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_display_on);
 
/**
* mipi_dsi_dcs_set_column_address() - define the column extent of the frame
* memory accessed by the host processor
* @dsi: DSI peripheral device
* @start: first column of frame memory
* @end: last column of frame memory
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
u16 end)
{
u8 payload[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff };
ssize_t err;
 
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_COLUMN_ADDRESS, payload,
sizeof(payload));
if (err < 0)
return err;
 
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_column_address);
 
/**
* mipi_dsi_dcs_set_page_address() - define the page extent of the frame
* memory accessed by the host processor
* @dsi: DSI peripheral device
* @start: first page of frame memory
* @end: last page of frame memory
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
u16 end)
{
u8 payload[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff };
ssize_t err;
 
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PAGE_ADDRESS, payload,
sizeof(payload));
if (err < 0)
return err;
 
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_page_address);
 
/**
* mipi_dsi_dcs_set_tear_off() - turn off the display module's Tearing Effect
* output signal on the TE signal line
* @dsi: DSI peripheral device
*
* Return: 0 on success or a negative error code on failure
*/
int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi)
{
ssize_t err;
 
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_OFF, NULL, 0);
if (err < 0)
return err;
 
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_off);
 
/**
* mipi_dsi_dcs_set_tear_on() - turn on the display module's Tearing Effect
* output signal on the TE signal line.
* @dsi: DSI peripheral device
* @mode: the Tearing Effect Output Line mode
*
* Return: 0 on success or a negative error code on failure
*/
int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
enum mipi_dsi_dcs_tear_mode mode)
{
u8 value = mode;
ssize_t err;
 
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_ON, &value,
sizeof(value));
if (err < 0)
return err;
 
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
 
/**
* mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
* data used by the interface
* @dsi: DSI peripheral device
* @format: pixel format
*
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format)
{
ssize_t err;
 
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PIXEL_FORMAT, &format,
sizeof(format));
if (err < 0)
return err;
 
return 0;
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
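
/*
 * Usage sketch: a minimal DCS power-up sequence built from the helpers
 * above. The 120 ms sleep-out delay is a common datasheet value, not a
 * requirement of this API.
 */
#if 0
static int example_panel_enable(struct mipi_dsi_device *dsi)
{
	int ret;

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0)
		return ret;

	msleep(120); /* panel-specific sleep-out delay */

	ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT);
	if (ret < 0)
		return ret;

	return mipi_dsi_dcs_set_display_on(dsi);
}
#endif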
 
MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
MODULE_DESCRIPTION("MIPI DSI Bus");
MODULE_LICENSE("GPL and additional rights");
/drivers/video/drm/drm_mm.c
1,4 → 1,4
/**************************************************************************
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
* All Rights Reserved.
91,29 → 91,29
*/
 
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
unsigned long size,
u64 size,
unsigned alignment,
unsigned long color,
enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
unsigned long size,
u64 size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
u64 start,
u64 end,
enum drm_mm_search_flags flags);
 
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
u64 size, unsigned alignment,
unsigned long color,
enum drm_mm_allocator_flags flags)
{
struct drm_mm *mm = hole_node->mm;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
unsigned long adj_start = hole_start;
unsigned long adj_end = hole_end;
u64 hole_start = drm_mm_hole_node_start(hole_node);
u64 hole_end = drm_mm_hole_node_end(hole_node);
u64 adj_start = hole_start;
u64 adj_end = hole_end;
 
BUG_ON(node->allocated);
 
124,12 → 124,15
adj_start = adj_end - size;
 
if (alignment) {
unsigned tmp = adj_start % alignment;
if (tmp) {
u64 tmp = adj_start;
unsigned rem;
 
rem = do_div(tmp, alignment);
if (rem) {
if (flags & DRM_MM_CREATE_TOP)
adj_start -= tmp;
adj_start -= rem;
else
adj_start += alignment - tmp;
adj_start += alignment - rem;
}
}
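
	/*
	 * Worked example for the do_div() based rounding above (a plain
	 * 64-bit '%' would drag in a libgcc helper on 32-bit builds):
	 * adj_start = 0x100001000, alignment = 0x3000 -> rem = 0x2000, so a
	 * bottom-up allocation advances by 0x1000 to adj_start = 0x100002000.
	 */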
 
176,9 → 179,9
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
struct drm_mm_node *hole;
unsigned long end = node->start + node->size;
unsigned long hole_start;
unsigned long hole_end;
u64 end = node->start + node->size;
u64 hole_start;
u64 hole_end;
 
BUG_ON(node == NULL);
 
227,7 → 230,7
* 0 on success, -ENOSPC if there's no suitable hole.
*/
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
u64 size, unsigned alignment,
unsigned long color,
enum drm_mm_search_flags sflags,
enum drm_mm_allocator_flags aflags)
246,16 → 249,16
 
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
u64 size, unsigned alignment,
unsigned long color,
unsigned long start, unsigned long end,
u64 start, u64 end,
enum drm_mm_allocator_flags flags)
{
struct drm_mm *mm = hole_node->mm;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
unsigned long adj_start = hole_start;
unsigned long adj_end = hole_end;
u64 hole_start = drm_mm_hole_node_start(hole_node);
u64 hole_end = drm_mm_hole_node_end(hole_node);
u64 adj_start = hole_start;
u64 adj_end = hole_end;
 
BUG_ON(!hole_node->hole_follows || node->allocated);
 
264,19 → 267,22
if (adj_end > end)
adj_end = end;
 
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
if (flags & DRM_MM_CREATE_TOP)
adj_start = adj_end - size;
 
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
if (alignment) {
u64 tmp = adj_start;
unsigned rem;
 
if (alignment) {
unsigned tmp = adj_start % alignment;
if (tmp) {
rem = do_div(tmp, alignment);
if (rem) {
if (flags & DRM_MM_CREATE_TOP)
adj_start -= tmp;
adj_start -= rem;
else
adj_start += alignment - tmp;
adj_start += alignment - rem;
}
}
 
324,9 → 330,9
* 0 on success, -ENOSPC if there's no suitable hole.
*/
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
u64 size, unsigned alignment,
unsigned long color,
unsigned long start, unsigned long end,
u64 start, u64 end,
enum drm_mm_search_flags sflags,
enum drm_mm_allocator_flags aflags)
{
387,16 → 393,18
}
EXPORT_SYMBOL(drm_mm_remove_node);
 
static int check_free_hole(unsigned long start, unsigned long end,
unsigned long size, unsigned alignment)
static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
if (end - start < size)
return 0;
 
if (alignment) {
unsigned tmp = start % alignment;
if (tmp)
start += alignment - tmp;
u64 tmp = start;
unsigned rem;
 
rem = do_div(tmp, alignment);
if (rem)
start += alignment - rem;
}
 
return end >= start + size;
403,7 → 411,7
}
 
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
unsigned long size,
u64 size,
unsigned alignment,
unsigned long color,
enum drm_mm_search_flags flags)
410,9 → 418,9
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long adj_start;
unsigned long adj_end;
unsigned long best_size;
u64 adj_start;
u64 adj_end;
u64 best_size;
 
BUG_ON(mm->scanned_blocks);
 
421,7 → 429,7
 
__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
flags & DRM_MM_SEARCH_BELOW) {
unsigned long hole_size = adj_end - adj_start;
u64 hole_size = adj_end - adj_start;
 
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
445,18 → 453,18
}
 
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
unsigned long size,
u64 size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
u64 start,
u64 end,
enum drm_mm_search_flags flags)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long adj_start;
unsigned long adj_end;
unsigned long best_size;
u64 adj_start;
u64 adj_end;
u64 best_size;
 
BUG_ON(mm->scanned_blocks);
 
465,7 → 473,7
 
__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
flags & DRM_MM_SEARCH_BELOW) {
unsigned long hole_size = adj_end - adj_start;
u64 hole_size = adj_end - adj_start;
 
if (adj_start < start)
adj_start = start;
561,7 → 569,7
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan(struct drm_mm *mm,
unsigned long size,
u64 size,
unsigned alignment,
unsigned long color)
{
594,11 → 602,11
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan_with_range(struct drm_mm *mm,
unsigned long size,
u64 size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end)
u64 start,
u64 end)
{
mm->scan_color = color;
mm->scan_alignment = alignment;
627,8 → 635,8
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
unsigned long hole_start, hole_end;
unsigned long adj_start, adj_end;
u64 hole_start, hole_end;
u64 adj_start, adj_end;
 
mm->scanned_blocks++;
 
731,7 → 739,7
*
* Note that @mm must be cleared to 0 before calling this function.
*/
void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
{
INIT_LIST_HEAD(&mm->hole_stack);
mm->scanned_blocks = 0;
766,18 → 774,17
}
EXPORT_SYMBOL(drm_mm_takedown);
 
static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
const char *prefix)
{
unsigned long hole_start, hole_end, hole_size;
u64 hole_start, hole_end, hole_size;
 
if (entry->hole_follows) {
hole_start = drm_mm_hole_node_start(entry);
hole_end = drm_mm_hole_node_end(entry);
hole_size = hole_end - hole_start;
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
prefix, hole_start, hole_end,
hole_size);
pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
hole_end, hole_size);
return hole_size;
}
 
792,35 → 799,34
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
struct drm_mm_node *entry;
unsigned long total_used = 0, total_free = 0, total = 0;
u64 total_used = 0, total_free = 0, total = 0;
 
total_free += drm_mm_debug_hole(&mm->head_node, prefix);
 
drm_mm_for_each_node(entry, mm) {
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
prefix, entry->start, entry->start + entry->size,
entry->size);
pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
entry->start + entry->size, entry->size);
total_used += entry->size;
total_free += drm_mm_debug_hole(entry, prefix);
}
total = total_free + total_used;
 
printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
 
#if defined(CONFIG_DEBUG_FS)
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
unsigned long hole_start, hole_end, hole_size;
u64 hole_start, hole_end, hole_size;
 
if (entry->hole_follows) {
hole_start = drm_mm_hole_node_start(entry);
hole_end = drm_mm_hole_node_end(entry);
hole_size = hole_end - hole_start;
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
hole_start, hole_end, hole_size);
seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
hole_end, hole_size);
return hole_size;
}
 
835,20 → 841,20
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
struct drm_mm_node *entry;
unsigned long total_used = 0, total_free = 0, total = 0;
u64 total_used = 0, total_free = 0, total = 0;
 
total_free += drm_mm_dump_hole(m, &mm->head_node);
 
drm_mm_for_each_node(entry, mm) {
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
entry->start, entry->start + entry->size,
entry->size);
seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
entry->start + entry->size, entry->size);
total_used += entry->size;
total_free += drm_mm_dump_hole(m, entry);
}
total = total_free + total_used;
 
seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
seq_printf(m, "total: %llu, used %llu free %llu\n", total,
total_used, total_free);
return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
/drivers/video/drm/drm_modes.c
35,6 → 35,8
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
#include <drm/drm_modes.h>
 
#include "drm_crtc_internal.h"
276,7 → 278,7
hblank = drm_mode->hdisplay * hblank_percentage /
(100 * HV_FACTOR - hblank_percentage);
hblank -= hblank % (2 * CVT_H_GRANULARITY);
/* 14. find the total pixes per line */
/* 14. find the total pixels per line */
drm_mode->htotal = drm_mode->hdisplay + hblank;
drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
drm_mode->hsync_start = drm_mode->hsync_end -
613,6 → 615,46
}
EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
 
/**
* drm_display_mode_to_videomode - fill in @vm using @dmode
* @dmode: drm_display_mode structure to use as source
* @vm: videomode structure to use as destination
*
* Fills out @vm using the display mode specified in @dmode.
*/
void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
struct videomode *vm)
{
vm->hactive = dmode->hdisplay;
vm->hfront_porch = dmode->hsync_start - dmode->hdisplay;
vm->hsync_len = dmode->hsync_end - dmode->hsync_start;
vm->hback_porch = dmode->htotal - dmode->hsync_end;
 
vm->vactive = dmode->vdisplay;
vm->vfront_porch = dmode->vsync_start - dmode->vdisplay;
vm->vsync_len = dmode->vsync_end - dmode->vsync_start;
vm->vback_porch = dmode->vtotal - dmode->vsync_end;
 
vm->pixelclock = dmode->clock * 1000;
 
vm->flags = 0;
if (dmode->flags & DRM_MODE_FLAG_PHSYNC)
vm->flags |= DISPLAY_FLAGS_HSYNC_HIGH;
else if (dmode->flags & DRM_MODE_FLAG_NHSYNC)
vm->flags |= DISPLAY_FLAGS_HSYNC_LOW;
if (dmode->flags & DRM_MODE_FLAG_PVSYNC)
vm->flags |= DISPLAY_FLAGS_VSYNC_HIGH;
else if (dmode->flags & DRM_MODE_FLAG_NVSYNC)
vm->flags |= DISPLAY_FLAGS_VSYNC_LOW;
if (dmode->flags & DRM_MODE_FLAG_INTERLACE)
vm->flags |= DISPLAY_FLAGS_INTERLACED;
if (dmode->flags & DRM_MODE_FLAG_DBLSCAN)
vm->flags |= DISPLAY_FLAGS_DOUBLESCAN;
if (dmode->flags & DRM_MODE_FLAG_DBLCLK)
vm->flags |= DISPLAY_FLAGS_DOUBLECLK;
}
EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
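
/*
 * Worked example (CEA 1080p60): hdisplay/hsync_start/hsync_end/htotal of
 * 1920/2008/2052/2200, vdisplay/vsync_start/vsync_end/vtotal of
 * 1080/1084/1089/1125 and clock = 148500 map to hfront/hsync/hback =
 * 88/44/148, vfront/vsync/vback = 4/5/36 and pixelclock = 148500000.
 */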
 
#ifdef CONFIG_OF
/**
* of_get_drm_display_mode - get a drm_display_mode from devicetree
737,6 → 779,8
* - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
* buffers containing two eyes (only adjust the timings when needed, eg. for
* "frame packing" or "side by side full").
* - The CRTC_NO_DBLSCAN and CRTC_NO_VSCAN flags request that adjustment *not*
* be performed for doublescan and vscan > 1 modes respectively.
*/
void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
{
763,6 → 807,7
}
}
 
if (!(adjust_flags & CRTC_NO_DBLSCAN)) {
if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
p->crtc_vdisplay *= 2;
p->crtc_vsync_start *= 2;
769,7 → 814,9
p->crtc_vsync_end *= 2;
p->crtc_vtotal *= 2;
}
}
 
if (!(adjust_flags & CRTC_NO_VSCAN)) {
if (p->vscan > 1) {
p->crtc_vdisplay *= p->vscan;
p->crtc_vsync_start *= p->vscan;
776,6 → 823,7
p->crtc_vsync_end *= p->vscan;
p->crtc_vtotal *= p->vscan;
}
}
 
if (adjust_flags & CRTC_STEREO_DOUBLE) {
unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
855,6 → 903,12
*/
bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
{
if (!mode1 && !mode2)
return true;
 
if (!mode1 || !mode2)
return false;
 
/* do clock check convert to PICOS so fb modes get matched
* the same */
if (mode1->clock && mode2->clock) {
904,9 → 958,40
EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
 
/**
* drm_mode_validate_basic - make sure the mode is somewhat sane
* @mode: mode to check
*
* Check that the mode timings are at least somewhat reasonable.
* Any hardware specific limits are left up for each driver to check.
*
* Returns:
* The mode status
*/
enum drm_mode_status
drm_mode_validate_basic(const struct drm_display_mode *mode)
{
if (mode->clock == 0)
return MODE_CLOCK_LOW;
 
if (mode->hdisplay == 0 ||
mode->hsync_start < mode->hdisplay ||
mode->hsync_end < mode->hsync_start ||
mode->htotal < mode->hsync_end)
return MODE_H_ILLEGAL;
 
if (mode->vdisplay == 0 ||
mode->vsync_start < mode->vdisplay ||
mode->vsync_end < mode->vsync_start ||
mode->vtotal < mode->vsync_end)
return MODE_V_ILLEGAL;
 
return MODE_OK;
}
EXPORT_SYMBOL(drm_mode_validate_basic);
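
/*
 * Sketch: a mode whose hsync starts before the end of the active region
 * fails the check above with MODE_H_ILLEGAL. Timing values are
 * hypothetical.
 */
#if 0
static void example_validate(void)
{
	struct drm_display_mode bad = {
		.clock = 148500,
		.hdisplay = 1920, .hsync_start = 1900, /* < hdisplay */
		.hsync_end = 2052, .htotal = 2200,
		.vdisplay = 1080, .vsync_start = 1084,
		.vsync_end = 1089, .vtotal = 1125,
	};

	WARN_ON(drm_mode_validate_basic(&bad) != MODE_H_ILLEGAL);
}
#endif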
 
/**
* drm_mode_validate_size - make sure modes adhere to size constraints
* @dev: DRM device
* @mode_list: list of modes to check
* @mode: mode to check
* @maxX: maximum width
* @maxY: maximum height
*
914,23 → 999,80
* limitations of the DRM device/connector. If a mode is too big its status
* member is updated with the appropriate validation failure code. The list
* itself is not changed.
*
* Returns:
* The mode status
*/
void drm_mode_validate_size(struct drm_device *dev,
struct list_head *mode_list,
enum drm_mode_status
drm_mode_validate_size(const struct drm_display_mode *mode,
int maxX, int maxY)
{
struct drm_display_mode *mode;
 
list_for_each_entry(mode, mode_list, head) {
if (maxX > 0 && mode->hdisplay > maxX)
mode->status = MODE_VIRTUAL_X;
return MODE_VIRTUAL_X;
 
if (maxY > 0 && mode->vdisplay > maxY)
mode->status = MODE_VIRTUAL_Y;
return MODE_VIRTUAL_Y;
 
return MODE_OK;
}
}
EXPORT_SYMBOL(drm_mode_validate_size);
 
#define MODE_STATUS(status) [MODE_ ## status + 3] = #status
 
static const char * const drm_mode_status_names[] = {
MODE_STATUS(OK),
MODE_STATUS(HSYNC),
MODE_STATUS(VSYNC),
MODE_STATUS(H_ILLEGAL),
MODE_STATUS(V_ILLEGAL),
MODE_STATUS(BAD_WIDTH),
MODE_STATUS(NOMODE),
MODE_STATUS(NO_INTERLACE),
MODE_STATUS(NO_DBLESCAN),
MODE_STATUS(NO_VSCAN),
MODE_STATUS(MEM),
MODE_STATUS(VIRTUAL_X),
MODE_STATUS(VIRTUAL_Y),
MODE_STATUS(MEM_VIRT),
MODE_STATUS(NOCLOCK),
MODE_STATUS(CLOCK_HIGH),
MODE_STATUS(CLOCK_LOW),
MODE_STATUS(CLOCK_RANGE),
MODE_STATUS(BAD_HVALUE),
MODE_STATUS(BAD_VVALUE),
MODE_STATUS(BAD_VSCAN),
MODE_STATUS(HSYNC_NARROW),
MODE_STATUS(HSYNC_WIDE),
MODE_STATUS(HBLANK_NARROW),
MODE_STATUS(HBLANK_WIDE),
MODE_STATUS(VSYNC_NARROW),
MODE_STATUS(VSYNC_WIDE),
MODE_STATUS(VBLANK_NARROW),
MODE_STATUS(VBLANK_WIDE),
MODE_STATUS(PANEL),
MODE_STATUS(INTERLACE_WIDTH),
MODE_STATUS(ONE_WIDTH),
MODE_STATUS(ONE_HEIGHT),
MODE_STATUS(ONE_SIZE),
MODE_STATUS(NO_REDUCED),
MODE_STATUS(NO_STEREO),
MODE_STATUS(UNVERIFIED),
MODE_STATUS(BAD),
MODE_STATUS(ERROR),
};
 
#undef MODE_STATUS
 
static const char *drm_get_mode_status_name(enum drm_mode_status status)
{
int index = status + 3;
 
if (WARN_ON(index < 0 || index >= ARRAY_SIZE(drm_mode_status_names)))
return "";
 
return drm_mode_status_names[index];
}
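
/*
 * Illustrative mapping for the "+ 3" offset above, assuming the enum
 * layout in drm_crtc.h (MODE_UNVERIFIED = -3, MODE_BAD = -2,
 * MODE_ERROR = -1): the three negative values land at indices 0..2 and
 * MODE_OK = 0 at index 3, so one flat array names the whole enum.
 */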
 
/**
* drm_mode_prune_invalid - remove invalid modes from mode list
* @dev: DRM device
952,8 → 1094,9
list_del(&mode->head);
if (verbose) {
drm_mode_debug_printmodeline(mode);
DRM_DEBUG_KMS("Not using %s mode %d\n",
mode->name, mode->status);
DRM_DEBUG_KMS("Not using %s mode: %s\n",
mode->name,
drm_get_mode_status_name(mode->status));
}
drm_mode_destroy(dev, mode);
}
1011,7 → 1154,7
/**
* drm_mode_connector_list_update - update the mode list for the connector
* @connector: the connector to update
* @merge_type_bits: whether to merge or overright type bits.
* @merge_type_bits: whether to merge or overwrite type bits
*
* This moves the modes from the @connector probed_modes list
* to the actual mode list. It compares the probed mode against the current
1055,3 → 1198,89
}
}
EXPORT_SYMBOL(drm_mode_connector_list_update);
/**
* drm_mode_convert_to_umode - convert a drm_display_mode into a modeinfo
* @out: drm_mode_modeinfo struct to return to the user
* @in: drm_display_mode to use
*
* Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
* the user.
*/
void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
const struct drm_display_mode *in)
{
WARN(in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX ||
in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX ||
in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX ||
in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX ||
in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX,
"timing values too large for mode info\n");
 
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
out->hsync_end = in->hsync_end;
out->htotal = in->htotal;
out->hskew = in->hskew;
out->vdisplay = in->vdisplay;
out->vsync_start = in->vsync_start;
out->vsync_end = in->vsync_end;
out->vtotal = in->vtotal;
out->vscan = in->vscan;
out->vrefresh = in->vrefresh;
out->flags = in->flags;
out->type = in->type;
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
}
 
/**
* drm_mode_convert_umode - convert a modeinfo into a drm_display_mode
* @out: drm_display_mode to return to the user
* @in: drm_mode_modeinfo to use
*
* Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
* the caller.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_convert_umode(struct drm_display_mode *out,
const struct drm_mode_modeinfo *in)
{
int ret = -EINVAL;
 
if (in->clock > INT_MAX || in->vrefresh > INT_MAX) {
ret = -ERANGE;
goto out;
}
 
if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
goto out;
 
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
out->hsync_end = in->hsync_end;
out->htotal = in->htotal;
out->hskew = in->hskew;
out->vdisplay = in->vdisplay;
out->vsync_start = in->vsync_start;
out->vsync_end = in->vsync_end;
out->vtotal = in->vtotal;
out->vscan = in->vscan;
out->vrefresh = in->vrefresh;
out->flags = in->flags;
out->type = in->type;
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 
out->status = drm_mode_validate_basic(out);
if (out->status != MODE_OK)
goto out;
 
ret = 0;
 
out:
return ret;
}
/drivers/video/drm/drm_modeset_lock.c
55,39 → 55,27
* drm_modeset_acquire_fini(&ctx);
*/
 
 
/**
* __drm_modeset_lock_all - internal helper to grab all modeset locks
* @dev: DRM device
* @trylock: trylock mode for atomic contexts
* drm_modeset_lock_all - take all modeset locks
* @dev: drm device
*
* This is a special version of drm_modeset_lock_all() which can also be used in
* atomic contexts. Then @trylock must be set to true.
*
* Returns:
* 0 on success or negative error code on failure.
* This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented. Locks must be dropped with
* drm_modeset_unlock_all.
*/
int __drm_modeset_lock_all(struct drm_device *dev,
bool trylock)
void drm_modeset_lock_all(struct drm_device *dev)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx;
int ret;
 
ctx = kzalloc(sizeof(*ctx),
trylock ? GFP_ATOMIC : GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (WARN_ON(!ctx))
return;
 
if (trylock) {
if (!mutex_trylock(&config->mutex))
return -EBUSY;
} else {
mutex_lock(&config->mutex);
}
 
drm_modeset_acquire_init(ctx, 0);
ctx->trylock_only = trylock;
 
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
106,7 → 94,7
 
drm_warn_on_modeset_not_all_locked(dev);
 
return 0;
return;
 
fail:
if (ret == -EDEADLK) {
114,22 → 102,8
goto retry;
}
 
return ret;
kfree(ctx);
}
EXPORT_SYMBOL(__drm_modeset_lock_all);
 
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: drm device
*
* This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented. Locks must be dropped with
* drm_modeset_unlock_all.
*/
void drm_modeset_lock_all(struct drm_device *dev)
{
WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
}
EXPORT_SYMBOL(drm_modeset_lock_all);
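
/*
 * Sketch of the ww-mutex backoff dance used above; drivers that open-code
 * a fine-grained scheme with their own acquire context follow the same
 * shape (some_lock is hypothetical):
 *
 *	retry:
 *		ret = drm_modeset_lock(&some_lock, ctx);
 *		if (ret == -EDEADLK) {
 *			drm_modeset_backoff(ctx);
 *			goto retry;
 *		}
 */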
 
/**
269,10 → 243,10
struct drm_crtc *crtc;
 
/* Locking is currently fubar in the panic handler. */
// if (oops_in_progress)
// return;
if (oops_in_progress)
return;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
drm_for_each_crtc(crtc, dev)
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
333,6 → 307,8
WARN_ON(ctx->contended);
 
if (ctx->trylock_only) {
lockdep_assert_held(&ctx->ww_ctx);
 
if (!ww_mutex_trylock(&lock->mutex))
return -EBUSY;
else
460,18 → 436,17
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_crtc *crtc;
struct drm_plane *plane;
int ret = 0;
 
list_for_each_entry(crtc, &config->crtc_list, head) {
drm_for_each_crtc(crtc, dev) {
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
return ret;
}
 
list_for_each_entry(plane, &config->plane_list, head) {
drm_for_each_plane(plane, dev) {
ret = drm_modeset_lock(&plane->mutex, ctx);
if (ret)
return ret;
/drivers/video/drm/drm_panel.c
0,0 → 1,100
/*
* Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
#include <linux/err.h>
#include <linux/module.h>
 
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
 
static DEFINE_MUTEX(panel_lock);
static LIST_HEAD(panel_list);
 
void drm_panel_init(struct drm_panel *panel)
{
INIT_LIST_HEAD(&panel->list);
}
EXPORT_SYMBOL(drm_panel_init);
 
int drm_panel_add(struct drm_panel *panel)
{
mutex_lock(&panel_lock);
list_add_tail(&panel->list, &panel_list);
mutex_unlock(&panel_lock);
 
return 0;
}
EXPORT_SYMBOL(drm_panel_add);
 
void drm_panel_remove(struct drm_panel *panel)
{
mutex_lock(&panel_lock);
list_del_init(&panel->list);
mutex_unlock(&panel_lock);
}
EXPORT_SYMBOL(drm_panel_remove);
 
int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
{
if (panel->connector)
return -EBUSY;
 
panel->connector = connector;
panel->drm = connector->dev;
 
return 0;
}
EXPORT_SYMBOL(drm_panel_attach);
 
int drm_panel_detach(struct drm_panel *panel)
{
panel->connector = NULL;
panel->drm = NULL;
 
return 0;
}
EXPORT_SYMBOL(drm_panel_detach);
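
/*
 * Usage sketch (illustrative): a panel driver registers itself at probe
 * time; a display controller attaches to it later. struct my_panel and the
 * probe/bind entry points are hypothetical.
 */
#if 0
struct my_panel {
	struct drm_panel base;
};

static int my_panel_probe(struct my_panel *p)
{
	drm_panel_init(&p->base);
	return drm_panel_add(&p->base);
}

static int my_host_bind(struct drm_panel *panel,
			struct drm_connector *connector)
{
	return drm_panel_attach(panel, connector);
}
#endif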
 
#ifdef CONFIG_OF
struct drm_panel *of_drm_find_panel(struct device_node *np)
{
struct drm_panel *panel;
 
mutex_lock(&panel_lock);
 
list_for_each_entry(panel, &panel_list, list) {
if (panel->dev->of_node == np) {
mutex_unlock(&panel_lock);
return panel;
}
}
 
mutex_unlock(&panel_lock);
return NULL;
}
EXPORT_SYMBOL(of_drm_find_panel);
#endif
 
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("DRM panel infrastructure");
MODULE_LICENSE("GPL and additional rights");
/drivers/video/drm/drm_pci.c
27,6 → 27,7
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include "drm_internal.h"
#include "drm_legacy.h"
 
#include <syscall.h>
291,6 → 292,8
* Attempt to gets inter module "drm" information. If we are first
* then register the character device and inter module information.
* Try and register, if we fail to register, backout previous work.
*
* Return: 0 on success or a negative error code on failure.
*/
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver)
/drivers/video/drm/drm_plane_helper.c
91,7 → 91,7
*/
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
drm_for_each_connector(connector, dev) {
if (connector->encoder && connector->encoder->crtc == crtc) {
if (connector_list != NULL && count < num_connectors)
*(connector_list++) = connector;
98,6 → 98,7
 
count++;
}
}
 
return count;
}
142,6 → 143,17
{
int hscale, vscale;
 
if (!fb) {
*visible = false;
return 0;
}
 
/* crtc should only be NULL when disabling (i.e., !fb) */
if (WARN_ON(!crtc)) {
*visible = false;
return 0;
}
 
if (!crtc->enabled && !can_update_disabled) {
DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
return -EINVAL;
155,11 → 167,6
return -ERANGE;
}
 
if (!fb) {
*visible = false;
return 0;
}
 
*visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
if (!*visible)
/*
338,20 → 345,7
};
EXPORT_SYMBOL(drm_primary_helper_funcs);
 
/**
* drm_primary_helper_create_plane() - Create a generic primary plane
* @dev: drm device
* @formats: pixel formats supported, or NULL for a default safe list
* @num_formats: size of @formats; ignored if @formats is NULL
*
* Allocates and initializes a primary plane that can be used with the primary
* plane helpers. Drivers that wish to use driver-specific plane structures or
* provide custom handler functions may perform their own allocation and
* initialization rather than calling this function.
*/
struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
const uint32_t *formats,
int num_formats)
static struct drm_plane *create_primary_plane(struct drm_device *dev)
{
struct drm_plane *primary;
int ret;
362,15 → 356,17
return NULL;
}
 
if (formats == NULL) {
formats = safe_modeset_formats;
num_formats = ARRAY_SIZE(safe_modeset_formats);
}
/*
* Remove the format_default field from drm_plane when dropping
* this helper.
*/
primary->format_default = true;
 
/* possible_crtc's will be filled in later by crtc_init */
ret = drm_universal_plane_init(dev, primary, 0,
&drm_primary_helper_funcs,
formats, num_formats,
safe_modeset_formats,
ARRAY_SIZE(safe_modeset_formats),
DRM_PLANE_TYPE_PRIMARY);
if (ret) {
kfree(primary);
379,7 → 375,6
 
return primary;
}
EXPORT_SYMBOL(drm_primary_helper_create_plane);
 
/**
* drm_crtc_init - Legacy CRTC initialization function
398,7 → 393,7
{
struct drm_plane *primary;
 
primary = drm_primary_helper_create_plane(dev, NULL, 0);
primary = create_primary_plane(dev);
return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
}
EXPORT_SYMBOL(drm_crtc_init);
407,9 → 402,9
struct drm_plane_state *plane_state,
struct drm_framebuffer *old_fb)
{
struct drm_plane_helper_funcs *plane_funcs;
const struct drm_plane_helper_funcs *plane_funcs;
struct drm_crtc *crtc[2];
struct drm_crtc_helper_funcs *crtc_funcs[2];
const struct drm_crtc_helper_funcs *crtc_funcs[2];
int i, ret = 0;
 
plane_funcs = plane->helper_private;
429,8 → 424,10
goto out;
}
 
if (plane_funcs->prepare_fb && plane_state->fb) {
ret = plane_funcs->prepare_fb(plane, plane_state->fb);
if (plane_funcs->prepare_fb && plane_state->fb &&
plane_state->fb != old_fb) {
ret = plane_funcs->prepare_fb(plane,
plane_state);
if (ret)
goto out;
}
440,20 → 437,38
 
for (i = 0; i < 2; i++) {
if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
crtc_funcs[i]->atomic_begin(crtc[i]);
crtc_funcs[i]->atomic_begin(crtc[i], crtc[i]->state);
}
 
/*
* Drivers may optionally implement the ->atomic_disable callback, so
* special-case that here.
*/
if (drm_atomic_plane_disabling(plane, plane_state) &&
plane_funcs->atomic_disable)
plane_funcs->atomic_disable(plane, plane_state);
else
plane_funcs->atomic_update(plane, plane_state);
 
for (i = 0; i < 2; i++) {
if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
crtc_funcs[i]->atomic_flush(crtc[i]);
crtc_funcs[i]->atomic_flush(crtc[i], crtc[i]->state);
}
 
/*
* If we only moved the plane and didn't change fb's, there's no need to
* wait for vblank.
*/
if (plane->state->fb == old_fb)
goto out;
 
for (i = 0; i < 2; i++) {
if (!crtc[i])
continue;
 
if (crtc[i]->cursor == plane)
continue;
 
/* There's no other way to figure out whether the crtc is running. */
ret = drm_crtc_vblank_get(crtc[i]);
if (ret == 0) {
464,8 → 479,8
ret = 0;
}
 
if (plane_funcs->cleanup_fb && old_fb)
plane_funcs->cleanup_fb(plane, old_fb);
if (plane_funcs->cleanup_fb)
plane_funcs->cleanup_fb(plane, plane_state);
out:
if (plane_state) {
if (plane->funcs->atomic_destroy_state)
478,7 → 493,7
}
 
/**
* drm_plane_helper_update() - Helper for primary plane update
* drm_plane_helper_update() - Transitional helper for plane update
* @plane: plane object to update
* @crtc: owning CRTC of owning plane
* @fb: framebuffer to flip onto plane
511,12 → 526,15
 
if (plane->funcs->atomic_duplicate_state)
plane_state = plane->funcs->atomic_duplicate_state(plane);
else if (plane->state)
else {
if (!plane->state)
drm_atomic_helper_plane_reset(plane);
 
plane_state = drm_atomic_helper_plane_duplicate_state(plane);
else
plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
}
if (!plane_state)
return -ENOMEM;
plane_state->plane = plane;
 
plane_state->crtc = crtc;
drm_atomic_set_fb_for_plane(plane_state, fb);
534,7 → 552,7
EXPORT_SYMBOL(drm_plane_helper_update);
 
/**
* drm_plane_helper_disable() - Helper for primary plane disable
* drm_plane_helper_disable() - Transitional helper for plane disable
* @plane: plane to disable
*
* Provides a default plane disable handler using the atomic plane update
557,12 → 575,15
 
if (plane->funcs->atomic_duplicate_state)
plane_state = plane->funcs->atomic_duplicate_state(plane);
else if (plane->state)
else {
if (!plane->state)
drm_atomic_helper_plane_reset(plane);
 
plane_state = drm_atomic_helper_plane_duplicate_state(plane);
else
plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
}
if (!plane_state)
return -ENOMEM;
plane_state->plane = plane;
 
plane_state->crtc = NULL;
drm_atomic_set_fb_for_plane(plane_state, NULL);
/drivers/video/drm/drm_probe_helper.c
58,40 → 58,84
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);
 
static void drm_mode_validate_flag(struct drm_connector *connector,
static enum drm_mode_status
drm_mode_validate_flag(const struct drm_display_mode *mode,
int flags)
{
struct drm_display_mode *mode;
 
if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
DRM_MODE_FLAG_3D_MASK))
return;
 
list_for_each_entry(mode, &connector->modes, head) {
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
!(flags & DRM_MODE_FLAG_INTERLACE))
mode->status = MODE_NO_INTERLACE;
return MODE_NO_INTERLACE;
 
if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
!(flags & DRM_MODE_FLAG_DBLSCAN))
mode->status = MODE_NO_DBLESCAN;
return MODE_NO_DBLESCAN;
 
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
!(flags & DRM_MODE_FLAG_3D_MASK))
mode->status = MODE_NO_STEREO;
return MODE_NO_STEREO;
 
return MODE_OK;
}
 
static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
{
struct drm_display_mode *mode;
 
if (!connector->cmdline_mode.specified)
return 0;
 
mode = drm_mode_create_from_cmdline_mode(connector->dev,
&connector->cmdline_mode);
if (mode == NULL)
return 0;
 
drm_mode_probed_add(connector, mode);
return 1;
}
 
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
/**
* drm_kms_helper_poll_enable_locked - re-enable output polling.
* @dev: drm_device
*
* This function re-enables the output polling work without
* locking the mode_config mutex.
*
* This is like drm_kms_helper_poll_enable() however it is to be
* called from a context where the mode_config mutex is locked
* already.
*/
void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
{
bool poll = false;
struct drm_connector *connector;
 
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
return;
 
drm_for_each_connector(connector, dev) {
if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT))
poll = true;
}
 
// if (poll)
// schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
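/*
 * Usage sketch, illustration only: callers already holding the
 * mode_config mutex (e.g. a hypothetical resume path) use the _locked
 * variant directly; everyone else goes through
 * drm_kms_helper_poll_enable(), which takes the mutex itself.
 */
#if 0 /* illustration only */
static void example_resume_polling(struct drm_device *dev)
{
mutex_lock(&dev->mode_config.mutex);
/* ... re-check connector state under the lock ... */
drm_kms_helper_poll_enable_locked(dev);
mutex_unlock(&dev->mode_config.mutex);
}
#endif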
 
 
static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY, bool merge_type_bits)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
struct drm_connector_helper_funcs *connector_funcs =
const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
int count = 0;
int mode_flags = 0;
bool verbose_prune = true;
enum drm_connector_status old_status;
 
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
110,12 → 154,14
if (connector->funcs->force)
connector->funcs->force(connector);
} else {
old_status = connector->status;
 
connector->status = connector->funcs->detect(connector, true);
}
 
/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
drm_kms_helper_poll_enable(dev);
drm_kms_helper_poll_enable_locked(dev);
 
dev->mode_config.poll_running = drm_kms_helper_poll;
 
136,6 → 182,7
struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
 
count = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
} else
count = (*connector_funcs->get_modes)(connector);
}
147,9 → 194,6
 
drm_mode_connector_list_update(connector, merge_type_bits);
 
if (maxX && maxY)
drm_mode_validate_size(dev, &connector->modes, maxX, maxY);
 
if (connector->interlace_allowed)
mode_flags |= DRM_MODE_FLAG_INTERLACE;
if (connector->doublescan_allowed)
156,9 → 200,17
mode_flags |= DRM_MODE_FLAG_DBLSCAN;
if (connector->stereo_allowed)
mode_flags |= DRM_MODE_FLAG_3D_MASK;
drm_mode_validate_flag(connector, mode_flags);
 
list_for_each_entry(mode, &connector->modes, head) {
if (mode->status == MODE_OK)
mode->status = drm_mode_validate_basic(mode);
 
if (mode->status == MODE_OK)
mode->status = drm_mode_validate_size(mode, maxX, maxY);
 
if (mode->status == MODE_OK)
mode->status = drm_mode_validate_flag(mode, mode_flags);
 
if (mode->status == MODE_OK && connector_funcs->mode_valid)
mode->status = connector_funcs->mode_valid(connector,
mode);
251,7 → 303,6
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
 
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
258,13 → 309,17
struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
struct drm_connector *connector;
enum drm_connector_status old_status;
bool repoll = false, changed = false;
bool repoll = false, changed;
 
/* Pick up any changes detected by the probe functions. */
changed = dev->mode_config.delayed_event;
dev->mode_config.delayed_event = false;
 
if (!drm_kms_helper_poll)
return;
goto out;
 
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_for_each_connector(connector, dev) {
 
/* Ignore forced connectors. */
if (connector->force)
275,8 → 330,6
if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
continue;
 
repoll = true;
 
old_status = connector->status;
/* if we are connected and don't want to poll for disconnect
skip it */
284,10 → 337,30
!(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
continue;
 
repoll = true;
 
connector->status = connector->funcs->detect(connector, false);
if (old_status != connector->status) {
const char *old, *new;
 
/*
* The poll work sets force=false when calling detect so
* that drivers can avoid to do disruptive tests (e.g.
* when load detect cycles could cause flickering on
* other, running displays). This bears the risk that we
* flip-flop between unknown here in the poll work and
* the real state when userspace forces a full detect
* call after receiving a hotplug event due to this
* change.
*
* Hence clamp an unknown detect status to the old
* value.
*/
if (connector->status == connector_status_unknown) {
connector->status = old_status;
continue;
}
 
old = drm_get_connector_status_name(old_status);
new = drm_get_connector_status_name(connector->status);
 
303,6 → 376,8
 
mutex_unlock(&dev->mode_config.mutex);
 
out: ;
 
// if (changed)
// drm_kms_helper_hotplug_event(dev);
 
340,21 → 415,10
*/
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
bool poll = false;
struct drm_connector *connector;
 
if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
return;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT))
poll = true;
mutex_lock(&dev->mode_config.mutex);
drm_kms_helper_poll_enable_locked(dev);
mutex_unlock(&dev->mode_config.mutex);
}
 
// if (poll)
// schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
/**
428,7 → 492,7
return false;
 
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_for_each_connector(connector, dev) {
 
/* Only handle HPD capable connectors. */
if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
/drivers/video/drm/drm_rect.c
330,7 → 330,7
}
}
 
switch (rotation & 0xf) {
switch (rotation & DRM_ROTATE_MASK) {
case BIT(DRM_ROTATE_0):
break;
case BIT(DRM_ROTATE_90):
390,7 → 390,7
{
struct drm_rect tmp;
 
switch (rotation & 0xf) {
switch (rotation & DRM_ROTATE_MASK) {
case BIT(DRM_ROTATE_0):
break;
case BIT(DRM_ROTATE_90):
/drivers/video/drm/drm_stub.c
49,12 → 49,6
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
 
/*
* Default to use monotonic timestamps for wait-for-vblank and page-flip
* complete events.
*/
unsigned int drm_timestamp_monotonic = 1;
 
struct idr drm_minors_idr;
 
void drm_err(const char *format, ...)
568,4 → 562,11
return;
}
 
int drm_sysfs_connector_add(struct drm_connector *connector)
{
return 0;
}
 
void drm_sysfs_connector_remove(struct drm_connector *connector)
{
}
/drivers/video/drm/drm_vma_manager.c
50,8 → 50,7
*
* You must not use multiple offset managers on a single address_space.
* Otherwise, mm-core will be unable to tear down memory mappings as the VM will
* no longer be linear. Please use VM_NONLINEAR in that case and implement your
* own offset managers.
* no longer be linear.
*
* This offset manager works on page-based addresses. That is, every argument
* and return code (with the exception of drm_vma_node_offset_addr()) is given
113,7 → 112,7
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
 
/**
* drm_vma_offset_lookup() - Find node in offset space
* drm_vma_offset_lookup_locked() - Find node in offset space
* @mgr: Manager object
* @start: Start address for object (page-based)
* @pages: Size of object (page-based)
123,38 → 122,22
* region and the given node will be returned, as long as the node spans the
* whole requested area (given the size in number of pages as @pages).
*
* Note that before lookup the vma offset manager lookup lock must be acquired
* with drm_vma_offset_lock_lookup(). See there for an example. This can then be
* used to implement weakly referenced lookups using kref_get_unless_zero().
*
* Example:
* drm_vma_offset_lock_lookup(mgr);
* node = drm_vma_offset_lookup_locked(mgr, start, pages);
* if (node)
* kref_get_unless_zero(container_of(node, sth, entr));
* drm_vma_offset_unlock_lookup(mgr);
*
* RETURNS:
* Returns NULL if no suitable node can be found. Otherwise, the best match
* is returned. It's the caller's responsibility to make sure the node doesn't
* get destroyed before the caller can access it.
*/
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages)
{
struct drm_vma_offset_node *node;
 
read_lock(&mgr->vm_lock);
node = drm_vma_offset_lookup_locked(mgr, start, pages);
read_unlock(&mgr->vm_lock);
 
return node;
}
EXPORT_SYMBOL(drm_vma_offset_lookup);
 
/**
* drm_vma_offset_lookup_locked() - Find node in offset space
* @mgr: Manager object
* @start: Start address for object (page-based)
* @pages: Size of object (page-based)
*
* Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
* manually. See drm_vma_offset_lock_lookup() for an example.
*
* RETURNS:
* Returns NULL if no suitable node can be found. Otherwise, the best match
* is returned.
*/
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages)
/drivers/video/drm/i915/Gtt/intel-gtt.c
120,6 → 120,164
#define IS_IRONLAKE intel_private.driver->is_ironlake
#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
 
#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_gtt_map_memory(struct page **pages,
unsigned int num_entries,
struct sg_table *st)
{
struct scatterlist *sg;
int i;
 
DBG("try mapping %lu pages\n", (unsigned long)num_entries);
 
if (sg_alloc_table(st, num_entries, GFP_KERNEL))
goto err;
 
for_each_sg(st->sgl, sg, num_entries, i)
sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 
if (!pci_map_sg(intel_private.pcidev,
st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
goto err;
 
return 0;
 
err:
sg_free_table(st);
return -ENOMEM;
}
 
static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
{
struct sg_table st;
DBG("try unmapping %lu pages\n", (unsigned long)num_sg);
 
pci_unmap_sg(intel_private.pcidev, sg_list,
num_sg, PCI_DMA_BIDIRECTIONAL);
 
st.sgl = sg_list;
st.orig_nents = st.nents = num_sg;
 
sg_free_table(&st);
}
 
static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
return;
}
 
/* Exists to support ARGB cursors */
static struct page *i8xx_alloc_pages(void)
{
struct page *page;
 
page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
if (page == NULL)
return NULL;
 
if (set_pages_uc(page, 4) < 0) {
set_pages_wb(page, 4);
__free_pages(page, 2);
return NULL;
}
atomic_inc(&agp_bridge->current_memory_agp);
return page;
}
 
static void i8xx_destroy_pages(struct page *page)
{
if (page == NULL)
return;
 
set_pages_wb(page, 4);
__free_pages(page, 2);
atomic_dec(&agp_bridge->current_memory_agp);
}
#endif
 
#if IS_ENABLED(CONFIG_AGP_INTEL)
static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
int i;
 
if ((pg_start + mem->page_count)
> intel_private.num_dcache_entries)
return -EINVAL;
 
if (!mem->is_flushed)
global_cache_flush();
 
for (i = pg_start; i < (pg_start + mem->page_count); i++) {
dma_addr_t addr = i << PAGE_SHIFT;
intel_private.driver->write_entry(addr,
i, type);
}
wmb();
 
return 0;
}
 
/*
* The i810/i830 requires a physical address to program its mouse
* pointer into hardware.
* However the X server still writes to it through the AGP aperture.
*/
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
struct agp_memory *new;
struct page *page;
 
switch (pg_count) {
case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
break;
case 4:
/* kludge to get 4 physical pages for ARGB cursor */
page = i8xx_alloc_pages();
break;
default:
return NULL;
}
 
if (page == NULL)
return NULL;
 
new = agp_create_memory(pg_count);
if (new == NULL)
return NULL;
 
new->pages[0] = page;
if (pg_count == 4) {
/* kludge to get 4 physical pages for ARGB cursor */
new->pages[1] = new->pages[0] + 1;
new->pages[2] = new->pages[1] + 1;
new->pages[3] = new->pages[2] + 1;
}
new->page_count = pg_count;
new->num_scratch_pages = pg_count;
new->type = AGP_PHYS_MEMORY;
new->physical = page_to_phys(new->pages[0]);
return new;
}
 
static void intel_i810_free_by_type(struct agp_memory *curr)
{
agp_free_key(curr->key);
if (curr->type == AGP_PHYS_MEMORY) {
if (curr->page_count == 4)
i8xx_destroy_pages(curr->pages[0]);
else {
agp_bridge->driver->agp_destroy_page(curr->pages[0],
AGP_PAGE_DESTROY_UNMAP);
agp_bridge->driver->agp_destroy_page(curr->pages[0],
AGP_PAGE_DESTROY_FREE);
}
agp_free_page_array(curr);
}
kfree(curr);
}
#endif
 
static int intel_gtt_setup_scratch_page(void)
{
struct page *page;
373,7 → 531,7
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
intel_iommu_gfx_mapped)
return 1;
456,7 → 614,27
return 0;
}
 
#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_fetch_size(void)
{
int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
unsigned int aper_size;
int i;
 
aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
 
for (i = 0; i < num_sizes; i++) {
if (aper_size == intel_fake_agp_sizes[i].size) {
agp_bridge->current_size =
(void *) (intel_fake_agp_sizes + i);
return aper_size;
}
}
 
return 0;
}
#endif
 
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
511,7 → 689,35
 
return true;
}
EXPORT_SYMBOL(intel_enable_gtt);
 
#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
agp_bridge->gatt_table_real = NULL;
agp_bridge->gatt_table = NULL;
agp_bridge->gatt_bus_addr = 0;
 
return 0;
}
 
static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
return 0;
}
 
static int intel_fake_agp_configure(void)
{
if (!intel_enable_gtt())
return -EIO;
 
intel_private.clear_fake_agp = true;
agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
 
return 0;
}
#endif
 
static bool i830_check_flags(unsigned int flags)
{
switch (flags) {
545,7 → 751,7
j++;
}
}
readl(intel_private.gtt+j-1);
wmb();
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
 
562,7 → 768,7
intel_private.driver->write_entry(addr,
j, flags);
}
readl(intel_private.gtt+j-1);
wmb();
}
 
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
625,8 → 831,53
intel_private.driver->write_entry(intel_private.scratch_page_dma,
i, 0);
}
readl(intel_private.gtt+i-1);
wmb();
}
EXPORT_SYMBOL(intel_gtt_clear_range);
 
#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
off_t pg_start, int type)
{
if (mem->page_count == 0)
return 0;
 
intel_gtt_clear_range(pg_start, mem->page_count);
 
if (intel_private.needs_dmar) {
intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
mem->sg_list = NULL;
mem->num_sg = 0;
}
 
return 0;
}
 
static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
int type)
{
struct agp_memory *new;
 
if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
if (pg_count != intel_private.num_dcache_entries)
return NULL;
 
new = agp_create_memory(1);
if (new == NULL)
return NULL;
 
new->type = AGP_DCACHE_MEMORY;
new->page_count = pg_count;
new->num_scratch_pages = 0;
agp_free_page_array(new);
return new;
}
if (type == AGP_PHYS_MEMORY)
return alloc_agpphysmem_i8xx(pg_count, type);
/* always return NULL for other allocation types for now */
return NULL;
}
#endif
static void intel_i915_setup_chipset_flush(void)
{
int ret;
769,6 → 1020,29
return 0;
}
 
#if IS_ENABLED(CONFIG_AGP_INTEL)
static const struct agp_bridge_driver intel_fake_agp_driver = {
.owner = THIS_MODULE,
.size_type = FIXED_APER_SIZE,
.aperture_sizes = intel_fake_agp_sizes,
.num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
.configure = intel_fake_agp_configure,
.fetch_size = intel_fake_agp_fetch_size,
.cleanup = intel_gtt_cleanup,
.agp_enable = intel_fake_agp_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = intel_fake_agp_create_gatt_table,
.free_gatt_table = intel_fake_agp_free_gatt_table,
.insert_memory = intel_fake_agp_insert_entries,
.remove_memory = intel_fake_agp_remove_entries,
.alloc_by_type = intel_fake_agp_alloc_by_type,
.free_by_type = intel_i810_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
};
#endif
static const struct intel_gtt_driver i915_gtt_driver = {
.gen = 3,
.has_pgtbl_enable = 1,
970,8 → 1244,8
}
EXPORT_SYMBOL(intel_gmch_probe);
 
void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
phys_addr_t *mappable_base, unsigned long *mappable_end)
void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
phys_addr_t *mappable_base, u64 *mappable_end)
{
*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
*stolen_size = intel_private.stolen_size;
988,5 → 1262,5
EXPORT_SYMBOL(intel_gtt_chipset_flush);
 
 
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");
/drivers/video/drm/i915/Makefile
1,12 → 1,12
 
CC = kos32-gcc
FASM = fasm.exe
 
CC = gcc
FASM = e:/fasm/fasm.exe
DEFINES = -DDRM_DEBUG_CODE=1 -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU
DEFINES += -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI -DKBUILD_MODNAME=\"i915.dll\"
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE -DCONFIG_DRM_I915_FBDEV -DCONFIG_DMI -DKBUILD_MODNAME=\"i915.dll\"
 
 
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRM_TOPDIR = $(CURDIR)/..
17,7 → 17,7
-I$(DRV_INCLUDES)/drm -I./ -I$(DRV_INCLUDES)
 
CFLAGS= -c -O2 $(INCLUDES) $(DEFINES) -march=i686 -msse2 -fomit-frame-pointer -fno-ident -fno-builtin-printf
CFLAGS+= -mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields
CFLAGS+= -mno-stack-arg-probe -mno-ms-bitfields
 
LIBPATH:= $(DDK_TOPDIR)
 
56,8 → 56,10
i915_dma.c \
i915_drv.c \
i915_gem.c \
i915_gem_batch_pool.c \
i915_gem_context.c \
i915_gem_execbuffer.c \
i915_gem_fence.c \
i915_gem_evict.c \
i915_gem_gtt.c \
i915_gem_render_state.c \
64,10 → 66,15
i915_gem_stolen.c \
i915_gem_tiling.c \
i915_gpu_error.c \
i915_guc_submission.c \
i915_irq.c \
i915_params.c \
i915_vgpu.c \
intel_atomic.c \
intel_atomic_plane.c \
intel_audio.c \
intel_bios.c \
intel_csr.c \
intel_crt.c \
intel_ddi.c \
intel_display.c \
78,13 → 85,16
intel_dsi_panel_vbt.c \
intel_dsi_pll.c \
intel_dvo.c \
intel_fbc.c \
intel_fbdev.c \
intel_fifo_underrun.c \
intel_frontbuffer.c \
intel_guc_loader.c \
intel_hdmi.c \
intel_i2c.c \
intel_lrc.c \
intel_lvds.c \
intel_mocs.c \
intel_modes.c \
intel_panel.c \
intel_pm.c \
100,8 → 110,8
intel_sprite.c \
intel_uncore.c \
kms_display.c \
kos_gem_fb.c \
utils.c \
fwblob.asm \
../hdmi.c \
Gtt/intel-agp.c \
Gtt/intel-gtt.c \
116,14 → 126,18
$(DRM_TOPDIR)/drm_dp_helper.c \
../drm_dp_mst_topology.c \
$(DRM_TOPDIR)/drm_atomic.c \
$(DRM_TOPDIR)/drm_atomic_helper.c \
$(DRM_TOPDIR)/drm_bridge.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_gem.c \
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_mipi_dsi.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_modeset_lock.c \
$(DRM_TOPDIR)/drm_pci.c \
$(DRM_TOPDIR)/drm_panel.c \
$(DRM_TOPDIR)/drm_plane_helper.c \
$(DRM_TOPDIR)/drm_probe_helper.c \
$(DRM_TOPDIR)/drm_rect.c \
136,7 → 150,6
$(patsubst %.c, %.o, $(NAME_SRC))))
 
 
 
all: $(NAME).dll
 
$(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) i915.lds Makefile
149,7 → 162,10
%.o : %.S $(HFILES) Makefile
as -o $@ $<
 
fwblob.o: fwblob.asm $(FW_BINS) Makefile
$(FASM) $< $@
 
 
clean:
-rm -f ../*/*.o
 
/drivers/video/drm/i915/Makefile.lto
1,9 → 1,10
 
CC = gcc
FASM = e:/fasm/fasm.exe
CC = kos32-gcc
FASM = fasm.exe
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE -DCONFIG_DRM_I915_FBDEV -DCONFIG_DMI -DKBUILD_MODNAME=\"i915.dll\"
DEFINES = -DDRM_DEBUG_CODE=1 -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU
DEFINES += -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI -DKBUILD_MODNAME=\"i915.dll\"
 
 
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
54,8 → 55,10
i915_dma.c \
i915_drv.c \
i915_gem.c \
i915_gem_batch_pool.c \
i915_gem_context.c \
i915_gem_execbuffer.c \
i915_gem_fence.c \
i915_gem_evict.c \
i915_gem_gtt.c \
i915_gem_render_state.c \
62,10 → 65,15
i915_gem_stolen.c \
i915_gem_tiling.c \
i915_gpu_error.c \
i915_guc_submission.c \
i915_irq.c \
i915_params.c \
i915_vgpu.c \
intel_atomic.c \
intel_atomic_plane.c \
intel_audio.c \
intel_bios.c \
intel_csr.c \
intel_crt.c \
intel_ddi.c \
intel_display.c \
76,13 → 84,16
intel_dsi_panel_vbt.c \
intel_dsi_pll.c \
intel_dvo.c \
intel_fbc.c \
intel_fbdev.c \
intel_fifo_underrun.c \
intel_frontbuffer.c \
intel_guc_loader.c \
intel_hdmi.c \
intel_i2c.c \
intel_lrc.c \
intel_lvds.c \
intel_mocs.c \
intel_modes.c \
intel_panel.c \
intel_pm.c \
98,8 → 109,8
intel_sprite.c \
intel_uncore.c \
kms_display.c \
kos_gem_fb.c \
utils.c \
fwblob.asm \
../hdmi.c \
Gtt/intel-agp.c \
Gtt/intel-gtt.c \
114,14 → 125,18
$(DRM_TOPDIR)/drm_dp_helper.c \
../drm_dp_mst_topology.c \
$(DRM_TOPDIR)/drm_atomic.c \
$(DRM_TOPDIR)/drm_atomic_helper.c \
$(DRM_TOPDIR)/drm_bridge.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_gem.c \
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_mipi_dsi.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_modeset_lock.c \
$(DRM_TOPDIR)/drm_pci.c \
$(DRM_TOPDIR)/drm_panel.c \
$(DRM_TOPDIR)/drm_plane_helper.c \
$(DRM_TOPDIR)/drm_probe_helper.c \
$(DRM_TOPDIR)/drm_rect.c \
/drivers/video/drm/i915/dvo.h
94,8 → 94,8
* after this function is called.
*/
void (*mode_set)(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode);
 
/*
* Probe for a connected output, and return detect_status.
/drivers/video/drm/i915/dvo_ch7017.c
255,8 → 255,8
}
 
static void ch7017_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
uint8_t outputs_enable, lvds_control_2, lvds_power_down;
/drivers/video/drm/i915/dvo_ch7xxx.c
275,8 → 275,8
}
 
static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
uint8_t tvco, tpcp, tpd, tlpf, idf;
 
/drivers/video/drm/i915/dvo_ivch.c
22,7 → 22,11
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Thomas Richter <thor@math.tu-berlin.de>
*
* Minor modifications (Dithering enable):
* Thomas Richter <thor@math.tu-berlin.de>
*
*/
 
#include "dvo.h"
59,6 → 63,8
# define VR01_DVO_BYPASS_ENABLE (1 << 1)
/** Enables the DVO clock */
# define VR01_DVO_ENABLE (1 << 0)
/** Enable dithering for 18bpp panels. Not documented. */
# define VR01_DITHER_ENABLE (1 << 4)
 
/*
* LCD Interface Format
74,6 → 80,8
# define VR10_INTERFACE_2X18 (2 << 2)
/** Enables 2x24-bit LVDS output */
# define VR10_INTERFACE_2X24 (3 << 2)
/** Mask that defines the depth of the pipeline */
# define VR10_INTERFACE_DEPTH_MASK (3 << 2)
 
/*
* VR20 LCD Horizontal Display Size
83,7 → 91,7
/*
* LCD Vertical Display Size
*/
#define VR21 0x20
#define VR21 0x21
 
/*
* Panel power down status
148,16 → 156,33
# define VR8F_POWER_MASK (0x3c)
# define VR8F_POWER_POS (2)
 
/* Some BIOS implementations do not restore the DVO state upon
* resume from standby. Thus, this driver has to handle it
* instead. The following list contains all registers that
* require saving.
*/
static const uint16_t backup_addresses[] = {
0x11, 0x12,
0x18, 0x19, 0x1a, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x8e, 0x8f,
0x10 /* this must come last */
};
 
 
struct ivch_priv {
bool quiet;
 
uint16_t width, height;
 
/* Register backup */
 
uint16_t reg_backup[ARRAY_SIZE(backup_addresses)];
};
 
 
static void ivch_dump_regs(struct intel_dvo_device *dvo);
 
/**
* Reads a register on the ivch.
*
239,6 → 264,7
{
struct ivch_priv *priv;
uint16_t temp;
int i;
 
priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
if (priv == NULL)
266,6 → 292,14
ivch_read(dvo, VR20, &priv->width);
ivch_read(dvo, VR21, &priv->height);
 
/* Make a backup of the registers to be able to restore them
* upon suspend.
*/
for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
ivch_read(dvo, backup_addresses[i], priv->reg_backup + i);
 
ivch_dump_regs(dvo);
 
return true;
 
out:
287,6 → 321,23
return MODE_OK;
}
 
/* Restore the DVO registers after a resume
* from RAM. Registers have been saved during
* the initialization.
*/
static void ivch_reset(struct intel_dvo_device *dvo)
{
struct ivch_priv *priv = dvo->dev_priv;
int i;
 
DRM_DEBUG_KMS("Resetting the IVCH registers\n");
 
ivch_write(dvo, VR10, 0x0000);
 
for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]);
}
 
/** Sets the power state of the panel connected to the ivch */
static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
293,6 → 344,8
int i;
uint16_t vr01, vr30, backlight;
 
ivch_reset(dvo);
 
/* Set the new power state of the panel. */
if (!ivch_read(dvo, VR01, &vr01))
return;
301,6 → 354,7
backlight = 1;
else
backlight = 0;
 
ivch_write(dvo, VR80, backlight);
 
if (enable)
327,6 → 381,8
{
uint16_t vr01;
 
ivch_reset(dvo);
 
/* Set the new power state of the panel. */
if (!ivch_read(dvo, VR01, &vr01))
return false;
338,26 → 394,36
}
 
static void ivch_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct ivch_priv *priv = dvo->dev_priv;
uint16_t vr40 = 0;
uint16_t vr01;
uint16_t vr01 = 0;
uint16_t vr10;
 
vr01 = 0;
ivch_reset(dvo);
 
vr10 = priv->reg_backup[ARRAY_SIZE(backup_addresses) - 1];
 
/* Enable dithering for 18 bpp pipelines */
vr10 &= VR10_INTERFACE_DEPTH_MASK;
if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18)
vr01 = VR01_DITHER_ENABLE;
 
vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
VR40_HORIZONTAL_INTERP_ENABLE);
 
if (mode->hdisplay != adjusted_mode->hdisplay ||
mode->vdisplay != adjusted_mode->vdisplay) {
if (mode->hdisplay != adjusted_mode->crtc_hdisplay ||
mode->vdisplay != adjusted_mode->crtc_vdisplay) {
uint16_t x_ratio, y_ratio;
 
vr01 |= VR01_PANEL_FIT_ENABLE;
vr40 |= VR40_CLOCK_GATING_ENABLE;
x_ratio = (((mode->hdisplay - 1) << 16) /
(adjusted_mode->hdisplay - 1)) >> 2;
(adjusted_mode->crtc_hdisplay - 1)) >> 2;
y_ratio = (((mode->vdisplay - 1) << 16) /
(adjusted_mode->vdisplay - 1)) >> 2;
(adjusted_mode->crtc_vdisplay - 1)) >> 2;
ivch_write(dvo, VR42, x_ratio);
ivch_write(dvo, VR41, y_ratio);
} else {
368,8 → 434,6
 
ivch_write(dvo, VR01, vr01);
ivch_write(dvo, VR40, vr40);
 
ivch_dump_regs(dvo);
}
 
static void ivch_dump_regs(struct intel_dvo_device *dvo)
380,6 → 444,8
DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
ivch_read(dvo, VR01, &val);
DRM_DEBUG_KMS("VR01: 0x%04x\n", val);
ivch_read(dvo, VR10, &val);
DRM_DEBUG_KMS("VR10: 0x%04x\n", val);
ivch_read(dvo, VR30, &val);
DRM_DEBUG_KMS("VR30: 0x%04x\n", val);
ivch_read(dvo, VR40, &val);
/drivers/video/drm/i915/dvo_ns2501.c
60,6 → 60,130
 
#define NS2501_REGC 0x0c
 
/*
* The following registers are not part of the official datasheet
* and are the result of reverse engineering.
*/
 
/*
* Register c0 controls how the DVO synchronizes with
* its input.
*/
#define NS2501_REGC0 0xc0
#define NS2501_C0_ENABLE (1<<0) /* enable the DVO sync in general */
#define NS2501_C0_HSYNC (1<<1) /* synchronize horizontal with input */
#define NS2501_C0_VSYNC (1<<2) /* synchronize vertical with input */
#define NS2501_C0_RESET (1<<7) /* reset the synchronization flip/flops */
 
/*
* Register 41 is somehow related to the sync register and sync
* configuration. It should be 0x32 whenever regC0 is 0x05 (hsync off)
* and 0x00 otherwise.
*/
#define NS2501_REG41 0x41
 
/*
* This register controls the dithering of the DVO.
* One bit enables it, the others define the dithering depth.
* The higher the value, the lower the dithering depth.
*/
#define NS2501_F9_REG 0xf9
#define NS2501_F9_ENABLE (1<<0) /* if set, dithering is enabled */
#define NS2501_F9_DITHER_MASK (0x7f<<1) /* controls the dither depth */
#define NS2501_F9_DITHER_SHIFT 1 /* shifts the dither mask */
 
/*
* PLL configuration registers. This consists of a single-byte
* register at 1B and a register pair at 1C,1D.
* These registers are counters/dividers.
*/
#define NS2501_REG1B 0x1b /* one byte PLL control register */
#define NS2501_REG1C 0x1c /* low-part of the second register */
#define NS2501_REG1D 0x1d /* high-part of the second register */
 
/*
* Scaler control registers. Horizontal at b8,b9,
* vertical at 10,11. The scale factor is computed as
* 2^16/control-value. The low-byte comes first.
*/
#define NS2501_REG10 0x10 /* low-byte vertical scaler */
#define NS2501_REG11 0x11 /* high-byte vertical scaler */
#define NS2501_REGB8 0xb8 /* low-byte horizontal scaler */
#define NS2501_REGB9 0xb9 /* high-byte horizontal scaler */
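/*
 * Worked example, a sketch rather than driver code: since the scale
 * factor is 2^16/control-value, the control value for an input-to-panel
 * ratio is in * 65536 / out. Scaling 640 input pixels onto the
 * 1024-wide panel gives 640 * 65536 / 1024 = 40960, matching the hscale
 * entry of the 640x480 mode below (the 800x600 entries deviate a
 * little, consistent with the manual tweaking noted at the mode table).
 */
#if 0 /* illustration only */
static inline uint16_t example_scale_value(uint32_t in, uint32_t out)
{
return (uint16_t)((in << 16) / out); /* 640, 1024 -> 40960 */
}
#endif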
 
/*
* Display window definition. This consists of four registers
* per dimension. One register pair defines the start of the
* display, one the end.
* As far as I understand, this defines the window within which
* the scaler samples the input.
*/
#define NS2501_REGC1 0xc1 /* low-byte horizontal display start */
#define NS2501_REGC2 0xc2 /* high-byte horizontal display start */
#define NS2501_REGC3 0xc3 /* low-byte horizontal display stop */
#define NS2501_REGC4 0xc4 /* high-byte horizontal display stop */
#define NS2501_REGC5 0xc5 /* low-byte vertical display start */
#define NS2501_REGC6 0xc6 /* high-byte vertical display start */
#define NS2501_REGC7 0xc7 /* low-byte vertical display stop */
#define NS2501_REGC8 0xc8 /* high-byte vertical display stop */
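/*
 * Worked example (values from the 640x480 configuration below):
 * hstart = 144 is programmed low byte first, so REGC1 = 144 & 0xff =
 * 0x90 and REGC2 = 144 >> 8 = 0x00, exactly the c1/c2 values the old
 * per-mode register table used for this mode.
 */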
 
/*
* The following register pair seems to define the start of
* the vertical sync. If automatic syncing is enabled, and the
* register value defines a sync pulse that is later than the
* incoming sync, then the register value is ignored and the
* external hsync triggers the synchronization.
*/
#define NS2501_REG80 0x80 /* low-byte vsync-start */
#define NS2501_REG81 0x81 /* high-byte vsync-start */
 
/*
* The following register pair seems to define the total number
* of lines created at the output side of the scaler.
* This is again a low-high register pair.
*/
#define NS2501_REG82 0x82 /* output display height, low byte */
#define NS2501_REG83 0x83 /* output display height, high byte */
 
/*
* The following registers define the end of the front-porch
* in horizontal and vertical position and hence allow shifting
* the image left/right or up/down.
*/
#define NS2501_REG98 0x98 /* horizontal start of display + 256, low */
#define NS2501_REG99 0x99 /* horizontal start of display + 256, high */
#define NS2501_REG8E 0x8e /* vertical start of the display, low byte */
#define NS2501_REG8F 0x8f /* vertical start of the display, high byte */
 
/*
* The following register pair control the function of the
* backlight and the DVO output. To enable the corresponding
* function, the corresponding bit must be set in both registers.
*/
#define NS2501_REG34 0x34 /* DVO enable functions, first register */
#define NS2501_REG35 0x35 /* DVO enable functions, second register */
#define NS2501_34_ENABLE_OUTPUT (1<<0) /* enable DVO output */
#define NS2501_34_ENABLE_BACKLIGHT (1<<1) /* enable backlight */
 
/*
* Registers 9C and 9D define the vertical output offset
* of the visible region.
*/
#define NS2501_REG9C 0x9c
#define NS2501_REG9D 0x9d
 
/*
* The register 9F defines the dithering. This requires the
* scaler to be ON. Bit 0 enables dithering, the remaining
* bits control the depth of the dither. The higher the value,
* the LOWER the dithering amplitude. A good value seems to be
* 15 (total register value).
*/
#define NS2501_REGF9 0xf9
#define NS2501_F9_ENABLE_DITHER (1<<0) /* enable dithering */
#define NS2501_F9_DITHER_MASK (0x7f<<1) /* dither masking */
#define NS2501_F9_DITHER_SHIFT 1 /* upshift of the dither mask */
 
enum {
MODE_640x480,
MODE_800x600,
72,274 → 196,178
};
 
/*
* Magic values based on what the BIOS on
* Fujitsu-Siemens Lifebook S6010 programs (1024x768 panel).
* The following structure keeps the complete configuration of
* the DVO, given a specific output configuration.
* This is pretty much guess-work from reverse-engineering, so
* read all this with a grain of salt.
*/
static const struct ns2501_reg regs_1024x768[][86] = {
struct ns2501_configuration {
uint8_t sync; /* configuration of the C0 register */
uint8_t conf; /* configuration register 8 */
uint8_t syncb; /* configuration register 41 */
uint8_t dither; /* configuration of the dithering */
uint8_t pll_a; /* PLL configuration, register A, 1B */
uint16_t pll_b; /* PLL configuration, register B, 1C/1D */
uint16_t hstart; /* horizontal start, registers C1/C2 */
uint16_t hstop; /* horizontal total, registers C3/C4 */
uint16_t vstart; /* vertical start, registers C5/C6 */
uint16_t vstop; /* vertical total, registers C7/C8 */
uint16_t vsync; /* manual vertical sync start, 80/81 */
uint16_t vtotal; /* number of lines generated, 82/83 */
uint16_t hpos; /* horizontal position + 256, 98/99 */
uint16_t vpos; /* vertical position, 8e/8f */
uint16_t voffs; /* vertical output offset, 9c/9d */
uint16_t hscale; /* horizontal scaling factor, b8/b9 */
uint16_t vscale; /* vertical scaling factor, 10/11 */
};
 
/*
* DVO configuration values, partially based on what the BIOS
* of the Fujitsu Lifebook S6010 writes into registers,
* partially found by manual tweaking. These configurations assume
* a 1024x768 panel.
*/
static const struct ns2501_configuration ns2501_modes[] = {
[MODE_640x480] = {
[0] = { .offset = 0x0a, .value = 0x81, },
[1] = { .offset = 0x18, .value = 0x07, },
[2] = { .offset = 0x19, .value = 0x00, },
[3] = { .offset = 0x1a, .value = 0x00, },
[4] = { .offset = 0x1b, .value = 0x11, },
[5] = { .offset = 0x1c, .value = 0x54, },
[6] = { .offset = 0x1d, .value = 0x03, },
[7] = { .offset = 0x1e, .value = 0x02, },
[8] = { .offset = 0xf3, .value = 0x90, },
[9] = { .offset = 0xf9, .value = 0x00, },
[10] = { .offset = 0xc1, .value = 0x90, },
[11] = { .offset = 0xc2, .value = 0x00, },
[12] = { .offset = 0xc3, .value = 0x0f, },
[13] = { .offset = 0xc4, .value = 0x03, },
[14] = { .offset = 0xc5, .value = 0x16, },
[15] = { .offset = 0xc6, .value = 0x00, },
[16] = { .offset = 0xc7, .value = 0x02, },
[17] = { .offset = 0xc8, .value = 0x02, },
[18] = { .offset = 0xf4, .value = 0x00, },
[19] = { .offset = 0x80, .value = 0xff, },
[20] = { .offset = 0x81, .value = 0x07, },
[21] = { .offset = 0x82, .value = 0x3d, },
[22] = { .offset = 0x83, .value = 0x05, },
[23] = { .offset = 0x94, .value = 0x00, },
[24] = { .offset = 0x95, .value = 0x00, },
[25] = { .offset = 0x96, .value = 0x05, },
[26] = { .offset = 0x97, .value = 0x00, },
[27] = { .offset = 0x9a, .value = 0x88, },
[28] = { .offset = 0x9b, .value = 0x00, },
[29] = { .offset = 0x98, .value = 0x00, },
[30] = { .offset = 0x99, .value = 0x00, },
[31] = { .offset = 0xf7, .value = 0x88, },
[32] = { .offset = 0xf8, .value = 0x0a, },
[33] = { .offset = 0x9c, .value = 0x24, },
[34] = { .offset = 0x9d, .value = 0x00, },
[35] = { .offset = 0x9e, .value = 0x25, },
[36] = { .offset = 0x9f, .value = 0x03, },
[37] = { .offset = 0xa0, .value = 0x28, },
[38] = { .offset = 0xa1, .value = 0x01, },
[39] = { .offset = 0xa2, .value = 0x28, },
[40] = { .offset = 0xa3, .value = 0x05, },
[41] = { .offset = 0xb6, .value = 0x09, },
[42] = { .offset = 0xb8, .value = 0x00, },
[43] = { .offset = 0xb9, .value = 0xa0, },
[44] = { .offset = 0xba, .value = 0x00, },
[45] = { .offset = 0xbb, .value = 0x20, },
[46] = { .offset = 0x10, .value = 0x00, },
[47] = { .offset = 0x11, .value = 0xa0, },
[48] = { .offset = 0x12, .value = 0x02, },
[49] = { .offset = 0x20, .value = 0x00, },
[50] = { .offset = 0x22, .value = 0x00, },
[51] = { .offset = 0x23, .value = 0x00, },
[52] = { .offset = 0x24, .value = 0x00, },
[53] = { .offset = 0x25, .value = 0x00, },
[54] = { .offset = 0x8c, .value = 0x10, },
[55] = { .offset = 0x8d, .value = 0x02, },
[56] = { .offset = 0x8e, .value = 0x10, },
[57] = { .offset = 0x8f, .value = 0x00, },
[58] = { .offset = 0x90, .value = 0xff, },
[59] = { .offset = 0x91, .value = 0x07, },
[60] = { .offset = 0x92, .value = 0xa0, },
[61] = { .offset = 0x93, .value = 0x02, },
[62] = { .offset = 0xa5, .value = 0x00, },
[63] = { .offset = 0xa6, .value = 0x00, },
[64] = { .offset = 0xa7, .value = 0x00, },
[65] = { .offset = 0xa8, .value = 0x00, },
[66] = { .offset = 0xa9, .value = 0x04, },
[67] = { .offset = 0xaa, .value = 0x70, },
[68] = { .offset = 0xab, .value = 0x4f, },
[69] = { .offset = 0xac, .value = 0x00, },
[70] = { .offset = 0xa4, .value = 0x84, },
[71] = { .offset = 0x7e, .value = 0x18, },
[72] = { .offset = 0x84, .value = 0x00, },
[73] = { .offset = 0x85, .value = 0x00, },
[74] = { .offset = 0x86, .value = 0x00, },
[75] = { .offset = 0x87, .value = 0x00, },
[76] = { .offset = 0x88, .value = 0x00, },
[77] = { .offset = 0x89, .value = 0x00, },
[78] = { .offset = 0x8a, .value = 0x00, },
[79] = { .offset = 0x8b, .value = 0x00, },
[80] = { .offset = 0x26, .value = 0x00, },
[81] = { .offset = 0x27, .value = 0x00, },
[82] = { .offset = 0xad, .value = 0x00, },
[83] = { .offset = 0x08, .value = 0x30, }, /* 0x31 */
[84] = { .offset = 0x41, .value = 0x00, },
[85] = { .offset = 0xc0, .value = 0x05, },
.sync = NS2501_C0_ENABLE | NS2501_C0_VSYNC,
.conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD,
.syncb = 0x32,
.dither = 0x0f,
.pll_a = 17,
.pll_b = 852,
.hstart = 144,
.hstop = 783,
.vstart = 22,
.vstop = 514,
.vsync = 2047, /* actually, ignored with this config */
.vtotal = 1341,
.hpos = 0,
.vpos = 16,
.voffs = 36,
.hscale = 40960,
.vscale = 40960
},
[MODE_800x600] = {
[0] = { .offset = 0x0a, .value = 0x81, },
[1] = { .offset = 0x18, .value = 0x07, },
[2] = { .offset = 0x19, .value = 0x00, },
[3] = { .offset = 0x1a, .value = 0x00, },
[4] = { .offset = 0x1b, .value = 0x19, },
[5] = { .offset = 0x1c, .value = 0x64, },
[6] = { .offset = 0x1d, .value = 0x02, },
[7] = { .offset = 0x1e, .value = 0x02, },
[8] = { .offset = 0xf3, .value = 0x90, },
[9] = { .offset = 0xf9, .value = 0x00, },
[10] = { .offset = 0xc1, .value = 0xd7, },
[11] = { .offset = 0xc2, .value = 0x00, },
[12] = { .offset = 0xc3, .value = 0xf8, },
[13] = { .offset = 0xc4, .value = 0x03, },
[14] = { .offset = 0xc5, .value = 0x1a, },
[15] = { .offset = 0xc6, .value = 0x00, },
[16] = { .offset = 0xc7, .value = 0x73, },
[17] = { .offset = 0xc8, .value = 0x02, },
[18] = { .offset = 0xf4, .value = 0x00, },
[19] = { .offset = 0x80, .value = 0x27, },
[20] = { .offset = 0x81, .value = 0x03, },
[21] = { .offset = 0x82, .value = 0x41, },
[22] = { .offset = 0x83, .value = 0x05, },
[23] = { .offset = 0x94, .value = 0x00, },
[24] = { .offset = 0x95, .value = 0x00, },
[25] = { .offset = 0x96, .value = 0x05, },
[26] = { .offset = 0x97, .value = 0x00, },
[27] = { .offset = 0x9a, .value = 0x88, },
[28] = { .offset = 0x9b, .value = 0x00, },
[29] = { .offset = 0x98, .value = 0x00, },
[30] = { .offset = 0x99, .value = 0x00, },
[31] = { .offset = 0xf7, .value = 0x88, },
[32] = { .offset = 0xf8, .value = 0x06, },
[33] = { .offset = 0x9c, .value = 0x23, },
[34] = { .offset = 0x9d, .value = 0x00, },
[35] = { .offset = 0x9e, .value = 0x25, },
[36] = { .offset = 0x9f, .value = 0x03, },
[37] = { .offset = 0xa0, .value = 0x28, },
[38] = { .offset = 0xa1, .value = 0x01, },
[39] = { .offset = 0xa2, .value = 0x28, },
[40] = { .offset = 0xa3, .value = 0x05, },
[41] = { .offset = 0xb6, .value = 0x09, },
[42] = { .offset = 0xb8, .value = 0x30, },
[43] = { .offset = 0xb9, .value = 0xc8, },
[44] = { .offset = 0xba, .value = 0x00, },
[45] = { .offset = 0xbb, .value = 0x20, },
[46] = { .offset = 0x10, .value = 0x20, },
[47] = { .offset = 0x11, .value = 0xc8, },
[48] = { .offset = 0x12, .value = 0x02, },
[49] = { .offset = 0x20, .value = 0x00, },
[50] = { .offset = 0x22, .value = 0x00, },
[51] = { .offset = 0x23, .value = 0x00, },
[52] = { .offset = 0x24, .value = 0x00, },
[53] = { .offset = 0x25, .value = 0x00, },
[54] = { .offset = 0x8c, .value = 0x10, },
[55] = { .offset = 0x8d, .value = 0x02, },
[56] = { .offset = 0x8e, .value = 0x04, },
[57] = { .offset = 0x8f, .value = 0x00, },
[58] = { .offset = 0x90, .value = 0xff, },
[59] = { .offset = 0x91, .value = 0x07, },
[60] = { .offset = 0x92, .value = 0xa0, },
[61] = { .offset = 0x93, .value = 0x02, },
[62] = { .offset = 0xa5, .value = 0x00, },
[63] = { .offset = 0xa6, .value = 0x00, },
[64] = { .offset = 0xa7, .value = 0x00, },
[65] = { .offset = 0xa8, .value = 0x00, },
[66] = { .offset = 0xa9, .value = 0x83, },
[67] = { .offset = 0xaa, .value = 0x40, },
[68] = { .offset = 0xab, .value = 0x32, },
[69] = { .offset = 0xac, .value = 0x00, },
[70] = { .offset = 0xa4, .value = 0x80, },
[71] = { .offset = 0x7e, .value = 0x18, },
[72] = { .offset = 0x84, .value = 0x00, },
[73] = { .offset = 0x85, .value = 0x00, },
[74] = { .offset = 0x86, .value = 0x00, },
[75] = { .offset = 0x87, .value = 0x00, },
[76] = { .offset = 0x88, .value = 0x00, },
[77] = { .offset = 0x89, .value = 0x00, },
[78] = { .offset = 0x8a, .value = 0x00, },
[79] = { .offset = 0x8b, .value = 0x00, },
[80] = { .offset = 0x26, .value = 0x00, },
[81] = { .offset = 0x27, .value = 0x00, },
[82] = { .offset = 0xad, .value = 0x00, },
[83] = { .offset = 0x08, .value = 0x30, }, /* 0x31 */
[84] = { .offset = 0x41, .value = 0x00, },
[85] = { .offset = 0xc0, .value = 0x07, },
.sync = NS2501_C0_ENABLE |
NS2501_C0_HSYNC | NS2501_C0_VSYNC,
.conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD,
.syncb = 0x00,
.dither = 0x0f,
.pll_a = 25,
.pll_b = 612,
.hstart = 215,
.hstop = 1016,
.vstart = 26,
.vstop = 627,
.vsync = 807,
.vtotal = 1341,
.hpos = 0,
.vpos = 4,
.voffs = 35,
.hscale = 51248,
.vscale = 51232
},
[MODE_1024x768] = {
.sync = NS2501_C0_ENABLE | NS2501_C0_VSYNC,
.conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD,
.syncb = 0x32,
.dither = 0x0f,
.pll_a = 11,
.pll_b = 1350,
.hstart = 276,
.hstop = 1299,
.vstart = 15,
.vstop = 1056,
.vsync = 2047,
.vtotal = 1341,
.hpos = 0,
.vpos = 7,
.voffs = 27,
.hscale = 65535,
.vscale = 65535
}
};
 
/*
* Other configuration values left by the BIOS of the
* Fujitsu S6010 in the DVO control registers. Their
* value does not depend on the mode and their meaning
* is unknown.
*/
 
static const struct ns2501_reg mode_agnostic_values[] = {
/* 08 is mode specific */
[0] = { .offset = 0x0a, .value = 0x81, },
[1] = { .offset = 0x18, .value = 0x07, },
[2] = { .offset = 0x19, .value = 0x00, },
[3] = { .offset = 0x1a, .value = 0x00, },
[4] = { .offset = 0x1b, .value = 0x11, },
[5] = { .offset = 0x1c, .value = 0x54, },
[6] = { .offset = 0x1d, .value = 0x03, },
[7] = { .offset = 0x1e, .value = 0x02, },
[8] = { .offset = 0xf3, .value = 0x90, },
[9] = { .offset = 0xf9, .value = 0x00, },
[10] = { .offset = 0xc1, .value = 0x90, },
[11] = { .offset = 0xc2, .value = 0x00, },
[12] = { .offset = 0xc3, .value = 0x0f, },
[13] = { .offset = 0xc4, .value = 0x03, },
[14] = { .offset = 0xc5, .value = 0x16, },
[15] = { .offset = 0xc6, .value = 0x00, },
[16] = { .offset = 0xc7, .value = 0x02, },
[17] = { .offset = 0xc8, .value = 0x02, },
[18] = { .offset = 0xf4, .value = 0x00, },
[19] = { .offset = 0x80, .value = 0xff, },
[20] = { .offset = 0x81, .value = 0x07, },
[21] = { .offset = 0x82, .value = 0x3d, },
[22] = { .offset = 0x83, .value = 0x05, },
[23] = { .offset = 0x94, .value = 0x00, },
[24] = { .offset = 0x95, .value = 0x00, },
[25] = { .offset = 0x96, .value = 0x05, },
[26] = { .offset = 0x97, .value = 0x00, },
[27] = { .offset = 0x9a, .value = 0x88, },
[28] = { .offset = 0x9b, .value = 0x00, },
[29] = { .offset = 0x98, .value = 0x00, },
[30] = { .offset = 0x99, .value = 0x00, },
[31] = { .offset = 0xf7, .value = 0x88, },
[32] = { .offset = 0xf8, .value = 0x0a, },
[33] = { .offset = 0x9c, .value = 0x24, },
[34] = { .offset = 0x9d, .value = 0x00, },
[35] = { .offset = 0x9e, .value = 0x25, },
[36] = { .offset = 0x9f, .value = 0x03, },
[37] = { .offset = 0xa0, .value = 0x28, },
[38] = { .offset = 0xa1, .value = 0x01, },
[39] = { .offset = 0xa2, .value = 0x28, },
[40] = { .offset = 0xa3, .value = 0x05, },
[41] = { .offset = 0xb6, .value = 0x09, },
[42] = { .offset = 0xb8, .value = 0x00, },
[43] = { .offset = 0xb9, .value = 0xa0, },
[44] = { .offset = 0xba, .value = 0x00, },
[45] = { .offset = 0xbb, .value = 0x20, },
[46] = { .offset = 0x10, .value = 0x00, },
[47] = { .offset = 0x11, .value = 0xa0, },
[48] = { .offset = 0x12, .value = 0x02, },
[49] = { .offset = 0x20, .value = 0x00, },
[50] = { .offset = 0x22, .value = 0x00, },
[51] = { .offset = 0x23, .value = 0x00, },
[52] = { .offset = 0x24, .value = 0x00, },
[53] = { .offset = 0x25, .value = 0x00, },
[54] = { .offset = 0x8c, .value = 0x10, },
[55] = { .offset = 0x8d, .value = 0x02, },
[56] = { .offset = 0x8e, .value = 0x10, },
[57] = { .offset = 0x8f, .value = 0x00, },
[58] = { .offset = 0x90, .value = 0xff, },
[59] = { .offset = 0x91, .value = 0x07, },
[60] = { .offset = 0x92, .value = 0xa0, },
[61] = { .offset = 0x93, .value = 0x02, },
[62] = { .offset = 0xa5, .value = 0x00, },
[63] = { .offset = 0xa6, .value = 0x00, },
[64] = { .offset = 0xa7, .value = 0x00, },
[65] = { .offset = 0xa8, .value = 0x00, },
[66] = { .offset = 0xa9, .value = 0x04, },
[67] = { .offset = 0xaa, .value = 0x70, },
[68] = { .offset = 0xab, .value = 0x4f, },
[69] = { .offset = 0xac, .value = 0x00, },
[70] = { .offset = 0xa4, .value = 0x84, },
[71] = { .offset = 0x7e, .value = 0x18, },
[72] = { .offset = 0x84, .value = 0x00, },
[73] = { .offset = 0x85, .value = 0x00, },
[74] = { .offset = 0x86, .value = 0x00, },
[75] = { .offset = 0x87, .value = 0x00, },
[76] = { .offset = 0x88, .value = 0x00, },
[77] = { .offset = 0x89, .value = 0x00, },
[78] = { .offset = 0x8a, .value = 0x00, },
[79] = { .offset = 0x8b, .value = 0x00, },
[80] = { .offset = 0x26, .value = 0x00, },
[81] = { .offset = 0x27, .value = 0x00, },
[82] = { .offset = 0xad, .value = 0x00, },
[83] = { .offset = 0x08, .value = 0x34, }, /* 0x35 */
[84] = { .offset = 0x41, .value = 0x00, },
[85] = { .offset = 0xc0, .value = 0x01, },
},
/* 10,11 are part of the mode specific configuration */
[1] = { .offset = 0x12, .value = 0x02, },
[2] = { .offset = 0x18, .value = 0x07, },
[3] = { .offset = 0x19, .value = 0x00, },
[4] = { .offset = 0x1a, .value = 0x00, }, /* PLL?, ignored */
/* 1b,1c,1d are part of the mode specific configuration */
[5] = { .offset = 0x1e, .value = 0x02, },
[6] = { .offset = 0x1f, .value = 0x40, },
[7] = { .offset = 0x20, .value = 0x00, },
[8] = { .offset = 0x21, .value = 0x00, },
[9] = { .offset = 0x22, .value = 0x00, },
[10] = { .offset = 0x23, .value = 0x00, },
[11] = { .offset = 0x24, .value = 0x00, },
[12] = { .offset = 0x25, .value = 0x00, },
[13] = { .offset = 0x26, .value = 0x00, },
[14] = { .offset = 0x27, .value = 0x00, },
[15] = { .offset = 0x7e, .value = 0x18, },
/* 80-84 are part of the mode-specific configuration */
[16] = { .offset = 0x84, .value = 0x00, },
[17] = { .offset = 0x85, .value = 0x00, },
[18] = { .offset = 0x86, .value = 0x00, },
[19] = { .offset = 0x87, .value = 0x00, },
[20] = { .offset = 0x88, .value = 0x00, },
[21] = { .offset = 0x89, .value = 0x00, },
[22] = { .offset = 0x8a, .value = 0x00, },
[23] = { .offset = 0x8b, .value = 0x00, },
[24] = { .offset = 0x8c, .value = 0x10, },
[25] = { .offset = 0x8d, .value = 0x02, },
/* 8e,8f are part of the mode-specific configuration */
[26] = { .offset = 0x90, .value = 0xff, },
[27] = { .offset = 0x91, .value = 0x07, },
[28] = { .offset = 0x92, .value = 0xa0, },
[29] = { .offset = 0x93, .value = 0x02, },
[30] = { .offset = 0x94, .value = 0x00, },
[31] = { .offset = 0x95, .value = 0x00, },
[32] = { .offset = 0x96, .value = 0x05, },
[33] = { .offset = 0x97, .value = 0x00, },
/* 98,99 are part of the mode-specific configuration */
[34] = { .offset = 0x9a, .value = 0x88, },
[35] = { .offset = 0x9b, .value = 0x00, },
/* 9c,9d are part of the mode-specific configuration */
[36] = { .offset = 0x9e, .value = 0x25, },
[37] = { .offset = 0x9f, .value = 0x03, },
[38] = { .offset = 0xa0, .value = 0x28, },
[39] = { .offset = 0xa1, .value = 0x01, },
[40] = { .offset = 0xa2, .value = 0x28, },
[41] = { .offset = 0xa3, .value = 0x05, },
/* register 0xa4 is mode specific, but 0x80..0x84 works always */
[42] = { .offset = 0xa4, .value = 0x84, },
[43] = { .offset = 0xa5, .value = 0x00, },
[44] = { .offset = 0xa6, .value = 0x00, },
[45] = { .offset = 0xa7, .value = 0x00, },
[46] = { .offset = 0xa8, .value = 0x00, },
/* 0xa9 to 0xab are mode specific, but have no visible effect */
[47] = { .offset = 0xa9, .value = 0x04, },
[48] = { .offset = 0xaa, .value = 0x70, },
[49] = { .offset = 0xab, .value = 0x4f, },
[50] = { .offset = 0xac, .value = 0x00, },
[51] = { .offset = 0xad, .value = 0x00, },
[52] = { .offset = 0xb6, .value = 0x09, },
[53] = { .offset = 0xb7, .value = 0x03, },
/* b8,b9 are part of the mode-specific configuration */
[54] = { .offset = 0xba, .value = 0x00, },
[55] = { .offset = 0xbb, .value = 0x20, },
[56] = { .offset = 0xf3, .value = 0x90, },
[57] = { .offset = 0xf4, .value = 0x00, },
[58] = { .offset = 0xf7, .value = 0x88, },
/* f8 is mode specific, but the value does not matter */
[59] = { .offset = 0xf8, .value = 0x0a, },
[60] = { .offset = 0xf9, .value = 0x00, }
};
 
static const struct ns2501_reg regs_init[] = {
350,25 → 378,12
 
struct ns2501_priv {
bool quiet;
const struct ns2501_reg *regs;
const struct ns2501_configuration *conf;
};
 
#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
 
/*
* For reasons unclear to me, the ns2501 at least on the Fujitsu/Siemens
* laptops does not react on the i2c bus unless
* both the PLL is running and the display is configured in its native
* resolution.
* This function forces the DVO on, and stores the registers it touches.
* Afterwards, registers are restored to regular values.
*
* This is pretty much a hack, though it works.
* Without that, ns2501_readb and ns2501_writeb fail
* when switching the resolution.
*/
 
/*
** Read a register from the ns2501.
** Returns true if successful, false otherwise.
** If it returns false, it might be wise to enable the
531,9 → 546,10
}
 
static void ns2501_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
const struct ns2501_configuration *conf;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
int mode_idx, i;
 
541,6 → 557,36
("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
 
DRM_DEBUG_KMS("Detailed requested mode settings are:\n"
"clock : %d kHz\n"
"hdisplay : %d\n"
"hblank start : %d\n"
"hblank end : %d\n"
"hsync start : %d\n"
"hsync end : %d\n"
"htotal : %d\n"
"hskew : %d\n"
"vdisplay : %d\n"
"vblank start : %d\n"
"hblank end : %d\n"
"vsync start : %d\n"
"vsync end : %d\n"
"vtotal : %d\n",
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay,
adjusted_mode->crtc_hblank_start,
adjusted_mode->crtc_hblank_end,
adjusted_mode->crtc_hsync_start,
adjusted_mode->crtc_hsync_end,
adjusted_mode->crtc_htotal,
adjusted_mode->crtc_hskew,
adjusted_mode->crtc_vdisplay,
adjusted_mode->crtc_vblank_start,
adjusted_mode->crtc_vblank_end,
adjusted_mode->crtc_vsync_start,
adjusted_mode->crtc_vsync_end,
adjusted_mode->crtc_vtotal);
 
if (mode->hdisplay == 640 && mode->vdisplay == 480)
mode_idx = MODE_640x480;
else if (mode->hdisplay == 800 && mode->vdisplay == 600)
554,10 → 600,44
for (i = 0; i < ARRAY_SIZE(regs_init); i++)
ns2501_writeb(dvo, regs_init[i].offset, regs_init[i].value);
 
ns->regs = regs_1024x768[mode_idx];
/* Write the mode-agnostic values */
for (i = 0; i < ARRAY_SIZE(mode_agnostic_values); i++)
ns2501_writeb(dvo, mode_agnostic_values[i].offset,
mode_agnostic_values[i].value);
 
for (i = 0; i < 84; i++)
ns2501_writeb(dvo, ns->regs[i].offset, ns->regs[i].value);
/* Write now the mode-specific configuration */
conf = ns2501_modes + mode_idx;
ns->conf = conf;
 
ns2501_writeb(dvo, NS2501_REG8, conf->conf);
ns2501_writeb(dvo, NS2501_REG1B, conf->pll_a);
ns2501_writeb(dvo, NS2501_REG1C, conf->pll_b & 0xff);
ns2501_writeb(dvo, NS2501_REG1D, conf->pll_b >> 8);
ns2501_writeb(dvo, NS2501_REGC1, conf->hstart & 0xff);
ns2501_writeb(dvo, NS2501_REGC2, conf->hstart >> 8);
ns2501_writeb(dvo, NS2501_REGC3, conf->hstop & 0xff);
ns2501_writeb(dvo, NS2501_REGC4, conf->hstop >> 8);
ns2501_writeb(dvo, NS2501_REGC5, conf->vstart & 0xff);
ns2501_writeb(dvo, NS2501_REGC6, conf->vstart >> 8);
ns2501_writeb(dvo, NS2501_REGC7, conf->vstop & 0xff);
ns2501_writeb(dvo, NS2501_REGC8, conf->vstop >> 8);
ns2501_writeb(dvo, NS2501_REG80, conf->vsync & 0xff);
ns2501_writeb(dvo, NS2501_REG81, conf->vsync >> 8);
ns2501_writeb(dvo, NS2501_REG82, conf->vtotal & 0xff);
ns2501_writeb(dvo, NS2501_REG83, conf->vtotal >> 8);
ns2501_writeb(dvo, NS2501_REG98, conf->hpos & 0xff);
ns2501_writeb(dvo, NS2501_REG99, conf->hpos >> 8);
ns2501_writeb(dvo, NS2501_REG8E, conf->vpos & 0xff);
ns2501_writeb(dvo, NS2501_REG8F, conf->vpos >> 8);
ns2501_writeb(dvo, NS2501_REG9C, conf->voffs & 0xff);
ns2501_writeb(dvo, NS2501_REG9D, conf->voffs >> 8);
ns2501_writeb(dvo, NS2501_REGB8, conf->hscale & 0xff);
ns2501_writeb(dvo, NS2501_REGB9, conf->hscale >> 8);
ns2501_writeb(dvo, NS2501_REG10, conf->vscale & 0xff);
ns2501_writeb(dvo, NS2501_REG11, conf->vscale >> 8);
ns2501_writeb(dvo, NS2501_REGF9, conf->dither);
ns2501_writeb(dvo, NS2501_REG41, conf->syncb);
ns2501_writeb(dvo, NS2501_REGC0, conf->sync);
}
 
/* set the NS2501 power state */
579,34 → 659,32
DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable);
 
if (enable) {
if (WARN_ON(ns->regs[83].offset != 0x08 ||
ns->regs[84].offset != 0x41 ||
ns->regs[85].offset != 0xc0))
return;
ns2501_writeb(dvo, NS2501_REGC0, ns->conf->sync | 0x08);
 
ns2501_writeb(dvo, 0xc0, ns->regs[85].value | 0x08);
ns2501_writeb(dvo, NS2501_REG41, ns->conf->syncb);
 
ns2501_writeb(dvo, 0x41, ns->regs[84].value);
 
ns2501_writeb(dvo, 0x34, 0x01);
ns2501_writeb(dvo, NS2501_REG34, NS2501_34_ENABLE_OUTPUT);
msleep(15);
 
ns2501_writeb(dvo, 0x08, 0x35);
if (!(ns->regs[83].value & NS2501_8_BPAS))
ns2501_writeb(dvo, 0x08, 0x31);
ns2501_writeb(dvo, NS2501_REG8,
ns->conf->conf | NS2501_8_BPAS);
if (!(ns->conf->conf & NS2501_8_BPAS))
ns2501_writeb(dvo, NS2501_REG8, ns->conf->conf);
msleep(200);
 
ns2501_writeb(dvo, 0x34, 0x03);
ns2501_writeb(dvo, NS2501_REG34,
NS2501_34_ENABLE_OUTPUT | NS2501_34_ENABLE_BACKLIGHT);
 
ns2501_writeb(dvo, 0xc0, ns->regs[85].value);
ns2501_writeb(dvo, NS2501_REGC0, ns->conf->sync);
} else {
ns2501_writeb(dvo, 0x34, 0x01);
ns2501_writeb(dvo, NS2501_REG34, NS2501_34_ENABLE_OUTPUT);
msleep(200);
 
ns2501_writeb(dvo, 0x08, 0x34);
ns2501_writeb(dvo, NS2501_REG8, NS2501_8_VEN | NS2501_8_HEN |
NS2501_8_BPAS);
msleep(15);
 
ns2501_writeb(dvo, 0x34, 0x00);
ns2501_writeb(dvo, NS2501_REG34, 0x00);
}
}
 
/drivers/video/drm/i915/dvo_sil164.c
190,8 → 190,8
}
 
static void sil164_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
/* As long as the basics are set up, since we don't have clock
* dependencies in the mode setup, we can just leave the
/drivers/video/drm/i915/dvo_tfp410.c
222,8 → 222,8
}
 
static void tfp410_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
/* As long as the basics are set up, since we don't have clock dependencies
* in the mode setup, we can just leave the registers alone and everything
/drivers/video/drm/i915/firmware/skl_guc_ver4.bin
Cannot display: binary file (property added: svn:mime-type = application/octet-stream).
/drivers/video/drm/i915/fwblob.asm
0,0 → 1,44
 
format MS COFF
 
;struct builtin_fw {
; char *name;
; void *data;
; unsigned long size;
;};
 
public ___start_builtin_fw
public ___end_builtin_fw
 
section '.text' code readable executable align 16
 
align 16
 
macro CP_code [arg]
{
dd FIRMWARE_#arg#_CP
dd arg#_CP_START
dd (arg#_CP_END - arg#_CP_START)
}
 
macro CP_firmware [arg]
{
forward
FIRMWARE_#arg#_CP db 'i915/',`arg,'.bin',0
forward
 
align 16
arg#_CP_START:
file "firmware/"#`arg#".bin"
arg#_CP_END:
}
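 
; Illustrative expansion (a sketch, not emitted literally): for
; "CP_code skl_guc_ver4" and "CP_firmware skl_guc_ver4" the macros above
; generate one builtin_fw record and its backing data:
;
; dd FIRMWARE_skl_guc_ver4_CP ; builtin_fw.name
; dd skl_guc_ver4_CP_START ; builtin_fw.data
; dd (skl_guc_ver4_CP_END - skl_guc_ver4_CP_START) ; builtin_fw.size
;
; FIRMWARE_skl_guc_ver4_CP db 'i915/skl_guc_ver4.bin',0
; align 16
; skl_guc_ver4_CP_START:
; file "firmware/skl_guc_ver4.bin"
; skl_guc_ver4_CP_END: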
 
___start_builtin_fw:
 
CP_code skl_guc_ver4
 
___end_builtin_fw:
 
CP_firmware skl_guc_ver4
 
 
/drivers/video/drm/i915/hpet.c
0,0 → 1,320
#include <syscall.h>
#include <linux/ktime.h>
 
typedef unsigned int addr_t;
 
#define ACPI_NAME_SIZE 4
#define ACPI_OEM_ID_SIZE 6
#define ACPI_OEM_TABLE_ID_SIZE 8
 
#define ACPI_RSDP_CHECKSUM_LENGTH 20
#define ACPI_RSDP_XCHECKSUM_LENGTH 36
 
typedef struct __attribute__((packed))
{
u32 sig;
u32 len;
u8 rev;
u8 csum;
char oem_id[ACPI_OEM_ID_SIZE];
char oem_tid[ACPI_OEM_TABLE_ID_SIZE];
u32 oem_rev;
u32 creator_id;
u32 creator_rev;
}acpi_thead_t;
 
typedef struct __attribute__((packed))
{
u8 space_id; /* Address space where struct or register exists */
u8 bit_width; /* Size in bits of given register */
u8 bit_offset; /* Bit offset within the register */
u8 access_width; /* Minimum Access size (ACPI 3.0) */
u64 address; /* 64-bit address of struct or register */
}acpi_address_t;
 
 
typedef struct __attribute__((packed))
{
acpi_thead_t header; /* Common ACPI table header */
u32 id; /* Hardware ID of event timer block */
acpi_address_t address; /* Address of event timer block */
u8 sequence; /* HPET sequence number */
u16 minimum_tick; /* Main counter min tick, periodic mode */
u8 flags;
}acpi_hpet_t;
 
typedef struct __attribute__((packed))
{
acpi_thead_t header;
u32 ptrs[0];
}acpi_rsdt_t;
 
typedef struct __attribute__((packed))
{
acpi_thead_t header;
u64 ptrs[0];
}acpi_xsdt_t;
 
typedef struct __attribute__((packed))
{
u64 sig;
u8 csum;
char oemid[6];
u8 rev;
u32 rsdt_ptr;
u32 rsdt_len;
u64 xsdt_ptr;
u8 xcsum;
u8 _rsvd_33[3];
}acpi_rsdp_t;
 
#define OS_BASE 0x80000000
#define ACPI20_PC99_RSDP_START (OS_BASE + 0x0e0000)
#define ACPI20_PC99_RSDP_END (OS_BASE + 0x100000)
#define ACPI20_PC99_RSDP_SIZE (ACPI20_PC99_RSDP_END - ACPI20_PC99_RSDP_START)
 
static acpi_thead_t* (*sdt_find)(void *sdt, u32 sig);
 
static u8 acpi_tb_checksum (u8 *buffer, u32 len)
{
u8 sum = 0;
u8 *end = buffer + len;
 
while (buffer < end)
{
sum = (u8)(sum + *(buffer++));
}
 
return sum;
}
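 
/*
* An ACPI structure is valid when the byte sum over its whole image,
* checksum field included, is 0 modulo 256; e.g. a candidate RSDP
* passes when acpi_tb_checksum((u8*)r, ACPI_RSDP_CHECKSUM_LENGTH)
* returns 0, which is what acpi_locate() below relies on.
*/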
 
static acpi_rsdp_t* acpi_locate()
{
addr_t p;
 
for (p = ACPI20_PC99_RSDP_START; p < ACPI20_PC99_RSDP_END; p+=16)
{
acpi_rsdp_t* r = (acpi_rsdp_t*) p;
if (r->sig != 0x2052545020445352 )
continue;
 
if (acpi_tb_checksum ((u8*)r, ACPI_RSDP_CHECKSUM_LENGTH) != 0)
continue;
 
if ((r->rev >= 2) &&
(acpi_tb_checksum ((u8*)r, ACPI_RSDP_XCHECKSUM_LENGTH) != 0))
continue;
 
return r;
};
 
return NULL;
};
 
acpi_thead_t* rsdt_find(acpi_rsdt_t *rsdt, u32 sig)
{
acpi_thead_t *head = NULL;
u32 i;
 
for (i = 0; i < ((rsdt->header.len-sizeof(acpi_thead_t))/
sizeof(rsdt->ptrs[0])); i++)
{
u32 ptr = rsdt->ptrs[i];
 
acpi_thead_t* t = (acpi_thead_t*)MapIoMem(ptr, 8192, PG_SW);
 
if (t->sig == sig)
{
head = t;
break;
};
FreeKernelSpace(t);
}
return head;
};
 
acpi_thead_t* xsdt_find(acpi_xsdt_t *xsdt, u32 sig)
{
acpi_thead_t *head = NULL;
u32 i;
 
for (i = 0; i < ((xsdt->header.len-sizeof(acpi_thead_t))/
sizeof(xsdt->ptrs[0])); i++)
{
u32 ptr = xsdt->ptrs[i];
 
acpi_thead_t* t = (acpi_thead_t*)MapIoMem(ptr, 8192, PG_SW);
 
if (t->sig == sig)
{
head = t;
break;
};
FreeKernelSpace(t);
}
return head;
};
 
static void dump_rsdt(acpi_rsdt_t *rsdt)
{
int i;
 
for (i = 0; i < ((rsdt->header.len-sizeof(acpi_thead_t))/
sizeof(rsdt->ptrs[0])); i++)
{
u32 ptr = rsdt->ptrs[i];
dbgprintf("%s ptr= %p\n", __FUNCTION__, ptr);
 
acpi_thead_t* t = (acpi_thead_t*)MapIoMem(ptr, 8192, PG_SW);
dbgprintf("%s t= %x\n", __FUNCTION__, t);
 
char *p = (char*)&t->sig;
printf("sig %d: %x %c%c%c%c base %p\n", i, t->sig,
p[0],p[1],p[2],p[3],rsdt->ptrs[i]);
FreeKernelSpace(t);
};
};
 
typedef struct
{
u64 hpet_cap; /* capabilities */
u64 res0; /* reserved */
u64 hpet_config; /* configuration */
u64 res1; /* reserved */
u64 hpet_isr; /* interrupt status reg */
u64 res2[25]; /* reserved */
union { /* main counter */
volatile u64 _hpet_mc64;
u32 _hpet_mc32;
unsigned long _hpet_mc;
} _u0;
u64 res3; /* reserved */
struct hpet_timer {
u64 hpet_config; /* configuration/cap */
union { /* timer compare register */
u64 _hpet_hc64;
u32 _hpet_hc32;
unsigned long _hpet_compare;
} _u1;
u64 hpet_fsb[2]; /* FSB route */
} hpet_timers[1];
}hpet_t;
 
#define HPET_ID 0x000
#define HPET_PERIOD 0x004
#define HPET_CFG 0x010
#define HPET_STATUS 0x020
#define HPET_COUNTER 0x0f0
 
#define HPET_ID_NUMBER 0x00001f00
#define HPET_ID_NUMBER_SHIFT 8
 
#define HPET_CFG_ENABLE 0x001
 
static void *hpet_virt_address;
 
inline unsigned int hpet_readl(unsigned int a)
{
return readl(hpet_virt_address + a);
}
 
static inline void hpet_writel(unsigned int d, unsigned int a)
{
writel(d, hpet_virt_address + a);
}
 
static void hpet_start_counter(void)
{
unsigned int cfg = hpet_readl(HPET_CFG);
cfg |= HPET_CFG_ENABLE;
hpet_writel(cfg, HPET_CFG);
}
 
 
u64 read_htime()
{
u32 eflags;
u64 val;
 
eflags = safe_cli();
asm volatile(
"1:\n"
"mov 0xf4(%%ebx), %%edx\n"
"mov 0xf0(%%ebx), %%eax\n"
"mov 0xf4(%%ebx), %%ecx\n"
"cmpl %%edx, %%ecx\n"
"jnz 1b\n"
:"=A"(val)
:"b" (hpet_virt_address)
:"ecx");
safe_sti(eflags);
return val;
}
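 
/*
* A portable C sketch of what the inline assembly above does (the name
* read_htime_c is illustrative, not part of the driver): read the high
* dword, then the low dword, then the high dword again, and retry if
* the counter carried between the two 32-bit reads. Unlike read_htime()
* it does not disable interrupts around the loop.
*/
static inline u64 read_htime_c(void)
{
volatile u32 *mc = (volatile u32 *)((u8 *)hpet_virt_address + 0xf0);
u32 lo, hi, hi2;
 
do {
hi = mc[1]; /* 0xf4: high dword of the main counter */
lo = mc[0]; /* 0xf0: low dword */
hi2 = mc[1]; /* re-read the high dword */
} while (hi != hi2); /* retry if a carry happened between the reads */
 
return ((u64)hi << 32) | lo;
}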
 
static u32 period;
 
void init_hpet()
{
void *sdt = NULL;
 
acpi_rsdp_t *rsdp = acpi_locate();
 
if (unlikely(rsdp == NULL))
{
printf("No ACPI RSD table\n");
return ;
};
 
printf("rsd base address %p\n", rsdp);
 
if(rsdp->rev > 1)
{
sdt = (void*)(u32)rsdp->xsdt_ptr;
sdt_find = xsdt_find;
}
else
{
sdt = (void*)rsdp->rsdt_ptr;
sdt_find = rsdt_find;
};
 
printf("sdt address %p\n", sdt);
 
if (sdt == NULL)
{
printf("Invalid ACPI RSD table\n");
return ;
};
 
sdt = MapIoMem(sdt, 128*1024, PG_SW);
 
printf("sdt mapped address %x\n", sdt);
 
acpi_hpet_t *tbl = (acpi_hpet_t*)sdt_find(sdt, 0x54455048); /* "HPET" in little-endian */
 
if (unlikely(tbl == NULL))
{
printf("No ACPI HPET table\n");
return ;
};
 
u32 hpet_address = tbl->address.address;
 
hpet_virt_address = (void*)MapIoMem(hpet_address,1024, PG_SW|0x18);
 
printf("hpet address %x mapped at %x\n", hpet_address, hpet_virt_address);
 
u32 timers, l, h;
 
l = hpet_readl(HPET_ID);
h = hpet_readl(HPET_PERIOD);
period = h / 1000000; /* HPET_PERIOD is in femtoseconds per tick; 10^6 fs = 1 ns */
 
timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h);
l = hpet_readl(HPET_CFG);
h = hpet_readl(HPET_STATUS);
printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h);
l = hpet_readl(HPET_COUNTER);
h = hpet_readl(HPET_COUNTER+4);
printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);
 
hpet_start_counter();
 
}
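 
/*
* Sketch of a consumer (hpet_ns is a hypothetical helper, not part of
* this driver): with period holding the tick length in nanoseconds,
* the main counter converts to a monotonic nanosecond clock directly.
*/
static inline u64 hpet_ns(void)
{
return read_htime() * period;
}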
 
 
/drivers/video/drm/i915/i915_cmd_parser.c
123,8 → 123,8
CMD( MI_SEMAPHORE_MBOX, SMI, !F, 0xFF, R ),
CMD( MI_STORE_DWORD_INDEX, SMI, !F, 0xFF, R ),
CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
.reg = { .offset = 1, .mask = 0x007FFFFC } ),
CMD( MI_STORE_REGISTER_MEM(1), SMI, !F, 0xFF, W | B,
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
CMD( MI_STORE_REGISTER_MEM, SMI, F, 3, W | B,
.reg = { .offset = 1, .mask = 0x007FFFFC },
.bits = {{
.offset = 0,
131,7 → 131,7
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_LOAD_REGISTER_MEM, SMI, !F, 0xFF, W | B,
CMD( MI_LOAD_REGISTER_MEM, SMI, F, 3, W | B,
.reg = { .offset = 1, .mask = 0x007FFFFC },
.bits = {{
.offset = 0,
151,6 → 151,7
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_PREDICATE, SMI, F, 1, S ),
CMD( MI_TOPOLOGY_FILTER, SMI, F, 1, S ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
CMD( MI_SET_CONTEXT, SMI, !F, 0xFF, R ),
CMD( MI_URB_CLEAR, SMI, !F, 0xFF, S ),
210,6 → 211,7
CMD( MI_SET_PREDICATE, SMI, F, 1, S ),
CMD( MI_RS_CONTROL, SMI, F, 1, S ),
CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
229,6 → 231,7
 
static const struct drm_i915_cmd_descriptor video_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
272,6 → 275,7
 
static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
391,16 → 395,39
 
/*
* Register whitelists, sorted by increasing register offset.
*/
 
/*
* An individual whitelist entry granting access to register addr. If
* mask is non-zero the argument of immediate register writes will be
* AND-ed with mask, and the command will be rejected if the result
* doesn't match value.
*
* Registers with non-zero mask are only allowed to be written using
* LRI.
*/
struct drm_i915_reg_descriptor {
u32 addr;
u32 mask;
u32 value;
};
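 
/*
* Worked example (see gen7_render_regs below): HSW_SCRATCH1 is listed
* with .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE and .value = 0,
* so an MI_LOAD_REGISTER_IMM write to it is accepted only when every
* payload bit other than the atomics-disable bit is zero; LRM writes
* to masked registers are rejected outright in check_cmd().
*/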
 
/* Convenience macro for adding 32-bit registers. */
#define REG32(address, ...) \
{ .addr = address, __VA_ARGS__ }
 
/*
* Convenience macro for adding 64-bit registers.
*
* Some registers that userspace accesses are 64 bits. The register
* access commands only allow 32-bit accesses. Hence, we have to include
* entries for both halves of the 64-bit registers.
*/
#define REG64(addr) \
REG32(addr), REG32(addr + sizeof(u32))
 
/* Convenience macro for adding 64-bit registers */
#define REG64(addr) (addr), (addr + sizeof(u32))
 
static const u32 gen7_render_regs[] = {
static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG64(GPGPU_THREADS_DISPATCHED),
REG64(HS_INVOCATION_COUNT),
REG64(DS_INVOCATION_COUNT),
REG64(IA_VERTICES_COUNT),
412,15 → 439,18
REG64(CL_PRIMITIVES_COUNT),
REG64(PS_INVOCATION_COUNT),
REG64(PS_DEPTH_COUNT),
OACONTROL, /* Only allowed for LRI and SRM. See below. */
REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
REG64(MI_PREDICATE_SRC0),
REG64(MI_PREDICATE_SRC1),
GEN7_3DPRIM_END_OFFSET,
GEN7_3DPRIM_START_VERTEX,
GEN7_3DPRIM_VERTEX_COUNT,
GEN7_3DPRIM_INSTANCE_COUNT,
GEN7_3DPRIM_START_INSTANCE,
GEN7_3DPRIM_BASE_VERTEX,
REG32(GEN7_3DPRIM_END_OFFSET),
REG32(GEN7_3DPRIM_START_VERTEX),
REG32(GEN7_3DPRIM_VERTEX_COUNT),
REG32(GEN7_3DPRIM_INSTANCE_COUNT),
REG32(GEN7_3DPRIM_START_INSTANCE),
REG32(GEN7_3DPRIM_BASE_VERTEX),
REG32(GEN7_GPGPU_DISPATCHDIMX),
REG32(GEN7_GPGPU_DISPATCHDIMY),
REG32(GEN7_GPGPU_DISPATCHDIMZ),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
429,33 → 459,41
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
GEN7_SO_WRITE_OFFSET(0),
GEN7_SO_WRITE_OFFSET(1),
GEN7_SO_WRITE_OFFSET(2),
GEN7_SO_WRITE_OFFSET(3),
GEN7_L3SQCREG1,
GEN7_L3CNTLREG2,
GEN7_L3CNTLREG3,
REG32(GEN7_SO_WRITE_OFFSET(0)),
REG32(GEN7_SO_WRITE_OFFSET(1)),
REG32(GEN7_SO_WRITE_OFFSET(2)),
REG32(GEN7_SO_WRITE_OFFSET(3)),
REG32(GEN7_L3SQCREG1),
REG32(GEN7_L3CNTLREG2),
REG32(GEN7_L3CNTLREG3),
REG32(HSW_SCRATCH1,
.mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
.value = 0),
REG32(HSW_ROW_CHICKEN3,
.mask = ~(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE << 16 |
HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
.value = 0),
};
 
static const u32 gen7_blt_regs[] = {
BCS_SWCTRL,
static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
REG32(BCS_SWCTRL),
};
 
static const u32 ivb_master_regs[] = {
FORCEWAKE_MT,
DERRMR,
GEN7_PIPE_DE_LOAD_SL(PIPE_A),
GEN7_PIPE_DE_LOAD_SL(PIPE_B),
GEN7_PIPE_DE_LOAD_SL(PIPE_C),
static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
REG32(FORCEWAKE_MT),
REG32(DERRMR),
REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
};
 
static const u32 hsw_master_regs[] = {
FORCEWAKE_MT,
DERRMR,
static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
REG32(FORCEWAKE_MT),
REG32(DERRMR),
};
 
#undef REG64
#undef REG32
 
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
481,13 → 519,17
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
u32 subclient =
(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;
 
if (client == INSTR_MI_CLIENT)
return 0x3F;
else if (client == INSTR_RC_CLIENT) {
if (subclient == INSTR_MEDIA_SUBCLIENT)
if (subclient == INSTR_MEDIA_SUBCLIENT) {
if (op == 6)
return 0xFFFF;
else
return 0xFFF;
else
} else
return 0xFF;
}
 
525,7 → 567,7
 
for (j = 0; j < table->count; j++) {
const struct drm_i915_cmd_descriptor *desc =
&table->table[i];
&table->table[j];
u32 curr = desc->cmd.value & desc->cmd.mask;
 
if (curr < previous) {
541,7 → 583,9
return ret;
}
 
static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count)
static bool check_sorted(int ring_id,
const struct drm_i915_reg_descriptor *reg_table,
int reg_count)
{
int i;
u32 previous = 0;
548,7 → 592,7
bool ret = true;
 
for (i = 0; i < reg_count; i++) {
u32 curr = reg_table[i];
u32 curr = reg_table[i].addr;
 
if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
716,7 → 760,8
BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
BUG_ON(!validate_regs_sorted(ring));
 
if (hash_empty(ring->cmd_hash)) {
WARN_ON(!hash_empty(ring->cmd_hash));
 
ret = init_hash_table(ring, cmd_tables, cmd_table_count);
if (ret) {
DRM_ERROR("CMD: cmd_parser_init failed!\n");
723,7 → 768,6
fini_hash_table(ring);
return ret;
}
}
 
ring->needs_cmd_parser = true;
 
795,25 → 839,31
return default_desc;
}
 
static bool valid_reg(const u32 *table, int count, u32 addr)
static const struct drm_i915_reg_descriptor *
find_reg(const struct drm_i915_reg_descriptor *table,
int count, u32 addr)
{
if (table && count != 0) {
if (table) {
int i;
 
for (i = 0; i < count; i++) {
if (table[i] == addr)
return true;
if (table[i].addr == addr)
return &table[i];
}
}
 
return false;
return NULL;
}
 
static u32 *vmap_batch(struct drm_i915_gem_object *obj)
static u32 *vmap_batch(struct drm_i915_gem_object *obj,
unsigned start, unsigned len)
{
int i;
void *addr = NULL;
struct sg_page_iter sg_iter;
int first_page = start >> PAGE_SHIFT;
int last_page = (len + start + 4095) >> PAGE_SHIFT;
int npages = last_page - first_page;
struct page **pages;
 
pages = kmalloc(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
823,12 → 873,13
}
 
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
pages[i] = sg_page_iter_page(&sg_iter);
i++;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
pages[i++] = sg_page_iter_page(&sg_iter);
if (i == npages)
break;
}
 
// addr = vmap(pages, i, 0, PAGE_KERNEL);
addr = vmap(pages, i, 0, PAGE_KERNEL);
if (addr == NULL) {
DRM_DEBUG_DRIVER("Failed to vmap pages\n");
goto finish;
836,10 → 887,68
 
finish:
if (pages)
free(pages);
drm_free_large(pages);
return (u32*)addr;
}
 
/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
struct drm_i915_gem_object *src_obj,
u32 batch_start_offset,
u32 batch_len)
{
int needs_clflush = 0;
void *src_base, *src;
void *dst = NULL;
int ret;
 
if (batch_len > dest_obj->base.size ||
batch_len + batch_start_offset > src_obj->base.size)
return ERR_PTR(-E2BIG);
 
if (WARN_ON(dest_obj->pages_pin_count == 0))
return ERR_PTR(-ENODEV);
 
ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
if (ret) {
DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
return ERR_PTR(ret);
}
 
src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
if (!src_base) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
ret = -ENOMEM;
goto unpin_src;
}
 
ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
if (ret) {
DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
goto unmap_src;
}
 
dst = vmap_batch(dest_obj, 0, batch_len);
if (!dst) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
ret = -ENOMEM;
goto unmap_src;
}
 
src = src_base + offset_in_page(batch_start_offset);
if (needs_clflush)
drm_clflush_virt_range(src, batch_len);
 
memcpy(dst, src, batch_len);
 
unmap_src:
vunmap(src_base);
unpin_src:
i915_gem_object_unpin_pages(src_obj);
 
return ret ? ERR_PTR(ret) : dst;
}
 
/**
* i915_needs_cmd_parser() - should a given ring use software command parsing?
* @ring: the ring in question
862,7 → 971,7
 
static bool check_cmd(const struct intel_engine_cs *ring,
const struct drm_i915_cmd_descriptor *desc,
const u32 *cmd,
const u32 *cmd, u32 length,
const bool is_master,
bool *oacontrol_set)
{
878,16 → 987,41
}
 
if (desc->flags & CMD_DESC_REGISTER) {
u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
/*
* Get the distance between individual register offset
* fields if the command can perform more than one
* access at a time.
*/
const u32 step = desc->reg.step ? desc->reg.step : length;
u32 offset;
 
for (offset = desc->reg.offset; offset < length;
offset += step) {
const u32 reg_addr = cmd[offset] & desc->reg.mask;
const struct drm_i915_reg_descriptor *reg =
find_reg(ring->reg_table, ring->reg_count,
reg_addr);
 
if (!reg && is_master)
reg = find_reg(ring->master_reg_table,
ring->master_reg_count,
reg_addr);
 
if (!reg) {
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
reg_addr, *cmd, ring->id);
return false;
}
 
/*
* OACONTROL requires some special handling for writes. We
* want to make sure that any batch which enables OA also
* disables it before the end of the batch. The goal is to
* prevent one process from snooping on the perf data from
* another process. To do that, we need to check the value
* that will be written to the register. Hence, limit
* OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
* OACONTROL requires some special handling for
* writes. We want to make sure that any batch which
* enables OA also disables it before the end of the
* batch. The goal is to prevent one process from
* snooping on the perf data from another process. To do
* that, we need to check the value that will be written
* to the register. Hence, limit OACONTROL writes to
* only MI_LOAD_REGISTER_IMM commands.
*/
if (reg_addr == OACONTROL) {
if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
896,23 → 1030,30
}
 
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
*oacontrol_set = (cmd[2] != 0);
*oacontrol_set = (cmd[offset + 1] != 0);
}
 
if (!valid_reg(ring->reg_table,
ring->reg_count, reg_addr)) {
if (!is_master ||
!valid_reg(ring->master_reg_table,
ring->master_reg_count,
reg_addr)) {
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
reg_addr,
*cmd,
ring->id);
/*
* Check the value written to the register against the
* allowed mask/value pair given in the whitelist entry.
*/
if (reg->mask) {
if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
reg_addr);
return false;
}
 
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
(offset + 2 > length ||
(cmd[offset + 1] & reg->mask) != reg->value)) {
DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n",
reg_addr);
return false;
}
}
}
}
 
if (desc->flags & CMD_DESC_BITMASK) {
int i;
957,7 → 1098,9
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
* @ring: the ring on which the batch is to execute
* @batch_obj: the batch buffer in question
* @shadow_batch_obj: copy of the batch buffer in question
* @batch_start_offset: byte offset in the batch at which execution starts
* @batch_len: length of the commands in batch_obj
* @is_master: is the submitting process the drm master?
*
* Parses the specified batch buffer looking for privilege violations as
968,34 → 1111,31
*/
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
u32 batch_len,
bool is_master)
{
int ret = 0;
u32 *cmd, *batch_base, *batch_end;
struct drm_i915_cmd_descriptor default_desc = { 0 };
int needs_clflush = 0;
bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
int ret = 0;
 
ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
if (ret) {
DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
return ret;
batch_base = copy_batch(shadow_batch_obj, batch_obj,
batch_start_offset, batch_len);
if (IS_ERR(batch_base)) {
DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
return PTR_ERR(batch_base);
}
 
batch_base = vmap_batch(batch_obj);
if (!batch_base) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
i915_gem_object_unpin_pages(batch_obj);
return -ENOMEM;
}
/*
* We use the batch length as size because the shadow object is as
* large or larger and copy_batch() will write MI_NOPs to the extra
* space. Parsing should be faster in some cases this way.
*/
batch_end = batch_base + (batch_len / sizeof(*batch_end));
 
if (needs_clflush)
drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);
 
cmd = batch_base + (batch_start_offset / sizeof(*cmd));
batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));
 
cmd = batch_base;
while (cmd < batch_end) {
const struct drm_i915_cmd_descriptor *desc;
u32 length;
1035,7 → 1175,8
break;
}
 
if (!check_cmd(ring, desc, cmd, is_master, &oacontrol_set)) {
if (!check_cmd(ring, desc, cmd, length, is_master,
&oacontrol_set)) {
ret = -EINVAL;
break;
}
1053,10 → 1194,8
ret = -EINVAL;
}
 
// vunmap(batch_base);
vunmap(batch_base);
 
i915_gem_object_unpin_pages(batch_obj);
 
return ret;
}
#endif
1078,6 → 1217,9
* hardware parsing enabled (so does not allow new use cases).
* 2. Allow access to the MI_PREDICATE_SRC0 and
* MI_PREDICATE_SRC1 registers.
* 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
* 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
* 5. GPGPU dispatch compute indirect registers.
*/
return 2;
return 5;
}
/drivers/video/drm/i915/i915_dma.c
36,6 → 36,7
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
44,6 → 45,7
//#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
//#include <acpi/video.h>
#include <linux/pm_runtime.h>
 
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);
 
63,11 → 65,14
case I915_PARAM_CHIPSET_ID:
value = dev->pdev->device;
break;
case I915_PARAM_REVISION:
value = dev->pdev->revision;
break;
case I915_PARAM_HAS_GEM:
value = 1;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
value = dev_priv->num_fence_regs;
break;
case I915_PARAM_HAS_OVERLAY:
value = dev_priv->overlay ? 1 : 0;
88,6 → 93,9
case I915_PARAM_HAS_VEBOX:
value = intel_ring_initialized(&dev_priv->ring[VECS]);
break;
case I915_PARAM_HAS_BSD2:
value = intel_ring_initialized(&dev_priv->ring[VCS2]);
break;
case I915_PARAM_HAS_RELAXED_FENCING:
value = 1;
break;
139,6 → 147,26
case I915_PARAM_HAS_COHERENT_PHYS_GTT:
value = 1;
break;
case I915_PARAM_MMAP_VERSION:
value = 1;
break;
case I915_PARAM_SUBSLICE_TOTAL:
value = INTEL_INFO(dev)->subslice_total;
if (!value)
return -ENODEV;
break;
case I915_PARAM_EU_TOTAL:
value = INTEL_INFO(dev)->eu_total;
if (!value)
return -ENODEV;
break;
case I915_PARAM_HAS_GPU_RESET:
value = i915.enable_hangcheck &&
intel_has_gpu_reset(dev);
break;
case I915_PARAM_HAS_RESOURCE_STREAMER:
value = HAS_RESOURCE_STREAMER(dev);
break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
149,37 → 177,6
return 0;
}
 
#if 0
static int i915_setparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_setparam_t *param = data;
 
switch (param->param) {
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
case I915_SETPARAM_ALLOW_BATCHBUFFER:
/* Reject all old ums/dri params. */
return -ENODEV;
 
case I915_SETPARAM_NUM_USED_FENCES:
if (param->value > dev_priv->num_fence_regs ||
param->value < 0)
return -EINVAL;
/* Userspace can use first N regs */
dev_priv->fence_reg_start = param->value;
break;
default:
DRM_DEBUG_DRIVER("unknown parameter %d\n",
param->param);
return -EINVAL;
}
 
return 0;
}
#endif
 
static int i915_get_bridge_dev(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
200,8 → 197,6
#define DEVEN_MCHBAR_EN (1 << 28)
 
 
 
 
/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
227,15 → 222,16
/* If it's already enabled, don't have to do anything */
if (enabled)
return;
 
dbgprintf("Epic fail\n");
 
#if 0
/*
if (intel_alloc_mchbar_resource(dev))
return;
 
God help us all
*/
dev_priv->mchbar_need_disable = true;
 
DRM_INFO("enable MCHBAR\n");
 
/* Space is allocated or reserved, so enable it. */
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
244,10 → 240,31
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
}
#endif
}
 
static void
intel_teardown_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
 
if (dev_priv->mchbar_need_disable) {
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
temp &= ~DEVEN_MCHBAR_EN;
pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
temp &= ~1;
pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
}
}
 
if (dev_priv->mch_res.start)
release_resource(&dev_priv->mch_res);
}
 
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
317,7 → 334,7
goto cleanup_gem;
 
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(dev_priv);
// intel_hpd_init(dev_priv);
 
/*
* Some ports require correctly set-up hpd registers for detection to
383,6 → 400,39
}
#endif
 
#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
int ret = 0;
 
DRM_INFO("Replacing VGA console driver\n");
 
console_lock();
if (con_is_bound(&vga_con))
ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
if (ret == 0) {
ret = do_unregister_con_driver(&vga_con);
 
/* Ignore "already unregistered". */
if (ret == -ENODEV)
ret = 0;
}
console_unlock();
 
return ret;
}
#endif
 
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
const struct intel_device_info *info = &dev_priv->info;
403,7 → 453,206
#undef SEP_COMMA
}
 
static void cherryview_sseu_info_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_device_info *info;
u32 fuse, eu_dis;
 
info = (struct intel_device_info *)&dev_priv->info;
fuse = I915_READ(CHV_FUSE_GT);
 
info->slice_total = 1;
 
if (!(fuse & CHV_FGT_DISABLE_SS0)) {
info->subslice_per_slice++;
eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
CHV_FGT_EU_DIS_SS0_R1_MASK);
info->eu_total += 8 - hweight32(eu_dis);
}
 
if (!(fuse & CHV_FGT_DISABLE_SS1)) {
info->subslice_per_slice++;
eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
CHV_FGT_EU_DIS_SS1_R1_MASK);
info->eu_total += 8 - hweight32(eu_dis);
}
 
info->subslice_total = info->subslice_per_slice;
/*
* CHV expected to always have a uniform distribution of EU
* across subslices.
*/
info->eu_per_subslice = info->subslice_total ?
info->eu_total / info->subslice_total :
0;
/*
* CHV supports subslice power gating on devices with more than
* one subslice, and supports EU power gating on devices with
* more than one EU pair per subslice.
*/
info->has_slice_pg = 0;
info->has_subslice_pg = (info->subslice_total > 1);
info->has_eu_pg = (info->eu_per_subslice > 2);
}
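 
/*
* Note on the fuse decoding in these sseu_info_init() helpers:
* hweight32()/hweight8() return the population count (number of set
* bits), e.g. hweight32(0x0000000b) == 3; each set bit in a *_DIS fuse
* field is one fused-off EU or subslice, so "8 - hweight32(eu_dis)"
* above is the number of EUs left enabled in that subslice.
*/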
 
static void gen9_sseu_info_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_device_info *info;
int s_max = 3, ss_max = 4, eu_max = 8;
int s, ss;
u32 fuse2, s_enable, ss_disable, eu_disable;
u8 eu_mask = 0xff;
 
info = (struct intel_device_info *)&dev_priv->info;
fuse2 = I915_READ(GEN8_FUSE2);
s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
GEN8_F2_S_ENA_SHIFT;
ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
GEN9_F2_SS_DIS_SHIFT;
 
info->slice_total = hweight32(s_enable);
/*
* The subslice disable field is global, i.e. it applies
* to each of the enabled slices.
*/
info->subslice_per_slice = ss_max - hweight32(ss_disable);
info->subslice_total = info->slice_total *
info->subslice_per_slice;
 
/*
* Iterate through enabled slices and subslices to
* count the total enabled EU.
*/
for (s = 0; s < s_max; s++) {
if (!(s_enable & (0x1 << s)))
/* skip disabled slice */
continue;
 
eu_disable = I915_READ(GEN9_EU_DISABLE(s));
for (ss = 0; ss < ss_max; ss++) {
int eu_per_ss;
 
if (ss_disable & (0x1 << ss))
/* skip disabled subslice */
continue;
 
eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
eu_mask);
 
/*
* Record which subslice(s) has(have) 7 EUs. we
* can tune the hash used to spread work among
* subslices if they are unbalanced.
*/
if (eu_per_ss == 7)
info->subslice_7eu[s] |= 1 << ss;
 
info->eu_total += eu_per_ss;
}
}
 
/*
* SKL is expected to always have a uniform distribution
* of EU across subslices with the exception that any one
* EU in any one subslice may be fused off for die
* recovery. BXT is expected to be perfectly uniform in EU
* distribution.
*/
info->eu_per_subslice = info->subslice_total ?
DIV_ROUND_UP(info->eu_total,
info->subslice_total) : 0;
/*
* SKL supports slice power gating on devices with more than
* one slice, and supports EU power gating on devices with
* more than one EU pair per subslice. BXT supports subslice
* power gating on devices with more than one subslice, and
* supports EU power gating on devices with more than one EU
* pair per subslice.
*/
info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1));
info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
info->has_eu_pg = (info->eu_per_subslice > 2);
}
 
static void broadwell_sseu_info_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_device_info *info;
const int s_max = 3, ss_max = 3, eu_max = 8;
int s, ss;
u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
 
fuse2 = I915_READ(GEN8_FUSE2);
s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
 
eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
(32 - GEN8_EU_DIS0_S1_SHIFT));
eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
(32 - GEN8_EU_DIS1_S2_SHIFT));
 
 
info = (struct intel_device_info *)&dev_priv->info;
info->slice_total = hweight32(s_enable);
 
/*
* The subslice disable field is global, i.e. it applies
* to each of the enabled slices.
*/
info->subslice_per_slice = ss_max - hweight32(ss_disable);
info->subslice_total = info->slice_total * info->subslice_per_slice;
 
/*
* Iterate through enabled slices and subslices to
* count the total enabled EU.
*/
for (s = 0; s < s_max; s++) {
if (!(s_enable & (0x1 << s)))
/* skip disabled slice */
continue;
 
for (ss = 0; ss < ss_max; ss++) {
u32 n_disabled;
 
if (ss_disable & (0x1 << ss))
/* skip disabled subslice */
continue;
 
n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
 
/*
* Record which subslices have 7 EUs.
*/
if (eu_max - n_disabled == 7)
info->subslice_7eu[s] |= 1 << ss;
 
info->eu_total += eu_max - n_disabled;
}
}
 
/*
* BDW is expected to always have a uniform distribution of EU across
* subslices with the exception that any one EU in any one subslice may
* be fused off for die recovery.
*/
info->eu_per_subslice = info->subslice_total ?
DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
 
/*
* BDW supports slice power gating on devices with more than
* one slice.
*/
info->has_slice_pg = (info->slice_total > 1);
info->has_subslice_pg = 0;
info->has_eu_pg = 0;
}
 
/*
* Determine various intel_device_info fields at runtime.
*
* Use it when either:
424,7 → 673,19
 
info = (struct intel_device_info *)&dev_priv->info;
 
if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
/*
* Skylake and Broxton currently don't expose the topmost plane as its
* use is exclusive with the legacy cursor and we only want to expose
* one of those, not both. Until we can safely expose the topmost plane
* as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
* we don't expose the topmost plane at all to prevent ABI breakage
* down the line.
*/
if (IS_BROXTON(dev)) {
info->num_sprites[PIPE_A] = 2;
info->num_sprites[PIPE_B] = 2;
info->num_sprites[PIPE_C] = 1;
} else if (IS_VALLEYVIEW(dev))
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
else
457,8 → 718,46
info->num_pipes = 0;
}
}
 
/* Initialize slice/subslice/EU info */
if (IS_CHERRYVIEW(dev))
cherryview_sseu_info_init(dev);
else if (IS_BROADWELL(dev))
broadwell_sseu_info_init(dev);
else if (INTEL_INFO(dev)->gen >= 9)
gen9_sseu_info_init(dev);
 
DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
DRM_DEBUG_DRIVER("has slice power gating: %s\n",
info->has_slice_pg ? "y" : "n");
DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
info->has_subslice_pg ? "y" : "n");
DRM_DEBUG_DRIVER("has EU power gating: %s\n",
info->has_eu_pg ? "y" : "n");
}
 
static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
if (!IS_VALLEYVIEW(dev_priv))
return;
 
/*
* IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
* CHV x1 PHY (DP/HDMI D)
* IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
*/
if (IS_CHERRYVIEW(dev_priv)) {
DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
} else {
DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
}
}
 
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
479,7 → 778,6
 
info = (struct intel_device_info *) flags;
 
 
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
498,8 → 796,10
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->csr_lock);
mutex_init(&dev_priv->av_mutex);
 
intel_pm_setup(dev);
 
545,14 → 845,21
 
intel_uncore_init(dev);
 
/* Load CSR Firmware for SKL */
intel_csr_ucode_init(dev);
 
ret = i915_gem_gtt_init(dev);
if (ret)
goto out_regs;
goto out_freecsr;
 
ret = i915_kick_out_vgacon(dev_priv);
if (ret) {
DRM_ERROR("failed to remove conflicting VGA console\n");
goto out_gtt;
}
 
pci_set_master(dev->pdev);
 
/* overlay on gen2 is broken and can't address above 1G */
 
/* 965GM sometimes incorrectly writes to hardware status page (HWS)
* using 32bit addressing, overwriting memory if HWS is located
571,6 → 878,7
goto out_gtt;
}
 
 
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
* by the GPU. i915_gem_retire_requests() is called directly when we
601,8 → 909,6
intel_setup_gmbus(dev);
intel_opregion_setup(dev);
 
intel_setup_bios(dev);
 
i915_gem_load(dev);
 
/* On the 945G/GM, the chipset reports the MSI capability on the
619,6 → 925,8
 
intel_device_info_runtime_init(dev);
 
intel_init_dpio(dev_priv);
 
// if (INTEL_INFO(dev)->num_pipes) {
// ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
// if (ret)
627,14 → 935,18
 
intel_power_domains_init(dev_priv);
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
goto out_power_well;
}
}
 
/*
* Notify a valid surface after modesetting,
* when running inside a VM.
*/
if (intel_vgpu_active(dev))
I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
 
if (INTEL_INFO(dev)->num_pipes) {
/* Must be done after probing outputs */
655,7 → 967,8
 
out_mtrrfree:
out_gtt:
out_regs:
i915_global_gtt_cleanup(dev);
out_freecsr:
put_bridge:
free_priv:
kfree(dev_priv);
663,12 → 976,13
}
 
#if 0
 
int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
i915_audio_component_cleanup(dev_priv);
 
ret = i915_gem_suspend(dev);
if (ret) {
DRM_ERROR("failed to idle hardware: %d\n", ret);
675,13 → 989,14
return ret;
}
 
intel_power_domains_fini(dev_priv);
 
intel_gpu_ips_teardown();
 
i915_teardown_sysfs(dev);
 
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
unregister_shrinker(&dev_priv->mm.shrinker);
 
io_mapping_free(dev_priv->gtt.mappable);
arch_phys_wc_del(dev_priv->gtt.mtrr);
688,7 → 1003,10
 
acpi_video_unregister();
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_fini(dev);
 
drm_vblank_cleanup(dev);
 
intel_modeset_cleanup(dev);
 
/*
700,14 → 1018,16
dev_priv->vbt.child_dev = NULL;
dev_priv->vbt.child_dev_num = 0;
}
kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
 
vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
 
/* Free error state after interrupts are fully disabled. */
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
cancel_work_sync(&dev_priv->gpu_error.work);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_destroy_error_state(dev);
 
if (dev->pdev->msi_enabled)
715,22 → 1035,25
 
intel_opregion_fini(dev);
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Flush any outstanding unpin_work. */
flush_workqueue(dev_priv->wq);
 
intel_guc_ucode_fini(dev);
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
intel_fbc_cleanup_cfb(dev_priv);
i915_gem_cleanup_stolen(dev);
}
 
intel_csr_ucode_fini(dev);
 
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
 
destroy_workqueue(dev_priv->dp_wq);
destroy_workqueue(dev_priv->hotplug.dp_wq);
destroy_workqueue(dev_priv->wq);
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
pm_qos_remove_request(&dev_priv->pm_qos);
 
i915_global_gtt_cleanup(dev);
739,9 → 1062,9
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
 
if (dev_priv->slab)
kmem_cache_destroy(dev_priv->slab);
 
kmem_cache_destroy(dev_priv->requests);
kmem_cache_destroy(dev_priv->vmas);
kmem_cache_destroy(dev_priv->objects);
pci_dev_put(dev_priv->bridge_dev);
kfree(dev_priv);
 
786,7 → 1109,6
i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
intel_modeset_preclose(dev, file);
}
 
799,6 → 1121,13
kfree(file_priv);
}
 
static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
return -ENODEV;
}
 
const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
807,7 → 1136,7
DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
817,50 → 1146,42
DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
};
 
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
 
/*
* This is really ugly: Because old userspace abused the linux agp interface to
* manage the gtt, we need to claim that all intel devices are agp. For
* otherwise the drm core refuses to initialize the agp support code.
*/
int i915_driver_device_is_agp(struct drm_device *dev)
{
return 1;
}
#endif
/drivers/video/drm/i915/i915_drv.c
27,7 → 27,7
*
*/
 
//#include <linux/device.h>
#include <linux/device.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
35,6 → 35,7
#include "intel_drv.h"
 
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <drm/i915_pciids.h>
43,7 → 44,6
 
#include <syscall.h>
 
#
static struct drm_driver driver;
 
#define GEN_DEFAULT_PIPEOFFSETS \
324,7 → 324,6
};
 
static const struct intel_device_info intel_cherryview_info = {
.is_preliminary = 1,
.gen = 8, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
335,7 → 334,6
};
 
static const struct intel_device_info intel_skylake_info = {
.is_preliminary = 1,
.is_skylake = 1,
.gen = 9, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
342,11 → 340,38
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_skylake_gt3_info = {
.is_skylake = 1,
.gen = 9, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_broxton_info = {
.is_preliminary = 1,
.gen = 9,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.num_pipes = 3,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
380,7 → 405,10
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
INTEL_CHV_IDS(&intel_cherryview_info), \
INTEL_SKL_IDS(&intel_skylake_info)
INTEL_SKL_GT1_IDS(&intel_skylake_info), \
INTEL_SKL_GT2_IDS(&intel_skylake_info), \
INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), \
INTEL_BXT_IDS(&intel_broxton_info)
 
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
392,7 → 420,34
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
enum intel_pch ret = PCH_NOP;
 
/*
* In a virtualized passthrough environment we can be in a
* setup where the ISA bridge is not able to be passed through.
* In this case, a south bridge can be emulated and we have to
* make an educated guess as to which PCH is really there.
*/
 
if (IS_GEN5(dev)) {
ret = PCH_IBX;
DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
ret = PCH_CPT;
DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
ret = PCH_LPT;
DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
} else if (IS_SKYLAKE(dev)) {
ret = PCH_SPT;
DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
}
 
return ret;
}
 
void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
438,19 → 493,13
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(IS_HSW_ULT(dev));
} else if (IS_BROADWELL(dev)) {
dev_priv->pch_type = PCH_LPT;
dev_priv->pch_id =
INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
DRM_DEBUG_KMS("This is Broadwell, assuming "
"LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(!IS_HSW_ULT(dev));
WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
459,6 → 508,8
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev));
} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
dev_priv->pch_type = intel_virt_detect_pch(dev);
} else
continue;
 
497,6 → 548,26
}
 
#if 0
void i915_firmware_load_error_print(const char *fw_path, int err)
{
DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
 
/*
* If the reason is not known assume -ENOENT since that's the most
* usual failure mode.
*/
if (!err)
err = -ENOENT;
 
if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
return;
 
DRM_ERROR(
"The driver is built-in, so to load the firmware you need to\n"
"include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
"in your initrd/initramfs image.\n");
}
 
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
515,12 → 586,15
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume);
static int skl_resume_prepare(struct drm_i915_private *dev_priv);
static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
 
 
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
pci_power_t opregion_target_state;
int error;
 
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
535,10 → 609,6
 
pci_save_state(dev->pdev);
 
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
int error;
 
error = i915_gem_suspend(dev);
if (error) {
dev_err(&dev->pdev->dev,
546,6 → 616,8
return error;
}
 
intel_guc_suspend(dev);
 
intel_suspend_gt_powersave(dev);
 
/*
553,8 → 625,7
* for _thaw. Also, power gate the CRTC power wells.
*/
drm_modeset_lock_all(dev);
for_each_crtc(dev, crtc)
intel_crtc_control(crtc, false);
intel_display_suspend(dev);
drm_modeset_unlock_all(dev);
 
intel_dp_mst_suspend(dev);
565,7 → 636,6
intel_suspend_encoders(dev_priv);
 
intel_suspend_hw(dev);
}
 
i915_gem_suspend_gtt_mappings(dev);
 
590,7 → 660,7
return 0;
}
 
static int i915_drm_suspend_late(struct drm_device *drm_dev)
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
struct drm_i915_private *dev_priv = drm_dev->dev_private;
int ret;
604,12 → 674,25
}
 
pci_disable_device(drm_dev->pdev);
/*
* During hibernation on some platforms the BIOS may try to access
* the device even though it's already in D3 and hang the machine. So
* leave the device in D0 on those platforms and hope the BIOS will
* power down the device properly. The issue was seen on multiple old
* GENs with different BIOS vendors, so having an explicit blacklist
* is impractical; apply the workaround on everything pre GEN6. The
* platforms where the issue was seen:
* Lenovo Thinkpad X301, X61s, X60, T60, X41
* Fujitsu FSC S7110
* Acer Aspire 1830T
*/
if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
pci_set_power_state(drm_dev->pdev, PCI_D3hot);
 
return 0;
}
 
int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
int error;
 
630,7 → 713,7
if (error)
return error;
 
return i915_drm_suspend_late(dev);
return i915_drm_suspend_late(dev, false);
}
 
static int i915_drm_resume(struct drm_device *dev)
637,29 → 720,34
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
 
i915_restore_state(dev);
intel_opregion_setup(dev);
 
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_init_pch_refclk(dev);
drm_mode_config_reset(dev);
 
/*
* Interrupts have to be enabled before any batches are run. If not, the
* GPU will hang. i915_gem_init_hw() will initiate batches to
* update/restore the context.
*
* Modeset enabling in intel_modeset_init_hw() also needs working
* interrupts.
*/
intel_runtime_pm_enable_interrupts(dev_priv);
 
mutex_lock(&dev->struct_mutex);
if (i915_gem_init_hw(dev)) {
DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
}
mutex_unlock(&dev->struct_mutex);
 
/* We need working interrupts for modeset enabling ... */
intel_runtime_pm_enable_interrupts(dev_priv);
intel_guc_resume(dev);
 
intel_modeset_init_hw(dev);
 
669,7 → 757,7
spin_unlock_irq(&dev_priv->irq_lock);
 
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
intel_display_resume(dev);
drm_modeset_unlock_all(dev);
 
intel_dp_mst_resume(dev);
683,7 → 771,6
intel_hpd_init(dev_priv);
/* Config may have changed between suspend and resume */
drm_helper_hpd_irq_event(dev);
}
 
intel_opregion_init(dev);
 
722,11 → 809,16
if (IS_VALLEYVIEW(dev_priv))
ret = vlv_resume_prepare(dev_priv, false);
if (ret)
DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
ret);
 
intel_uncore_early_sanitize(dev, true);
 
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv);
else if (IS_SKYLAKE(dev_priv))
ret = skl_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv);
 
intel_uncore_sanitize(dev);
735,7 → 827,7
return ret;
}
 
int i915_resume_legacy(struct drm_device *dev)
int i915_resume_switcheroo(struct drm_device *dev)
{
int ret;
 
770,8 → 862,7
bool simulated;
int ret;
 
if (!i915.reset)
return 0;
intel_reset_gt_powersave(dev);
 
mutex_lock(&dev->struct_mutex);
 
815,7 → 906,7
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 
/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
dev_priv->gpu_error.reload_in_reset = true;
 
830,12 → 921,6
}
 
/*
* FIXME: This races pretty badly against concurrent holders of
* ring interrupts. This is possible since we've started to drop
* dev->struct_mutex in select places when waiting for the gpu.
*/
 
/*
* rps/rc6 re-init is necessary to restore state lost after the
* reset and the re-install of gt irqs. Skip for ironlake per
* previous concerns that it doesn't respond well to some forms
843,9 → 928,6
*/
if (INTEL_INFO(dev)->gen > 5)
intel_enable_gt_powersave(dev);
} else {
mutex_unlock(&dev->struct_mutex);
}
 
return 0;
}
869,8 → 951,6
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
 
driver.driver_features &= ~(DRIVER_USE_AGP);
 
return drm_get_pci_dev(pdev, ent, &driver);
}
 
900,11 → 980,10
 
static int i915_pm_suspend_late(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
 
/*
* We have a suspedn ordering issue with the snd-hda driver also
* We have a suspend ordering issue with the snd-hda driver also
* requiring our device to be power up. Due to the lack of a
* parent/child relationship we currently solve this with an late
* suspend hook.
915,13 → 994,22
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
 
return i915_drm_suspend_late(drm_dev);
return i915_drm_suspend_late(drm_dev, false);
}
 
static int i915_pm_poweroff_late(struct device *dev)
{
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
 
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
 
return i915_drm_suspend_late(drm_dev, true);
}
 
static int i915_pm_resume_early(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
 
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
931,8 → 1019,7
 
static int i915_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
 
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
940,6 → 1027,15
return i915_drm_resume(drm_dev);
}
 
static int skl_suspend_complete(struct drm_i915_private *dev_priv)
{
/* Enabling DC6 is not a hard requirement to enter runtime D3 */
 
skl_uninit_cdclk(dev_priv);
 
return 0;
}
 
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
hsw_enable_pc8(dev_priv);
947,7 → 1043,49
return 0;
}
 
static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
/* TODO: when DC5 support is added disable DC5 here. */
 
broxton_ddi_phy_uninit(dev);
broxton_uninit_cdclk(dev);
bxt_enable_dc9(dev_priv);
 
return 0;
}
 
static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
/* TODO: when CSR FW support is added make sure the FW is loaded */
 
bxt_disable_dc9(dev_priv);
 
/*
* TODO: when DC5 support is added enable DC5 here if the CSR FW
* is available.
*/
broxton_init_cdclk(dev);
broxton_ddi_phy_init(dev);
intel_prepare_ddi(dev);
 
return 0;
}
 
static int skl_resume_prepare(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
skl_init_cdclk(dev_priv);
intel_csr_load_program(dev);
 
return 0;
}
 
/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
* defined in the VLV2_S0IXRegs document. This document marks all Gunit
986,10 → 1124,10
s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
 
for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
 
s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
 
s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
s->ecochk = I915_READ(GAM_ECOCHK);
1030,7 → 1168,7
s->pm_ier = I915_READ(GEN6_PMIER);
 
for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);
s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
 
/* GT SA CZ domain, 0x100000-0x138124 */
s->tilectl = I915_READ(TILECTL);
1042,6 → 1180,7
/* Gunit-Display CZ domain, 0x182028-0x1821CF */
s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
s->pcbr = I915_READ(VLV_PCBR);
s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
 
/*
1067,10 → 1206,10
I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
 
for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
 
I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);
I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
 
I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
I915_WRITE(GAM_ECOCHK, s->ecochk);
1111,7 → 1250,7
I915_WRITE(GEN6_PMIER, s->pm_ier);
 
for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);
I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
 
/* GT SA CZ domain, 0x100000-0x138124 */
I915_WRITE(TILECTL, s->tilectl);
1136,6 → 1275,7
/* Gunit-Display CZ domain, 0x182028-0x1821CF */
I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
I915_WRITE(VLV_PCBR, s->pcbr);
I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
#endif
1145,19 → 1285,7
u32 val;
int err;
 
val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
 
#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
/* Wait for a previous force-off to settle */
if (force_on) {
err = wait_for(!COND, 20);
if (err) {
DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
I915_READ(VLV_GTLC_SURVIVABILITY_REG));
return err;
}
}
 
val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
1260,6 → 1388,8
err = vlv_allow_gt_wake(dev_priv, false);
if (err)
goto err2;
 
if (!IS_CHERRYVIEW(dev_priv->dev))
vlv_save_gunit_s0ix_state(dev_priv);
 
err = vlv_force_gfx_clock(dev_priv, false);
1291,6 → 1421,7
*/
ret = vlv_force_gfx_clock(dev_priv, true);
 
if (!IS_CHERRYVIEW(dev_priv->dev))
vlv_restore_gunit_s0ix_state(dev_priv);
 
err = vlv_allow_gt_wake(dev_priv, true);
1324,8 → 1455,6
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
return -ENODEV;
 
assert_force_wake_inactive(dev_priv);
 
DRM_DEBUG_KMS("Suspending device\n");
 
/*
1352,6 → 1481,8
i915_gem_release_all_mmaps(dev_priv);
mutex_unlock(&dev->struct_mutex);
 
intel_guc_suspend(dev);
 
intel_suspend_gt_powersave(dev);
intel_runtime_pm_disable_interrupts(dev_priv);
 
1363,7 → 1494,8
return ret;
}
 
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
intel_uncore_forcewake_reset(dev, false);
dev_priv->pm.suspended = true;
 
/*
1370,8 → 1502,16
* FIXME: We really should find a document that references the arguments
* used below!
*/
if (IS_HASWELL(dev)) {
if (IS_BROADWELL(dev)) {
/*
* On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
* being detected, and the call we do at intel_runtime_resume()
* won't be able to restore them. Since PCI_D3hot matches the
* actual specification and appears to be working, use it.
*/
intel_opregion_notify_adapter(dev, PCI_D3hot);
} else {
/*
* current versions of firmware which depend on this opregion
* notification have repurposed the D1 definition to mean
* "runtime suspended" vs. what you would normally expect (D3)
1379,18 → 1519,10
* the suspend path.
*/
intel_opregion_notify_adapter(dev, PCI_D1);
} else {
/*
* On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
* being detected, and the call we do at intel_runtime_resume()
* won't be able to restore them. Since PCI_D3hot matches the
* actual specification and appears to be working, use it. Let's
* assume the other non-Haswell platforms will stay the same as
* Broadwell.
*/
intel_opregion_notify_adapter(dev, PCI_D3hot);
}
 
assert_forcewakes_inactive(dev_priv);
 
DRM_DEBUG_KMS("Device suspended\n");
return 0;
}
1410,8 → 1542,15
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
 
intel_guc_resume(dev);
 
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev);
 
if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv);
else if (IS_SKYLAKE(dev))
ret = skl_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
1425,6 → 1564,15
gen6_update_ring_freq(dev);
 
intel_runtime_pm_enable_interrupts(dev_priv);
 
/*
* On VLV/CHV display interrupts are part of the display
* power well, so hpd is reinitialized from there. For
* everyone else do it here.
*/
if (!IS_VALLEYVIEW(dev_priv))
intel_hpd_init(dev_priv);
 
intel_enable_gt_powersave(dev);
 
if (ret)
1441,12 → 1589,15
*/
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
int ret;
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
if (IS_BROXTON(dev_priv))
ret = bxt_suspend_complete(dev_priv);
else if (IS_SKYLAKE(dev_priv))
ret = skl_suspend_complete(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = hsw_suspend_complete(dev_priv);
else if (IS_VALLEYVIEW(dev))
else if (IS_VALLEYVIEW(dev_priv))
ret = vlv_suspend_complete(dev_priv);
else
ret = 0;
1484,7 → 1635,7
.thaw_early = i915_pm_resume_early,
.thaw = i915_pm_resume,
.poweroff = i915_pm_suspend,
.poweroff_late = i915_pm_suspend_late,
.poweroff_late = i915_pm_poweroff_late,
.restore_early = i915_pm_resume_early,
.restore = i915_pm_resume,
 
1519,9 → 1670,8
* deal with them for Intel hardware.
*/
.driver_features =
DRIVER_USE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
DRIVER_RENDER,
DRIVER_RENDER | DRIVER_MODESET,
.load = i915_driver_load,
// .unload = i915_driver_unload,
.open = i915_driver_open,
1528,12 → 1678,8
// .lastclose = i915_driver_lastclose,
// .preclose = i915_driver_preclose,
// .postclose = i915_driver_postclose,
// .set_busid = drm_pci_set_busid,
 
/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
// .suspend = i915_suspend,
// .resume = i915_resume,
 
// .device_is_agp = i915_driver_device_is_agp,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
1587,4 → 1733,8
}
 
 
MODULE_AUTHOR("Tungsten Graphics, Inc.");
MODULE_AUTHOR("Intel Corporation");
 
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
/drivers/video/drm/i915/i915_drv.h
31,6 → 31,7
#define _I915_DRV_H_
 
#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>
 
#include "i915_reg.h"
#include "intel_bios.h"
38,7 → 39,7
#include "intel_lrc.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
//#include <linux/io-mapping.h>
#include <linux/scatterlist.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
46,21 → 47,94
#include <drm/drm_gem.h>
//#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/kref.h>
#include "intel_guc.h"
 
#include <linux/spinlock.h>
#include <linux/err.h>
 
extern int i915_fbsize;
extern struct drm_i915_gem_object *main_fb_obj;
extern struct drm_framebuffer *main_framebuffer;
 
static inline struct drm_i915_gem_object *get_fb_obj(void)
{
return main_fb_obj;
}
 
#define ioread32(addr) readl(addr)
static inline u8 inb(u16 port)
{
u8 v;
asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
return v;
}
 
static inline void outb(u8 v, u16 port)
{
asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
}
 
 
/* General customization:
*/
 
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20141121"
#define DRIVER_DATE "20151010"
 
#undef WARN_ON
#define WARN_ON(x) WARN(x, "WARN_ON(" #x ")")
/* Many gcc versions seem to not see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
bool __i915_warn_cond = (x); \
if (__builtin_constant_p(__i915_warn_cond)) \
BUILD_BUG_ON(__i915_warn_cond); \
WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
#endif
 
#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )
 
#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
(long) (x), __func__);
 
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
* which may not necessarily be a user visible problem. This will either
* WARN() or DRM_ERROR() depending on the verbose_state_checks module parameter, to
* enable distros and users to tailor their preferred amount of i915 abrt
* spam.
*/
#define I915_STATE_WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) { \
if (i915.verbose_state_checks) \
WARN(1, format); \
else \
DRM_ERROR(format); \
} \
unlikely(__ret_warn_on); \
})
 
#define I915_STATE_WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) { \
if (i915.verbose_state_checks) \
WARN(1, "WARN_ON(" #condition ")\n"); \
else \
DRM_ERROR("WARN_ON(" #condition ")\n"); \
} \
unlikely(__ret_warn_on); \
})
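 
/*
* Illustrative use of the two macros above (hypothetical call site; the
* crtc/pipe names are assumptions, not code from this file):
*
*	I915_STATE_WARN(crtc->active,
*			"pipe %c still active after disable\n",
*			pipe_name(crtc->pipe));
*
*	I915_STATE_WARN_ON(!mutex_is_locked(&dev->struct_mutex));
*
* Both evaluate the condition exactly once and return its truth value,
* so they can also be used directly inside an if ().
*/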
 
static inline const char *yesno(bool v)
{
return v ? "yes" : "no";
}
 
enum pipe {
INVALID_PIPE = -1,
PIPE_A = 0,
81,17 → 155,17
#define transcoder_name(t) ((t) + 'A')
 
/*
* This is the maximum (across all platforms) number of planes (primary +
* sprites) that can be active at the same time on one pipe.
*
* This value doesn't count the cursor plane.
* I915_MAX_PLANES in the enum below is the maximum (across all platforms)
* number of planes per CRTC. Not all platforms really have this many planes,
* which means some arrays of size I915_MAX_PLANES may have unused entries
* between the topmost sprite plane and the cursor plane.
*/
#define I915_MAX_PLANES 3
 
enum plane {
PLANE_A = 0,
PLANE_B,
PLANE_C,
PLANE_CURSOR,
I915_MAX_PLANES,
};
#define plane_name(p) ((p) + 'A')
 
138,6 → 212,7
POWER_DOMAIN_PORT_DDI_C_4_LANES,
POWER_DOMAIN_PORT_DDI_D_2_LANES,
POWER_DOMAIN_PORT_DDI_D_4_LANES,
POWER_DOMAIN_PORT_DDI_E_2_LANES,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
144,6 → 219,11
POWER_DOMAIN_VGA,
POWER_DOMAIN_AUDIO,
POWER_DOMAIN_PLLS,
POWER_DOMAIN_AUX_A,
POWER_DOMAIN_AUX_B,
POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D,
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_INIT,
 
POWER_DOMAIN_NUM,
158,17 → 238,51
 
enum hpd_pin {
HPD_NONE = 0,
HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
HPD_CRT,
HPD_SDVO_B,
HPD_SDVO_C,
HPD_PORT_A,
HPD_PORT_B,
HPD_PORT_C,
HPD_PORT_D,
HPD_PORT_E,
HPD_NUM_PINS
};
 
#define for_each_hpd_pin(__pin) \
for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
 
struct i915_hotplug {
struct work_struct hotplug_work;
 
struct {
unsigned long last_jiffies;
int count;
enum {
HPD_ENABLED = 0,
HPD_DISABLED = 1,
HPD_MARK_DISABLED = 2
} state;
} stats[HPD_NUM_PINS];
u32 event_bits;
struct delayed_work reenable_work;
 
struct intel_digital_port *irq_port[I915_MAX_PORTS];
u32 long_port_mask;
u32 short_port_mask;
struct work_struct dig_port_work;
 
/*
* If we get an HPD irq from DP and an HPD irq from non-DP,
* the non-DP HPD could block the workqueue on acquiring a
* mode config mutex that userspace may have taken. However,
* userspace is waiting on the DP workqueue to run, which is
* blocked behind the non-DP one.
*/
struct workqueue_struct *dp_wq;
};
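 
/*
* Sketch of the split described above (assumed irq-side dispatch, in the
* style of the hpd irq handler; not code from this file):
*
*	queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
*	schedule_work(&dev_priv->hotplug.hotplug_work);
*
* Digital-port (DP) work runs on the dedicated dp_wq so it cannot be
* starved by a non-DP handler that is stuck on a modeset lock.
*/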
 
#define I915_GEM_GPU_DOMAINS \
(I915_GEM_DOMAIN_RENDER | \
I915_GEM_DOMAIN_SAMPLER | \
178,13 → 292,29
 
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_plane(pipe, p) \
for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++)
#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
#define for_each_plane(__dev_priv, __pipe, __p) \
for ((__p) = 0; \
(__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
(__p)++)
#define for_each_sprite(__dev_priv, __p, __s) \
for ((__s) = 0; \
(__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
(__s)++)
 
#define for_each_crtc(dev, crtc) \
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
 
#define for_each_intel_plane(dev, intel_plane) \
list_for_each_entry(intel_plane, \
&dev->mode_config.plane_list, \
base.head)
 
#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
list_for_each_entry(intel_plane, \
&(dev)->mode_config.plane_list, \
base.head) \
if ((intel_plane)->pipe == (intel_crtc)->pipe)
 
#define for_each_intel_crtc(dev, intel_crtc) \
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
 
193,6 → 323,11
&(dev)->mode_config.encoder_list, \
base.head)
 
#define for_each_intel_connector(dev, intel_connector) \
list_for_each_entry(intel_connector, \
&dev->mode_config.connector_list, \
base.head)
 
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc))
209,6 → 344,30
struct i915_mm_struct;
struct i915_mmu_object;
 
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
struct drm_file *file;
 
struct {
spinlock_t lock;
struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
* chosen to prevent the CPU getting more than a frame ahead of the GPU
* (when using lax throttling for the frontbuffer). We also use it to
* offer free GPU waitboosts for severely congested workloads.
*/
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
} mm;
struct idr context_idr;
 
struct intel_rps_client {
struct list_head link;
unsigned boosts;
} rps;
 
struct intel_engine_cs *bsd_ring;
};
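 
/*
* Sketch of how the throttle window above is consumed (assumed scan, in
* the style of the ring throttle ioctl; not code from this file):
*
*	unsigned long recent = jiffies - DRM_I915_THROTTLE_JIFFIES;
*	struct drm_i915_gem_request *request;
*
*	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
*		if (time_after(request->emitted_jiffies, recent))
*			break;	// everything from here on is recent enough
*	}
*/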
 
enum intel_dpll_id {
DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
/* real shared dpll ids must be >= 0 */
217,6 → 376,8
/* hsw/bdw */
DPLL_ID_WRPLL1 = 0,
DPLL_ID_WRPLL2 = 1,
DPLL_ID_SPLL = 2,
 
/* skl */
DPLL_ID_SKL_DPLL1 = 0,
DPLL_ID_SKL_DPLL2 = 1,
233,11 → 394,12
 
/* hsw, bdw */
uint32_t wrpll;
uint32_t spll;
 
/* skl */
/*
* DPLL_CTRL1 has 6 bits for each DPLL. We store those in
* lower part of crtl1 and they get shifted into position when writing
* lower part of ctrl1 and they get shifted into position when writing
* the register. This allows us to easily compare the state to share
* the DPLL.
*/
244,6 → 406,10
uint32_t ctrl1;
/* HDMI only, 0 when used for DP */
uint32_t cfgcr1, cfgcr2;
 
/* bxt */
uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
pcsdw12;
};
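 
/*
* Sketch of the ctrl1 packing described above (the 6-bits-per-DPLL field
* width is the documented layout; the register write is a sketch in the
* style of the skl pll code):
*
*	val = I915_READ(DPLL_CTRL1);
*	val &= ~(0x3f << (id * 6));		// clear this DPLL's field
*	val |= hw_state->ctrl1 << (id * 6);	// shift stored bits into place
*	I915_WRITE(DPLL_CTRL1, val);
*/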
 
struct intel_shared_dpll_config {
253,7 → 419,6
 
struct intel_shared_dpll {
struct intel_shared_dpll_config config;
struct intel_shared_dpll_config *new_config;
 
int active; /* count of number of active CRTCs (i.e. DPMS on) */
bool on; /* is the PLL actually active? Disabled during modeset */
313,14 → 478,14
struct opregion_asle;
 
struct intel_opregion {
struct opregion_header __iomem *header;
struct opregion_acpi __iomem *acpi;
struct opregion_swsci __iomem *swsci;
struct opregion_header *header;
struct opregion_acpi *acpi;
struct opregion_swsci *swsci;
u32 swsci_gbda_sub_functions;
u32 swsci_sbcb_sub_functions;
struct opregion_asle __iomem *asle;
void __iomem *vbt;
u32 __iomem *lid_state;
struct opregion_asle *asle;
void *vbt;
u32 *lid_state;
struct work_struct asle_work;
};
#define OPREGION_SIZE (8*1024)
355,6 → 520,7
struct timeval time;
 
char error_msg[128];
int iommu;
u32 reset_count;
u32 suspend_count;
 
368,6 → 534,8
u32 forcewake;
u32 error; /* gen6+ */
u32 err_int; /* gen7 */
u32 fault_data0; /* gen8, gen9 */
u32 fault_data1; /* gen8, gen9 */
u32 done_reg;
u32 gac_eco;
u32 gam_ecochk;
377,6 → 545,7
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
struct drm_i915_error_object *semaphore_obj;
 
struct drm_i915_error_ring {
bool valid;
393,6 → 562,7
u32 semaphore_seqno[I915_NUM_RINGS - 1];
 
/* Register state */
u32 start;
u32 tail;
u32 head;
u32 ctl;
413,7 → 583,7
 
struct drm_i915_error_object {
int page_count;
u32 gtt_offset;
u64 gtt_offset;
u32 *pages[0];
} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
 
438,8 → 608,8
struct drm_i915_error_buffer {
u32 size;
u32 name;
u32 rseqno, wseqno;
u32 gtt_offset;
u32 rseqno[I915_NUM_RINGS], wseqno;
u64 gtt_offset;
u32 read_domains;
u32 write_domain;
s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
458,16 → 628,13
 
struct intel_connector;
struct intel_encoder;
struct intel_crtc_config;
struct intel_plane_config;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
 
struct drm_i915_display_funcs {
bool (*fbc_enabled)(struct drm_device *dev);
void (*enable_fbc)(struct drm_crtc *crtc);
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
/**
484,7 → 651,7
* Returns true on success, false on failure.
*/
bool (*find_dpll)(const struct intel_limit *limit,
struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
493,20 → 660,21
struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enable, bool scaled);
void (*modeset_global_resources)(struct drm_device *dev);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
struct intel_crtc_config *);
void (*get_plane_config)(struct intel_crtc *,
struct intel_plane_config *);
int (*crtc_compute_clock)(struct intel_crtc *crtc);
struct intel_crtc_state *);
void (*get_initial_plane_config)(struct intel_crtc *,
struct intel_initial_plane_config *);
int (*crtc_compute_clock)(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
void (*crtc_enable)(struct drm_crtc *crtc);
void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
void (*audio_codec_enable)(struct drm_connector *connector,
struct intel_encoder *encoder,
struct drm_display_mode *mode);
const struct drm_display_mode *adjusted_mode);
void (*audio_codec_disable)(struct intel_encoder *encoder);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
513,7 → 681,7
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
struct drm_i915_gem_request *req,
uint32_t flags);
void (*update_primary_plane)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
524,20 → 692,30
/* render clock increase/decrease */
/* display clock increase/decrease */
/* pll clock increase/decrease */
};
 
int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe);
uint32_t (*get_backlight)(struct intel_connector *connector);
void (*set_backlight)(struct intel_connector *connector,
uint32_t level);
void (*disable_backlight)(struct intel_connector *connector);
void (*enable_backlight)(struct intel_connector *connector);
enum forcewake_domain_id {
FW_DOMAIN_ID_RENDER = 0,
FW_DOMAIN_ID_BLITTER,
FW_DOMAIN_ID_MEDIA,
 
FW_DOMAIN_ID_COUNT
};
 
enum forcewake_domains {
FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
FORCEWAKE_ALL = (FORCEWAKE_RENDER |
FORCEWAKE_BLITTER |
FORCEWAKE_MEDIA)
};
 
struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv,
int fw_engine);
enum forcewake_domains domains);
void (*force_wake_put)(struct drm_i915_private *dev_priv,
int fw_engine);
enum forcewake_domains domains);
 
uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
560,15 → 738,48
struct intel_uncore_funcs funcs;
 
unsigned fifo_count;
unsigned forcewake_count;
enum forcewake_domains fw_domains;
 
unsigned fw_rendercount;
unsigned fw_mediacount;
unsigned fw_blittercount;
struct intel_uncore_forcewake_domain {
struct drm_i915_private *i915;
enum forcewake_domain_id id;
unsigned wake_count;
struct timer_list timer;
u32 reg_set;
u32 val_set;
u32 val_clear;
u32 reg_ack;
u32 reg_post;
u32 val_reset;
} fw_domain[FW_DOMAIN_ID_COUNT];
};
 
struct timer_list force_wake_timer;
/* Iterate over initialised fw domains */
#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
(i__) < FW_DOMAIN_ID_COUNT; \
(i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
 
#define for_each_fw_domain(domain__, dev_priv__, i__) \
for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
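 
/*
* Example walk over the initialised domains (hypothetical caller, in the
* style of the uncore suspend/sanitize paths):
*
*	struct intel_uncore_forcewake_domain *domain;
*	enum forcewake_domains held = 0;
*	int id;
*
*	for_each_fw_domain(domain, dev_priv, id)
*		if (domain->wake_count)
*			held |= 1 << id;
*/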
 
enum csr_state {
FW_UNINITIALIZED = 0,
FW_LOADED,
FW_FAILED
};
 
struct intel_csr {
const char *fw_path;
uint32_t *dmc_payload;
uint32_t dmc_fw_size;
uint32_t mmio_count;
uint32_t mmioaddr[8];
uint32_t mmiodata[8];
enum csr_state state;
};
 
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
func(is_mobile) sep \
func(is_i85x) sep \
612,6 → 823,18
int trans_offsets[I915_MAX_TRANSCODERS];
int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
 
/* Slice/subslice/EU info */
u8 slice_total;
u8 subslice_total;
u8 subslice_per_slice;
u8 eu_total;
u8 eu_per_subslice;
/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
u8 subslice_7eu[3];
u8 has_slice_pg:1;
u8 has_subslice_pg:1;
u8 has_eu_pg:1;
};
 
#undef DEFINE_FLAG
637,6 → 860,11
/* Time when this context was last blamed for a GPU reset */
unsigned long guilty_ts;
 
/* If the context causes a second GPU hang within this time,
* it is permanently banned from submitting any more work.
*/
unsigned long ban_period_seconds;
 
/* This context is banned to submit more work */
bool banned;
};
643,16 → 871,20
 
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0
 
#define CONTEXT_NO_ZEROMAP (1<<0)
/**
* struct intel_context - as the name implies, represents a context.
* @ref: reference count.
* @user_handle: userspace tracking identity for this context.
* @remap_slice: l3 row remapping information.
* @flags: context specific flags:
* CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
* @file_priv: filp associated with this context (NULL for global default
* context).
* @hang_stats: information about the role of this context in possible GPU
* hangs.
* @vm: virtual memory space used by this context.
* @ppgtt: virtual memory space used by this context.
* @legacy_hw_ctx: render context backing object and whether it is correctly
* initialized (legacy ring submission mechanism only).
* @link: link in the global list of contexts.
664,6 → 896,8
struct kref ref;
int user_handle;
uint8_t remap_slice;
struct drm_i915_private *i915;
int flags;
struct drm_i915_file_private *file_priv;
struct i915_ctx_hang_stats hang_stats;
struct i915_hw_ppgtt *ppgtt;
675,21 → 909,33
} legacy_hw_ctx;
 
/* Execlists */
bool rcs_initialized;
struct {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
int unpin_count;
int pin_count;
} engine[I915_NUM_RINGS];
 
struct list_head link;
};
 
enum fb_op_origin {
ORIGIN_GTT,
ORIGIN_CPU,
ORIGIN_CS,
ORIGIN_FLIP,
ORIGIN_DIRTYFB,
};
 
struct i915_fbc {
unsigned long size;
/* This is always the inner lock when overlapping with struct_mutex and
* it's the outer lock when overlapping with stolen_lock. */
struct mutex lock;
unsigned long uncompressed_size;
unsigned threshold;
unsigned int fb_id;
enum plane plane;
unsigned int possible_framebuffer_bits;
unsigned int busy_bits;
struct intel_crtc *crtc;
int y;
 
struct drm_mm_node compressed_fb;
701,17 → 947,9
* possible. */
bool enabled;
 
/* On gen8 some rings cannot perform the fbc clean operation, so for
* now we do it in software via mmio.
* This variable works in the opposite direction to ring->fbc_dirty,
* telling the frontbuffer tracking code to perform the cache clean
* on the software side.
*/
bool need_sw_cache_clean;
 
struct intel_fbc_work {
struct delayed_work work;
struct drm_crtc *crtc;
struct intel_crtc *crtc;
struct drm_framebuffer *fb;
} *fbc_work;
 
727,14 → 965,45
FBC_MULTIPLE_PIPES, /* more than one pipe active */
FBC_MODULE_PARAM,
FBC_CHIP_DEFAULT, /* disabled by default on this chip */
FBC_ROTATION, /* rotation is not supported */
FBC_IN_DBG_MASTER, /* kernel debugger is active */
FBC_BAD_STRIDE, /* stride is not supported */
FBC_PIXEL_RATE, /* pixel rate is too big */
FBC_PIXEL_FORMAT /* pixel format is invalid */
} no_fbc_reason;
 
bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
void (*enable_fbc)(struct intel_crtc *crtc);
void (*disable_fbc)(struct drm_i915_private *dev_priv);
};
 
/**
* HIGH_RR is the highest eDP panel refresh rate read from EDID
* LOW_RR is the lowest eDP panel refresh rate found from EDID
* parsing for same resolution.
*/
enum drrs_refresh_rate_type {
DRRS_HIGH_RR,
DRRS_LOW_RR,
DRRS_MAX_RR, /* RR count */
};
 
enum drrs_support_type {
DRRS_NOT_SUPPORTED = 0,
STATIC_DRRS_SUPPORT = 1,
SEAMLESS_DRRS_SUPPORT = 2
};
 
struct intel_dp;
struct i915_drrs {
struct intel_connector *connector;
struct mutex mutex;
struct delayed_work work;
struct intel_dp *dp;
unsigned busy_frontbuffer_bits;
enum drrs_refresh_rate_type refresh_rate_type;
enum drrs_support_type type;
};
 
struct intel_dp;
struct i915_psr {
struct mutex lock;
bool sink_support;
743,6 → 1012,8
bool active;
struct delayed_work work;
unsigned busy_frontbuffer_bits;
bool psr2_support;
bool aux_frame_sync;
};
 
enum intel_pch {
779,150 → 1050,21
};
 
struct i915_suspend_saved_registers {
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
u32 savePIPEBSRC;
u32 saveFPA0;
u32 saveFPA1;
u32 saveDPLL_A;
u32 saveDPLL_A_MD;
u32 saveHTOTAL_A;
u32 saveHBLANK_A;
u32 saveHSYNC_A;
u32 saveVTOTAL_A;
u32 saveVBLANK_A;
u32 saveVSYNC_A;
u32 saveBCLRPAT_A;
u32 saveTRANSACONF;
u32 saveTRANS_HTOTAL_A;
u32 saveTRANS_HBLANK_A;
u32 saveTRANS_HSYNC_A;
u32 saveTRANS_VTOTAL_A;
u32 saveTRANS_VBLANK_A;
u32 saveTRANS_VSYNC_A;
u32 savePIPEASTAT;
u32 saveDSPASTRIDE;
u32 saveDSPASIZE;
u32 saveDSPAPOS;
u32 saveDSPAADDR;
u32 saveDSPASURF;
u32 saveDSPATILEOFF;
u32 savePFIT_PGM_RATIOS;
u32 saveBLC_HIST_CTL;
u32 saveBLC_PWM_CTL;
u32 saveBLC_PWM_CTL2;
u32 saveBLC_CPU_PWM_CTL;
u32 saveBLC_CPU_PWM_CTL2;
u32 saveFPB0;
u32 saveFPB1;
u32 saveDPLL_B;
u32 saveDPLL_B_MD;
u32 saveHTOTAL_B;
u32 saveHBLANK_B;
u32 saveHSYNC_B;
u32 saveVTOTAL_B;
u32 saveVBLANK_B;
u32 saveVSYNC_B;
u32 saveBCLRPAT_B;
u32 saveTRANSBCONF;
u32 saveTRANS_HTOTAL_B;
u32 saveTRANS_HBLANK_B;
u32 saveTRANS_HSYNC_B;
u32 saveTRANS_VTOTAL_B;
u32 saveTRANS_VBLANK_B;
u32 saveTRANS_VSYNC_B;
u32 savePIPEBSTAT;
u32 saveDSPBSTRIDE;
u32 saveDSPBSIZE;
u32 saveDSPBPOS;
u32 saveDSPBADDR;
u32 saveDSPBSURF;
u32 saveDSPBTILEOFF;
u32 saveVGA0;
u32 saveVGA1;
u32 saveVGA_PD;
u32 saveVGACNTRL;
u32 saveADPA;
u32 saveLVDS;
u32 savePP_ON_DELAYS;
u32 savePP_OFF_DELAYS;
u32 saveDVOA;
u32 saveDVOB;
u32 saveDVOC;
u32 savePP_ON;
u32 savePP_OFF;
u32 savePP_CONTROL;
u32 savePP_DIVISOR;
u32 savePFIT_CONTROL;
u32 save_palette_a[256];
u32 save_palette_b[256];
u32 saveFBC_CONTROL;
u32 saveIER;
u32 saveIIR;
u32 saveIMR;
u32 saveDEIER;
u32 saveDEIMR;
u32 saveGTIER;
u32 saveGTIMR;
u32 saveFDI_RXA_IMR;
u32 saveFDI_RXB_IMR;
u32 saveCACHE_MODE_0;
u32 saveMI_ARB_STATE;
u32 saveSWF0[16];
u32 saveSWF1[16];
u32 saveSWF2[3];
u8 saveMSR;
u8 saveSR[8];
u8 saveGR[25];
u8 saveAR_INDEX;
u8 saveAR[21];
u8 saveDACMASK;
u8 saveCR[37];
u32 saveSWF3[3];
uint64_t saveFENCE[I915_MAX_NUM_FENCES];
u32 saveCURACNTR;
u32 saveCURAPOS;
u32 saveCURABASE;
u32 saveCURBCNTR;
u32 saveCURBPOS;
u32 saveCURBBASE;
u32 saveCURSIZE;
u32 saveDP_B;
u32 saveDP_C;
u32 saveDP_D;
u32 savePIPEA_GMCH_DATA_M;
u32 savePIPEB_GMCH_DATA_M;
u32 savePIPEA_GMCH_DATA_N;
u32 savePIPEB_GMCH_DATA_N;
u32 savePIPEA_DP_LINK_M;
u32 savePIPEB_DP_LINK_M;
u32 savePIPEA_DP_LINK_N;
u32 savePIPEB_DP_LINK_N;
u32 saveFDI_RXA_CTL;
u32 saveFDI_TXA_CTL;
u32 saveFDI_RXB_CTL;
u32 saveFDI_TXB_CTL;
u32 savePFA_CTL_1;
u32 savePFB_CTL_1;
u32 savePFA_WIN_SZ;
u32 savePFB_WIN_SZ;
u32 savePFA_WIN_POS;
u32 savePFB_WIN_POS;
u32 savePCH_DREF_CONTROL;
u32 saveDISP_ARB_CTL;
u32 savePIPEA_DATA_M1;
u32 savePIPEA_DATA_N1;
u32 savePIPEA_LINK_M1;
u32 savePIPEA_LINK_N1;
u32 savePIPEB_DATA_M1;
u32 savePIPEB_DATA_N1;
u32 savePIPEB_LINK_M1;
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
u16 saveGCDGMBUS;
};
985,6 → 1127,7
/* Display 2 CZ domain */
u32 gu_ctl0;
u32 gu_ctl1;
u32 pcbr;
u32 clock_gate_dis2;
};
 
1018,25 → 1161,35
u8 max_freq_softlimit; /* Max frequency permitted by the driver */
u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
u8 min_freq; /* AKA RPn. Minimum frequency */
u8 idle_freq; /* Frequency to request when we are idle */
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/freqency */
u8 rp0_freq; /* Non-overclocked max frequency. */
u32 cz_freq;
 
u32 ei_interrupt_count;
u8 up_threshold; /* Current %busy required to uplock */
u8 down_threshold; /* Current %busy required to downclock */
 
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
spinlock_t client_lock;
struct list_head clients;
bool client_boost;
 
bool enabled;
struct delayed_work delayed_resume_work;
unsigned boosts;
 
struct intel_rps_client semaphores, mmioflips;
 
/* manual wa residency calculations */
struct intel_rps_ei up_ei, down_ei;
 
/*
* Protects RPS/RC6 register access and PCU communication.
* Must be taken after struct_mutex if nested.
* Must be taken after struct_mutex if nested. Note that
* this lock may be held for long periods of time when
* talking to hw - so only take it when talking to hw!
*/
struct mutex hw_lock;
};
1061,9 → 1214,6
 
int c_m;
int r_t;
 
struct drm_i915_gem_object *pwrctx;
struct drm_i915_gem_object *renderctx;
};
 
struct drm_i915_private;
1133,6 → 1283,10
struct i915_gem_mm {
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Protects the usage of the GTT stolen memory allocator. This is
* always the inner lock when overlapping with struct_mutex. */
struct mutex stolen_lock;
 
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
struct list_head bound_list;
1220,15 → 1374,14
/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
 
struct timer_list hangcheck_timer;
struct workqueue_struct *hangcheck_wq;
struct delayed_work hangcheck_work;
 
/* For reset and error_state handling. */
spinlock_t lock;
/* Protected by the above dev->gpu_error.lock. */
struct drm_i915_error_state *first_error;
struct work_struct work;
 
 
unsigned long missed_irq_rings;
 
/**
1283,6 → 1436,15
MODESET_SUSPENDED,
};
 
#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30
 
#define DDC_PIN_B 0x05
#define DDC_PIN_C 0x04
#define DDC_PIN_D 0x06
 
struct ddi_vbt_port_info {
/*
* This is an index in the HDMI/DVI DDI buffer translation table.
1295,12 → 1457,19
uint8_t supports_dvi:1;
uint8_t supports_hdmi:1;
uint8_t supports_dp:1;
 
uint8_t alternate_aux_channel;
uint8_t alternate_ddc_pin;
 
uint8_t dp_boost_level;
uint8_t hdmi_boost_level;
};
 
enum drrs_support_type {
DRRS_NOT_SUPPORTED = 0,
STATIC_DRRS_SUPPORT = 1,
SEAMLESS_DRRS_SUPPORT = 2
enum psr_lines_to_wait {
PSR_0_LINES_TO_WAIT = 0,
PSR_1_LINE_TO_WAIT,
PSR_4_LINES_TO_WAIT,
PSR_8_LINES_TO_WAIT
};
 
struct intel_vbt_data {
1332,6 → 1501,15
struct edp_power_seq edp_pps;
 
struct {
bool full_link;
bool require_aux_wakeup;
int idle_frames;
enum psr_lines_to_wait lines_to_wait;
int tp1_wakeup_time;
int tp2_tp3_wakeup_time;
} psr;
 
struct {
u16 pwm_freq_hz;
bool present;
bool active_low_pwm;
1380,6 → 1558,29
enum intel_ddb_partitioning partitioning;
};
 
struct vlv_pipe_wm {
uint16_t primary;
uint16_t sprite[2];
uint8_t cursor;
};
 
struct vlv_sr_wm {
uint16_t plane;
uint8_t cursor;
};
 
struct vlv_wm_values {
struct vlv_pipe_wm pipe[3];
struct vlv_sr_wm sr;
struct {
uint8_t cursor;
uint8_t sprite[2];
uint8_t primary;
} ddl[3];
uint8_t level;
bool cxsr;
};
 
struct skl_ddb_entry {
uint16_t start, end; /* in number of blocks, 'end' is exclusive */
};
1400,8 → 1601,8
 
struct skl_ddb_allocation {
struct skl_ddb_entry pipe[I915_MAX_PIPES];
struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
struct skl_ddb_entry cursor[I915_MAX_PIPES];
struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
};
 
struct skl_wm_values {
1409,18 → 1610,13
struct skl_ddb_allocation ddb;
uint32_t wm_linetime[I915_MAX_PIPES];
uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
uint32_t cursor[I915_MAX_PIPES][8];
uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
uint32_t cursor_trans[I915_MAX_PIPES];
};
 
struct skl_wm_level {
bool plane_en[I915_MAX_PLANES];
bool cursor_en;
uint16_t plane_res_b[I915_MAX_PLANES];
uint8_t plane_res_l[I915_MAX_PLANES];
uint16_t cursor_res_b;
uint8_t cursor_res_l;
};
 
/*
1506,8 → 1702,27
u32 count;
};
 
struct i915_virtual_gpu {
bool active;
};
 
struct i915_execbuffer_params {
struct drm_device *dev;
struct drm_file *file;
uint32_t dispatch_flags;
uint32_t args_batch_start_offset;
uint64_t batch_obj_vm_offset;
struct intel_engine_cs *ring;
struct drm_i915_gem_object *batch_obj;
struct intel_context *ctx;
struct drm_i915_gem_request *request;
};
 
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *objects;
struct kmem_cache *vmas;
struct kmem_cache *requests;
 
const struct intel_device_info info;
 
1517,9 → 1732,17
 
struct intel_uncore uncore;
 
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
struct i915_virtual_gpu vgpu;
 
struct intel_guc guc;
 
struct intel_csr csr;
 
/* Display CSR-related protection */
struct mutex csr_lock;
 
struct intel_gmbus gmbus[GMBUS_NUM_PINS];
 
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
1550,11 → 1773,9
 
bool display_irqs_enabled;
 
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
// struct pm_qos_request pm_qos;
 
/* DPIO indirect register protection */
struct mutex dpio_lock;
/* Sideband mailbox protection */
struct mutex sb_lock;
 
/** Cached value of IMR to avoid reads in updating the bitfield */
union {
1566,19 → 1787,7
u32 pm_rps_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
 
struct work_struct hotplug_work;
struct {
unsigned long hpd_last_jiffies;
int hpd_cnt;
enum {
HPD_ENABLED = 0,
HPD_DISABLED = 1,
HPD_MARK_DISABLED = 2
} hpd_mark;
} hpd_stats[HPD_NUM_PINS];
u32 hpd_event_bits;
struct delayed_work hotplug_reenable_work;
 
struct i915_hotplug hotplug;
struct i915_fbc fbc;
struct i915_drrs drrs;
struct intel_opregion opregion;
1599,12 → 1808,14
struct mutex pps_mutex;
 
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int vlv_cdclk_freq;
unsigned int skl_boot_cdclk;
unsigned int cdclk_freq, max_cdclk_freq;
unsigned int max_dotclk_freq;
unsigned int hpll_freq;
unsigned int czclk_freq;
 
/**
* wq - Driver workqueue for GEM.
1654,9 → 1865,6
 
/* Reclocking support */
bool render_reclock_avail;
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
 
struct i915_frontbuffer_tracking fb_tracking;
 
1684,7 → 1892,7
 
struct drm_i915_gem_object *vlv_pctx;
 
#ifdef CONFIG_DRM_I915_FBDEV
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
struct work_struct fbdev_suspend_work;
1693,11 → 1901,22
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
 
/* hda/i915 audio component */
struct i915_audio_component *audio_component;
bool audio_component_registered;
/**
* av_mutex - mutex for audio/video sync
*
*/
struct mutex av_mutex;
 
uint32_t hw_context_size;
struct list_head context_list;
 
u32 fdi_rx_config;
 
u32 chv_phy_control;
 
u32 suspend_count;
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state vlv_s0ix_state;
1732,41 → 1951,29
union {
struct ilk_wm_values hw;
struct skl_wm_values skl_hw;
struct vlv_wm_values vlv;
};
 
uint8_t max_level;
} wm;
 
struct i915_runtime_pm pm;
 
struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
u32 long_hpd_port_mask;
u32 short_hpd_port_mask;
struct work_struct dig_port_work;
 
/*
* if we get a HPD irq from DP and a HPD irq from non-DP
* the non-DP HPD could block the workqueue on a mode config
* mutex getting, that userspace may have taken. However
* userspace is waiting on the DP workqueue to run which is
* blocked behind the non-DP one.
*/
struct workqueue_struct *dp_wq;
 
uint32_t bios_vgacntr;
 
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
int (*execbuf_submit)(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags);
struct list_head *vmas);
int (*init_rings)(struct drm_device *dev);
void (*cleanup_ring)(struct intel_engine_cs *ring);
void (*stop_ring)(struct intel_engine_cs *ring);
} gt;
 
bool edp_low_vswing;
 
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
 
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
1778,6 → 1985,16
return dev->dev_private;
}
 
static inline struct drm_i915_private *dev_to_i915(struct device *dev)
{
return to_i915(dev_get_drvdata(dev));
}
 
static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
{
return container_of(guc, struct drm_i915_private, guc);
}
 
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
1814,13 → 2031,14
 
/*
* Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
* considered to be the frontbuffer for the given plane interface-vise. This
* considered to be the frontbuffer for the given plane interface-wise. This
* doesn't mean that the hw necessarily already scans it out, but that any
* rendering (by the cpu or gpu) will land in the frontbuffer eventually.
*
* We have one bit per pipe and per scanout plane type.
*/
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
#define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER_BITS \
(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
1827,12 → 2045,12
(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_CURSOR(pipe) \
(1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_SPRITE(pipe) \
(1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \
(1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
(1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
(1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
(0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
(0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
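 
/*
* Worked example of the 8-bits-per-pipe layout above (values follow
* mechanically from the macros; pipe B == 1, sprite plane 1):
*
*	INTEL_FRONTBUFFER_PRIMARY(PIPE_B)	== 1 << 8
*	INTEL_FRONTBUFFER_CURSOR(PIPE_B)	== 1 << 9
*	INTEL_FRONTBUFFER_SPRITE(PIPE_B, 1)	== 1 << 11
*	INTEL_FRONTBUFFER_OVERLAY(PIPE_B)	== 1 << 15
*	INTEL_FRONTBUFFER_ALL_MASK(PIPE_B)	== 0xff00
*/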
 
struct drm_i915_gem_object {
struct drm_gem_object base;
1846,16 → 2064,18
struct drm_mm_node *stolen;
struct list_head global_list;
 
struct list_head ring_list;
struct list_head ring_list[I915_NUM_RINGS];
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
 
struct list_head batch_pool_link;
 
/**
* This is set if the object is on the active lists (has pending
* rendering and so a non-zero seqno), and is not set if it is on the
* inactive (ready to be unbound) list.
*/
unsigned int active:1;
unsigned int active:I915_NUM_RINGS;
 
/**
* This is set if the object has been written to since last bound
1900,8 → 2120,6
* accurate mappable working set.
*/
unsigned int fault_mappable:1;
unsigned int pin_mappable:1;
unsigned int pin_display:1;
 
/*
* Is the object to be mapped as read-only to the GPU
1909,25 → 2127,37
*/
unsigned long gt_ro:1;
unsigned int cache_level:3;
unsigned int cache_dirty:1;
 
unsigned int has_dma_mapping:1;
 
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
 
unsigned int pin_display;
 
struct sg_table *pages;
int pages_pin_count;
struct get_page {
struct scatterlist *sg;
int last;
} get_page;
 
/* prime dma-buf support */
void *dma_buf_vmapping;
int vmapping_count;
 
struct intel_engine_cs *ring;
 
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_read_seqno;
uint32_t last_write_seqno;
/** Breadcrumb of last rendering to the buffer.
* There can only be one writer, but we allow for multiple readers.
* If there is a writer that necessarily implies that all other
* read requests are complete - but we may only be lazily clearing
* the read requests. A read request is naturally the most recent
* request on a ring, so we may have two different write and read
* requests on one ring where the write request is older than the
* read request. This allows for the CPU to read from an active
* buffer by only waiting for the write to complete.
*/
struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS];
struct drm_i915_gem_request *last_write_req;
/** Breadcrumb of last fenced GPU access to the buffer. */
uint32_t last_fenced_seqno;
struct drm_i915_gem_request *last_fenced_req;
 
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
1938,10 → 2168,6
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
 
/** User space pin count and filp owning the pin */
unsigned long user_pin_count;
struct drm_file *pin_filp;
 
union {
/** for phy allocated objects */
struct drm_dma_handle *phys_handle;
1970,27 → 2196,61
* The request queue allows us to note sequence numbers that have been emitted
* and may be associated with active buffers to be retired.
*
* By keeping this list, we can avoid having to do questionable
* sequence-number comparisons on buffer last_rendering_seqnos, and associate
* an emission time with seqnos for tracking how far ahead of the GPU we are.
* By keeping this list, we can avoid having to do questionable sequence
* number comparisons on buffer last_read|write_seqno. It also allows an
* emission time to be associated with the request for tracking how far ahead
* of the GPU the submission is.
*
* The requests are reference counted, so upon creation they should have an
* initial reference taken using kref_init
*/
struct drm_i915_gem_request {
struct kref ref;
 
/** On Which ring this request was generated */
struct drm_i915_private *i915;
struct intel_engine_cs *ring;
 
/** GEM sequence number associated with this request. */
uint32_t seqno;
/** GEM sequence number associated with the previous request;
* when the HWS breadcrumb is equal to this, the GPU is processing
* this request.
*/
u32 previous_seqno;
 
/** GEM sequence number associated with this request;
* when the HWS breadcrumb is equal to or greater than this, the GPU
* has finished processing this request.
*/
u32 seqno;
 
/** Position in the ringbuffer of the start of the request */
u32 head;
 
/** Position in the ringbuffer of the end of the request */
/**
* Position in the ringbuffer of the start of the postfix.
* This is required to calculate the maximum available ringbuffer
* space without overwriting the postfix.
*/
u32 postfix;
 
/** Position in the ringbuffer of the end of the whole request */
u32 tail;
 
/** Context related to this request */
/**
* Context and ring buffer related to this request
* Contexts are refcounted, so when this request is associated with a
* context, we must increment the context's refcount, to guarantee that
* it persists while any request is linked to it. Requests themselves
* are also refcounted, so the request will only be freed when the last
* reference to it is dismissed, and the code in
* i915_gem_request_free() will then decrement the refcount on the
* context.
*/
struct intel_context *ctx;
struct intel_ringbuffer *ringbuf;
 
/** Batch buffer related to this request if any */
/** Batch buffer related to this request if any (used for
* error state dump only) */
struct drm_i915_gem_object *batch_obj;
 
/** Time at which this request was emitted, in jiffies. */
2002,24 → 2262,98
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
};
 
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
struct drm_file *file;
/** process identifier submitting this request */
struct pid *pid;
 
struct {
spinlock_t lock;
struct list_head request_list;
struct delayed_work idle_work;
} mm;
struct idr context_idr;
/**
* The ELSP only accepts two elements at a time, so we queue
* context/tail pairs on a given queue (ring->execlist_queue) until the
* hardware is available. The queue serves a double purpose: we also use
* it to keep track of the up to 2 contexts currently in the hardware
* (usually one in execution and the other queued up by the GPU); we
* only remove elements from the head of the queue when the hardware
* informs us that an element has been completed.
*
* All accesses to the queue are mediated by a spinlock
* (ring->execlist_lock).
*/
 
atomic_t rps_wait_boost;
struct intel_engine_cs *bsd_ring;
/** Execlist link in the submission queue.*/
struct list_head execlist_link;
 
/** Execlists no. of times this request has been sent to the ELSP */
int elsp_submitted;
 
};
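 
/*
* Sketch of the ELSP bookkeeping described above (assumed submit path,
* in the style of the execlists code; elsp_has_room() is a stand-in):
*
*	spin_lock_irq(&ring->execlist_lock);
*	list_add_tail(&request->execlist_link, &ring->execlist_queue);
*	if (elsp_has_room(ring))		// hypothetical check
*		execlists_context_unqueue(ring);	// write up to 2 ports
*	spin_unlock_irq(&ring->execlist_lock);
*/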
 
int i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out);
void i915_gem_request_cancel(struct drm_i915_gem_request *req);
void i915_gem_request_free(struct kref *req_ref);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file);
 
static inline uint32_t
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
return req ? req->seqno : 0;
}
 
static inline struct intel_engine_cs *
i915_gem_request_get_ring(struct drm_i915_gem_request *req)
{
return req ? req->ring : NULL;
}
 
static inline struct drm_i915_gem_request *
i915_gem_request_reference(struct drm_i915_gem_request *req)
{
if (req)
kref_get(&req->ref);
return req;
}
 
static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
kref_put(&req->ref, i915_gem_request_free);
}
 
static inline void
i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
{
struct drm_device *dev;
 
if (!req)
return;
 
dev = req->ring->dev;
if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
mutex_unlock(&dev->struct_mutex);
}
 
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
struct drm_i915_gem_request *src)
{
if (src)
i915_gem_request_reference(src);
 
if (*pdst)
i915_gem_request_unreference(*pdst);
 
*pdst = src;
}
 
/*
* XXX: i915_gem_request_completed should be here but currently needs the
* definition of i915_seqno_passed() which is below. It will be moved in
* a later patch when the call to i915_seqno_passed() is obsoleted...
*/
 
/*
* A command that requires special handling by the command parser.
*/
struct drm_i915_cmd_descriptor {
2071,10 → 2405,15
* Describes where to find a register address in the command to check
* against the ring's register whitelist. Only valid if flags has the
* CMD_DESC_REGISTER bit set.
*
* A non-zero step value implies that the command may access multiple
* registers in sequence (e.g. LRI), in that case step gives the
* distance in dwords between individual offset fields.
*/
struct {
u32 offset;
u32 mask;
u32 step;
} reg;
 
#define MAX_CMD_DESC_BITMASKS 3
2122,6 → 2461,7
})
#define INTEL_INFO(p) (&__I915__(p)->info)
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
 
#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
2144,21 → 2484,22
#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \
INTEL_DEVID(dev) == 0x0152 || \
INTEL_DEVID(dev) == 0x015a)
#define IS_SNB_GT1(dev) (INTEL_DEVID(dev) == 0x0102 || \
INTEL_DEVID(dev) == 0x0106 || \
INTEL_DEVID(dev) == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
#define IS_BROXTON(dev) (!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev))
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
((INTEL_DEVID(dev) & 0xf) == 0x2 || \
(INTEL_DEVID(dev) & 0xf) == 0x6 || \
((INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xb || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
/* ULX machines are also considered ULT. */
#define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0xf) == 0xe)
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
2168,8 → 2509,32
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \
INTEL_DEVID(dev) == 0x0A1E)
#define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \
INTEL_DEVID(dev) == 0x1913 || \
INTEL_DEVID(dev) == 0x1916 || \
INTEL_DEVID(dev) == 0x1921 || \
INTEL_DEVID(dev) == 0x1926)
#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \
INTEL_DEVID(dev) == 0x1915 || \
INTEL_DEVID(dev) == 0x191E)
#define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0030)
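
/* Worked example (illustrative): device id 0x1926 appears in the
* IS_SKL_ULT list above and satisfies (0x1926 & 0x00F0) == 0x0020,
* so IS_SKYLAKE, IS_SKL_ULT and IS_SKL_GT3 all hold for it: a GT3
* ULT Skylake part. */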
 
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
#define SKL_REVID_A0 (0x0)
#define SKL_REVID_B0 (0x1)
#define SKL_REVID_C0 (0x2)
#define SKL_REVID_D0 (0x3)
#define SKL_REVID_E0 (0x4)
#define SKL_REVID_F0 (0x5)
 
#define BXT_REVID_A0 (0x0)
#define BXT_REVID_B0 (0x3)
#define BXT_REVID_C0 (0x9)
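
/* Sketch (not from the original source): workaround code typically
* gates on a stepping range by combining a platform check with
* INTEL_REVID() and the stepping tables above. */
static inline bool needs_early_skl_wa__sketch(struct drm_device *dev)
{
return IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0;
}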
 
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
2202,7 → 2567,8
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
#define USES_PPGTT(dev) (i915.enable_ppgtt)
#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2)
#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2)
#define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3)
 
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
2223,9 → 2589,6
*/
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
 
2235,14 → 2598,31
 
#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
 
#define HAS_DP_MST(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
INTEL_INFO(dev)->gen >= 9)
 
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
IS_SKYLAKE(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
IS_SKYLAKE(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
 
#define HAS_CSR(dev) (IS_GEN9(dev))
 
#define HAS_GUC_UCODE(dev) (IS_GEN9(dev))
#define HAS_GUC_SCHED(dev) (IS_GEN9(dev))
 
#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
INTEL_INFO(dev)->gen >= 8)
 
#define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \
!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
 
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
2251,10 → 2631,12
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
 
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
2267,20 → 2649,20
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
 
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
 
#include "i915_trace.h"
 
extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
 
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
extern int i915_resume_switcheroo(struct drm_device *dev);
 
/* i915_params.c */
struct i915_params {
int modeset;
int panel_ignore_lid;
unsigned int powersave;
int semaphores;
unsigned int lvds_downclock;
int lvds_channel_mode;
int panel_use_ssc;
int vbt_sdvo_panel_type;
2298,11 → 2680,17
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
bool load_detect_test;
bool reset;
bool disable_display;
bool disable_vtd_wa;
bool enable_guc_submission;
int guc_log_level;
int use_mmio_flip;
bool mmio_debug;
int mmio_debug;
bool verbose_state_checks;
bool nuclear_pageflip;
int edp_vswing;
};
extern struct i915_params i915 __read_mostly;
 
2315,12 → 2703,12
struct drm_file *file);
extern void i915_driver_postclose(struct drm_device *dev,
struct drm_file *file);
extern int i915_driver_device_is_agp(struct drm_device * dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
extern int intel_gpu_reset(struct drm_device *dev);
extern bool intel_has_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
2327,7 → 2715,14
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
void i915_firmware_load_error_print(const char *fw_path, int err);
 
/* intel_hotplug.c */
void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
 
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
2336,7 → 2731,6
const char *fmt, ...);
 
extern void intel_irq_init(struct drm_i915_private *dev_priv);
extern void intel_hpd_init(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
 
2347,6 → 2741,23
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
/* Like above but the caller must manage the uncore.lock itself.
* Must be used with I915_READ_FW and friends.
*/
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
static inline bool intel_vgpu_active(struct drm_device *dev)
{
return to_i915(dev)->vgpu.active;
}
 
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
2358,6 → 2769,9
 
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t mask,
uint32_t bits);
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
void
2386,27 → 2800,15
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_engine_cs *ring);
void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_engine_cs *ring,
struct drm_i915_gem_object *obj);
int i915_gem_ringbuffer_submission(struct drm_device *dev,
struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_request *req);
void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags);
struct list_head *vmas);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
2429,12 → 2831,6
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
long target,
unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
2441,21 → 2837,41
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_init_vm(struct drm_i915_private *dev_priv,
struct i915_address_space *vm);
struct drm_i915_gem_object *i915_gem_object_create_from_data(
struct drm_device *dev, const void *data, size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);
 
#define PIN_MAPPABLE 0x1
#define PIN_NONBLOCK 0x2
#define PIN_GLOBAL 0x4
#define PIN_OFFSET_BIAS 0x8
/* Flags used by pin/bind&friends. */
#define PIN_MAPPABLE (1<<0)
#define PIN_NONBLOCK (1<<1)
#define PIN_GLOBAL (1<<2)
#define PIN_OFFSET_BIAS (1<<3)
#define PIN_USER (1<<4)
#define PIN_UPDATE (1<<5)
#define PIN_ZONE_4G (1<<6)
#define PIN_HIGH (1<<7)
#define PIN_OFFSET_MASK (~4095)
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
int __must_check
i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
uint64_t flags);
int __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
uint32_t alignment,
uint64_t flags);
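
/* Illustrative pin call (not from the original source): bind into the
* global GTT, CPU-mappable, 4KiB aligned. The PIN_* values above are
* independent bits, so they may be OR'ed together freely. */
static inline int
pin_for_gtt_access__sketch(struct drm_i915_gem_object *obj)
{
return i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
4096, PIN_MAPPABLE | PIN_GLOBAL);
}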
 
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
/*
* BEWARE: Do not use the function below unless you can _absolutely_
* _guarantee_ the VMA in question is _not in use_ anywhere.
*/
int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
2464,15 → 2880,32
int *needs_clflush);
 
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 
static inline int __sg_page_count(struct scatterlist *sg)
{
struct sg_page_iter sg_iter;
return sg->length >> PAGE_SHIFT;
}
 
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
return sg_page_iter_page(&sg_iter);
static inline struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
return NULL;
 
return NULL;
if (n < obj->get_page.last) {
obj->get_page.sg = obj->pages->sgl;
obj->get_page.last = 0;
}
 
while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
obj->get_page.last += __sg_page_count(obj->get_page.sg++);
if (unlikely(sg_is_chain(obj->get_page.sg)))
obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
}
 
return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
}
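
/* Usage sketch (illustrative): walk every page of an object through
* the cached lookup above. Sequential lookups advance obj->get_page
* instead of rescanning the sg list, so the whole walk is O(N). */
static inline void
for_each_object_page__sketch(struct drm_i915_gem_object *obj)
{
int n, count = obj->base.size >> PAGE_SHIFT;

for (n = 0; n < count; n++) {
struct page *page = i915_gem_object_get_page(obj, n);
if (page == NULL)
break;
/* touch page here */
}
}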
 
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pages == NULL);
2486,9 → 2919,10
 
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to);
struct intel_engine_cs *to,
struct drm_i915_gem_request **to_req);
void i915_vma_move_to_active(struct i915_vma *vma,
struct intel_engine_cs *ring);
struct drm_i915_gem_request *req);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
2503,14 → 2937,23
return (int32_t)(seq1 - seq2) >= 0;
}
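
/* Worked example (illustrative): the signed 32-bit delta makes the
* comparison wraparound-safe. With seq1 = 0x00000002 issued just
* after seq2 = 0xfffffffe wrapped the counter, seq1 - seq2 == 4, so
* i915_seqno_passed(seq1, seq2) is true even though seq1 < seq2
* numerically. */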
 
static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
bool lazy_coherency)
{
u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
return i915_seqno_passed(seqno, req->previous_seqno);
}
 
static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
bool lazy_coherency)
{
u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
return i915_seqno_passed(seqno, req->seqno);
}
 
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
 
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring);
 
2518,7 → 2961,6
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
 
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
2550,30 → 2992,32
 
void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int i915_gem_init_rings(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
void __i915_add_request(struct drm_i915_gem_request *req,
struct drm_i915_gem_object *batch_obj,
u32 *seqno);
#define i915_add_request(ring, seqno) \
__i915_add_request(ring, NULL, NULL, seqno)
int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
bool flush_caches);
#define i915_add_request(req) \
__i915_add_request(req, NULL, true)
#define i915_add_request_no_flush(req) \
__i915_add_request(req, NULL, false)
int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct drm_i915_file_private *file_priv);
int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
uint32_t seqno);
struct intel_rps_client *rps);
int __must_check i915_wait_request(struct drm_i915_gem_request *req);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
int __must_check
2581,8 → 3025,11
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_engine_cs *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
struct intel_engine_cs *pipelined,
struct drm_i915_gem_request **pipelined_request,
const struct i915_ggtt_view *view);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
2603,29 → 3050,44
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
 
void i915_gem_restore_fences(struct drm_device *dev);
u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view);
u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
static inline u64
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
{
return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
}
 
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
 
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
 
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
 
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (vma->pin_count > 0)
return true;
return false;
static inline struct i915_vma *
i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
}
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
 
/* Some GGTT VM helpers */
#define i915_obj_to_ggtt(obj) \
2648,16 → 3110,10
 
static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj));
return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
}
 
static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj));
}
 
static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
2678,17 → 3134,35
return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
}
 
void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
static inline void
i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
{
i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
}
 
/* i915_gem_fence.c */
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
 
void i915_gem_restore_fences(struct drm_device *dev);
 
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
int i915_gem_context_enable(struct drm_i915_private *dev_priv);
int i915_gem_context_enable(struct drm_i915_gem_request *req);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_engine_cs *ring,
struct intel_context *to);
int i915_switch_context(struct drm_i915_gem_request *req);
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
2713,6 → 3187,10
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
2724,7 → 3202,6
unsigned long end,
unsigned flags);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);
 
/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_device *dev)
2734,9 → 3211,16
}
 
/* i915_gem_stolen.c */
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment);
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment, u64 start,
u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node);
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
2746,6 → 3230,18
u32 gtt_offset,
u32 size);
 
/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
unsigned long target,
unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4
#define I915_SHRINK_ACTIVE 0x8
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
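
/* Illustrative call (not from the original source): try to reclaim up
* to 4096 pages, touching only purgeable and unbound objects so that
* active GPU work is left alone. */
static inline unsigned long
shrink_idle_pages__sketch(struct drm_i915_private *dev_priv)
{
return i915_gem_shrink(dev_priv, 4096,
I915_SHRINK_PURGEABLE | I915_SHRINK_UNBOUND);
}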
 
 
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
2755,10 → 3251,6
obj->tiling_mode != I915_TILING_NONE;
}
 
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 
/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
2770,8 → 3262,11
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_device *dev);
#else
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif
 
2805,7 → 3300,9
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
u32 batch_len,
bool is_master);
 
/* i915_suspend.c */
2812,10 → 3309,6
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
 
/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);
 
/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);
2823,13 → 3316,11
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}
extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
unsigned int pin);
 
extern struct i2c_adapter *intel_gmbus_get_adapter(
struct drm_i915_private *dev_priv, unsigned port);
extern struct i2c_adapter *
intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
2881,17 → 3372,12
extern void intel_modeset_cleanup(struct drm_device *dev);
extern void intel_connector_unregister(struct intel_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore);
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern void intel_set_rps(struct drm_device *dev, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
extern void intel_detect_pch(struct drm_device *dev);
2904,10 → 3390,7
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
 
void intel_notify_mmio_flip(struct intel_engine_cs *ring);
 
/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_overlay_error_state *error);
2916,22 → 3399,13
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct drm_device *dev,
struct intel_display_error_state *error);
#endif
 
/* On SNB platform, before reading ring registers forcewake bit
* must be set to prevent GT core from power down and stale values being
* returned.
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
 
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
 
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2952,16 → 3426,9
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 
#define FORCEWAKE_RENDER (1 << 0)
#define FORCEWAKE_MEDIA (1 << 1)
#define FORCEWAKE_BLITTER (1 << 2)
#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA | \
FORCEWAKE_BLITTER)
 
 
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
 
2985,19 → 3452,29
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
 
#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
u32 upper = I915_READ(upper_reg); \
u32 lower = I915_READ(lower_reg); \
u32 tmp = I915_READ(upper_reg); \
if (upper != tmp) { \
upper = tmp; \
u32 upper, lower, old_upper, loop = 0; \
upper = I915_READ(upper_reg); \
do { \
old_upper = upper; \
lower = I915_READ(lower_reg); \
WARN_ON(I915_READ(upper_reg) != upper); \
} \
upper = I915_READ(upper_reg); \
} while (upper != old_upper && loop++ < 2); \
(u64)upper << 32 | lower; })
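
/* Usage sketch (illustrative; the register pair is a placeholder for
* any 64-bit counter exposed as two 32-bit halves): the do/while
* above re-reads the high half until it is stable, so a carry
* between the two reads cannot produce a torn 64-bit value. */
static inline u64
read_split_counter__sketch(struct drm_i915_private *dev_priv,
u32 reg_lo, u32 reg_hi)
{
return I915_READ64_2x32(reg_lo, reg_hi);
}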
 
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
 
/* These are untraced mmio-accessors that are only valid to be used inside
* critical sections inside IRQ handlers where forcewake is explicitly
* controlled.
* Think twice, and think again, before using these.
* Note: Should only be used between intel_uncore_forcewake_irqlock() and
* intel_uncore_forcewake_irqunlock().
*/
#define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__))
#define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
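
/* Minimal pairing sketch for the _FW accessors (illustrative; assumes
* the caller already holds uncore.lock and that FORCEWAKE_ALL names
* the full forcewake domain mask): */
static inline u32
read_reg_in_irq__sketch(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val;

intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
val = I915_READ_FW(reg);
intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);

return val;
}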
 
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
3059,74 → 3536,15
 
if (time_after(target_jiffies, tmp_jiffies)) {
remaining_jiffies = target_jiffies - tmp_jiffies;
while ((int)remaining_jiffies > 0) {
delay(remaining_jiffies);
remaining_jiffies = target_jiffies - jiffies;
}
}
}
 
typedef struct
static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
struct drm_i915_gem_request *req)
{
int width;
int height;
int bpp;
int freq;
}videomode_t;
 
struct cmdtable
{
char *key;
int size;
int *val;
};
 
#define CMDENTRY(key, val) {(key), (sizeof(key)-1), &val}
 
void parse_cmdline(char *cmdline, struct cmdtable *table, char *log, videomode_t *mode);
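
/* Hypothetical example (the option key, the variable and the
* NULL-keyed sentinel are assumptions for illustration) of building
* a table for parse_cmdline(): */
static int shadow_fb;
static struct cmdtable example_cmdtable[] = {
CMDENTRY("-shadow:", shadow_fb),
{ NULL, 0, NULL }
};
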
struct drm_i915_gem_object
*kos_gem_fb_object_create(struct drm_device *dev, u32 gtt_offset, u32 size);
 
extern struct drm_i915_gem_object *main_fb_obj;
 
static struct drm_i915_gem_object *get_fb_obj(void)
{
return main_fb_obj;
}
 
#define ioread32(addr) readl(addr)
 
 
static inline int pm_runtime_get_sync(struct device *dev)
{
return 0;
if (ring->trace_irq_req == NULL && ring->irq_get(ring))
i915_gem_request_assign(&ring->trace_irq_req, req);
}
 
static inline int pm_runtime_set_active(struct device *dev)
{
return 0;
}
 
static inline void pm_runtime_disable(struct device *dev)
{
 
}
 
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
return 0;
}
 
static inline u8 inb(u16 port)
{
u8 v;
asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
return v;
}
 
static inline void outb(u8 v, u16 port)
{
asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
}
 
#endif
/drivers/video/drm/i915/i915_gem.c
1,5 → 1,5
/*
* Copyright © 2008 Intel Corporation
* Copyright © 2008-2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
29,6 → 29,7
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
36,6 → 37,7
//#include <linux/swap.h>
#include <linux/scatterlist.h>
#include <linux/pci.h>
#define RQ_BUG_ON(expr)
 
extern int x86_clflush_size;
 
44,29 → 46,7
#define MAP_SHARED 0x01 /* Share changes */
 
 
u64 nsecs_to_jiffies64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
/* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
/* overflow after 292 years if HZ = 1024 */
return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
/*
* Generic case - optimized for cases where HZ is a multiple of 3.
* overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
*/
return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}
 
unsigned long nsecs_to_jiffies(u64 n)
{
return (unsigned long)nsecs_to_jiffies64(n);
}
 
 
struct drm_i915_gem_object *get_fb_obj();
 
unsigned long vm_mmap(struct file *file, unsigned long addr,
80,23 → 60,12
 
 
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj);
i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
 
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);
 
 
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
{
111,18 → 80,6
return obj->pin_display;
}
 
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
if (obj->tiling_mode)
i915_gem_release_mmap(obj);
 
/* As we do not have an associated fence register, we will force
* a tiling change if we ever need to acquire one.
*/
obj->fence_dirty = false;
obj->fence_reg = I915_FENCE_REG_NONE;
}
 
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
size_t size)
189,12 → 146,6
return 0;
}
 
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_bound_any(obj) && !obj->active;
}
 
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
201,14 → 152,18
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
struct drm_i915_gem_object *obj;
struct i915_gtt *ggtt = &dev_priv->gtt;
struct i915_vma *vma;
size_t pinned;
 
pinned = 0;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (i915_gem_obj_is_pinned(obj))
pinned += i915_gem_obj_ggtt_size(obj);
list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
if (vma->pin_count)
pinned += vma->node.size;
list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
if (vma->pin_count)
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
 
args->aper_size = dev_priv->gtt.base.total;
615,6 → 570,8
 
offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
 
intel_fb_obj_invalidate(obj, ORIGIN_GTT);
 
while (remain > 0) {
/* Operation in this page
*
637,6 → 594,8
offset += page_length;
}
 
out_flush:
intel_fb_obj_flush(obj, false, ORIGIN_GTT);
out_unpin:
i915_gem_object_ggtt_unpin(obj);
out:
742,8 → 701,6
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
 
i915_gem_object_retire(obj);
}
/* Same trick applies to invalidate partially written cachelines read
* before writing. */
755,6 → 712,8
if (ret)
return ret;
 
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 
i915_gem_object_pin_pages(obj);
 
offset = args->offset;
828,13 → 787,16
if (!needs_clflush_after &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
if (i915_gem_clflush_object(obj, obj->pin_display))
i915_gem_chipset_flush(dev);
needs_clflush_after = true;
}
}
 
if (needs_clflush_after)
i915_gem_chipset_flush(dev);
else
obj->cache_dirty = true;
 
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
return ret;
}
 
847,6 → 809,7
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
int ret;
854,10 → 817,11
if (args->size == 0)
return 0;
 
intel_runtime_pm_get(dev_priv);
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
goto put_rpm;
 
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
905,6 → 869,9
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
put_rpm:
intel_runtime_pm_put(dev_priv);
 
return ret;
}
 
922,6 → 889,12
if (i915_terminally_wedged(error))
return -EIO;
 
/*
* Check if GPU Reset is in progress - we need intel_ring_begin
* to work properly to reinit the hw state while the gpu is
* still marked as reset-in-progress. Handle this with a flag.
*/
if (!error->reload_in_reset)
return -EAGAIN;
}
 
928,24 → 901,6
return 0;
}
 
/*
* Compare seqno against outstanding lazy request. Emit a request if they are
* equal.
*/
int
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
int ret;
 
BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 
ret = 0;
if (seqno == ring->outstanding_lazy_seqno)
ret = i915_add_request(ring, NULL);
 
return ret;
}
 
static void fake_irq(unsigned long data)
{
// wake_up_process((struct task_struct *)data);
957,19 → 912,79
return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
 
static bool can_wait_boost(struct drm_i915_file_private *file_priv)
static unsigned long local_clock_us(unsigned *cpu)
{
if (file_priv == NULL)
unsigned long t;
 
/* Cheaply and approximately convert from nanoseconds to microseconds.
* The result and subsequent calculations are also defined in the same
* approximate microseconds units. The principal source of timing
* error here is from the simple truncation.
*
* Note that local_clock() is only defined wrt to the current CPU;
* the comparisons are no longer valid if we switch CPUs. Instead of
* blocking preemption for the entire busywait, we can detect the CPU
* switch and use that as indicator of system load and a reason to
* stop busywaiting, see busywait_stop().
*/
t = GetClockNs() >> 10;
 
return t;
}
 
static bool busywait_stop(unsigned long timeout, unsigned cpu)
{
unsigned this_cpu = 0;
 
if (time_after(local_clock_us(&this_cpu), timeout))
return true;
 
return !atomic_xchg(&file_priv->rps_wait_boost, true);
return this_cpu != cpu;
}
 
static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
{
unsigned long timeout;
unsigned cpu;
 
/* When waiting for high frequency requests, e.g. during synchronous
* rendering split between the CPU and GPU, the finite amount of time
* required to set up the irq and wait upon it limits the response
* rate. By busywaiting on the request completion for a short while we
* can service the high frequency waits as quickly as possible. However,
* if it is a slow request, we want to sleep as quickly as possible.
* The tradeoff between waiting and sleeping is roughly the time it
* takes to sleep on a request, on the order of a microsecond.
*/
 
if (req->ring->irq_refcount)
return -EBUSY;
 
/* Only spin if we know the GPU is processing this request */
if (!i915_gem_request_started(req, true))
return -EAGAIN;
 
timeout = local_clock_us(&cpu) + 5;
while (1 /*!need_resched()*/) {
if (i915_gem_request_completed(req, true))
return 0;
 
if (busywait_stop(timeout, cpu))
break;
 
cpu_relax_lowlatency();
}
 
if (i915_gem_request_completed(req, false))
return 0;
 
return -EAGAIN;
}
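
/* Together with the interrupt-driven wait below this yields a
* two-phase policy: spin for at most ~5us, and only when the GPU has
* demonstrably started the request, then fall back to sleeping on
* the ring interrupt. The crossover is sized to roughly the cost of
* one sleep/wake cycle on a request. */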
 
/**
* __i915_wait_seqno - wait until execution of seqno has finished
* @ring: the ring expected to report seqno
* @seqno: duh!
* @reset_counter: reset sequence associated with the given seqno
* __i915_wait_request - wait until execution of request has finished
* @req: duh!
* @reset_counter: reset sequence associated with the given request
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
980,19 → 995,22
* reset_counter _must_ be read before, and an appropriate smp_rmb must be
* inserted.
*
* Returns 0 if the seqno was found within the allotted time. Else returns the
* Returns 0 if the request was found within the allotted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct drm_i915_file_private *file_priv)
struct intel_rps_client *rps)
{
struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const bool irq_test_in_progress =
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(wait);
unsigned long timeout_expire;
s64 before, now;
 
1001,28 → 1019,43
 
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
if (list_empty(&req->list))
return 0;
 
timeout_expire = timeout ?
jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
if (i915_gem_request_completed(req, true))
return 0;
 
if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
gen6_rps_boost(dev_priv);
if (file_priv)
mod_delayed_work(dev_priv->wq,
&file_priv->mm.idle_work,
msecs_to_jiffies(100));
timeout_expire = 0;
if (timeout) {
if (WARN_ON(*timeout < 0))
return -EINVAL;
 
if (*timeout == 0)
return -ETIME;
 
timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
}
 
if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
return -ENODEV;
if (INTEL_INFO(dev_priv)->gen >= 6)
gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
 
/* Record current time in case interrupted by signal, or wedged */
trace_i915_gem_request_wait_begin(req);
before = ktime_get_raw_ns();
 
/* Optimistic spin for the next jiffie before touching IRQs */
ret = __i915_spin_request(req, state);
if (ret == 0)
goto out;
 
if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
ret = -ENODEV;
goto out;
}
 
INIT_LIST_HEAD(&__wait.task_list);
__wait.evnt = CreateEvent(NULL, MANUAL_DESTROY);
 
/* Record current time in case interrupted by signal, or wedged */
trace_i915_gem_request_wait_begin(ring, seqno);
 
for (;;) {
unsigned long flags;
1038,7 → 1071,7
break;
}
 
if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
if (i915_gem_request_completed(req, false)) {
ret = 0;
break;
}
1061,7 → 1094,6
spin_unlock_irqrestore(&ring->irq_queue.lock, flags);
}
};
trace_i915_gem_request_wait_end(ring, seqno);
 
DestroyEvent(__wait.evnt);
 
1069,54 → 1101,142
ring->irq_put(ring);
 
// finish_wait(&ring->irq_queue, &wait);
out:
now = ktime_get_raw_ns();
trace_i915_gem_request_wait_end(req);
 
if (timeout) {
s64 tres = *timeout - (now - before);
 
*timeout = tres < 0 ? 0 : tres;
 
/*
* Apparently ktime isn't accurate enough and occasionally has a
* bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
* things up to make the test happy. We allow up to 1 jiffy.
*
* This is a regression from the timespec->ktime conversion.
*/
if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
*timeout = 0;
}
 
return ret;
}
 
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file)
{
struct drm_i915_private *dev_private;
struct drm_i915_file_private *file_priv;
 
WARN_ON(!req || !file || req->file_priv);
 
if (!req || !file)
return -EINVAL;
 
if (req->file_priv)
return -EINVAL;
 
dev_private = req->ring->dev->dev_private;
file_priv = file->driver_priv;
 
spin_lock(&file_priv->mm.lock);
req->file_priv = file_priv;
list_add_tail(&req->client_list, &file_priv->mm.request_list);
spin_unlock(&file_priv->mm.lock);
 
req->pid = 1;
 
return 0;
}
 
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
struct drm_i915_file_private *file_priv = request->file_priv;
 
if (!file_priv)
return;
 
spin_lock(&file_priv->mm.lock);
list_del(&request->client_list);
request->file_priv = NULL;
spin_unlock(&file_priv->mm.lock);
}
 
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
trace_i915_gem_request_retire(request);
 
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
* of the tail of the request to update the last known position
* of the GPU head.
*
* Note this requires that we are always called in request
* completion order.
*/
request->ringbuf->last_retired_head = request->postfix;
 
list_del_init(&request->list);
i915_gem_request_remove_from_client(request);
 
i915_gem_request_unreference(request);
}
 
static void
__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->ring;
struct drm_i915_gem_request *tmp;
 
 
if (list_empty(&req->list))
return;
 
do {
tmp = list_first_entry(&engine->request_list,
typeof(*tmp), list);
 
i915_gem_request_retire(tmp);
} while (tmp != req);
 
WARN_ON(i915_verify_lists(engine->dev));
}
 
/**
* Waits for a sequence number to be signaled, and cleans up the
* Waits for a request to be signaled, and cleans up the
* request and object lists appropriately for that event.
*/
int
i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
i915_wait_request(struct drm_i915_gem_request *req)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool interruptible = dev_priv->mm.interruptible;
unsigned reset_counter;
struct drm_device *dev;
struct drm_i915_private *dev_priv;
bool interruptible;
int ret;
 
BUG_ON(req == NULL);
 
dev = req->ring->dev;
dev_priv = dev->dev_private;
interruptible = dev_priv->mm.interruptible;
 
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(seqno == 0);
 
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
return ret;
 
ret = i915_gem_check_olr(ring, seqno);
ret = __i915_wait_request(req,
atomic_read(&dev_priv->gpu_error.reset_counter),
interruptible, NULL, NULL);
if (ret)
return ret;
 
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
NULL, NULL);
}
 
static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
{
if (!obj->active)
__i915_gem_request_retire__upto(req);
return 0;
 
/* Manually manage the write flush as we may have not yet
* retired the buffer.
*
* Note that the last_write_seqno is always the earlier of
* the two (read/write) seqno, so if we have successfully waited,
* we know we have passed the last write.
*/
obj->last_write_seqno = 0;
 
return 0;
}
 
/**
1123,45 → 1243,76
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
*/
static __must_check int
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
struct intel_engine_cs *ring = obj->ring;
u32 seqno;
int ret;
int ret, i;
 
seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
if (seqno == 0)
if (!obj->active)
return 0;
 
ret = i915_wait_seqno(ring, seqno);
if (readonly) {
if (obj->last_write_req != NULL) {
ret = i915_wait_request(obj->last_write_req);
if (ret)
return ret;
 
return i915_gem_object_wait_rendering__tail(obj);
i = obj->last_write_req->ring->id;
if (obj->last_read_req[i] == obj->last_write_req)
i915_gem_object_retire__read(obj, i);
else
i915_gem_object_retire__write(obj);
}
} else {
for (i = 0; i < I915_NUM_RINGS; i++) {
if (obj->last_read_req[i] == NULL)
continue;
 
ret = i915_wait_request(obj->last_read_req[i]);
if (ret)
return ret;
 
i915_gem_object_retire__read(obj, i);
}
RQ_BUG_ON(obj->active);
}
 
return 0;
}
 
static void
i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req)
{
int ring = req->ring->id;
 
if (obj->last_read_req[ring] == req)
i915_gem_object_retire__read(obj, ring);
else if (obj->last_write_req == req)
i915_gem_object_retire__write(obj);
 
__i915_gem_request_retire__upto(req);
}
 
/* A nonblocking variant of the above wait. This is a highly dangerous routine
* as the object state may change during this call.
*/
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_i915_file_private *file_priv,
struct intel_rps_client *rps,
bool readonly)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = obj->ring;
struct drm_i915_gem_request *requests[I915_NUM_RINGS];
unsigned reset_counter;
u32 seqno;
int ret;
int ret, i, n = 0;
 
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!dev_priv->mm.interruptible);
 
seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
if (seqno == 0)
if (!obj->active)
return 0;
 
ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1168,19 → 1319,47
if (ret)
return ret;
 
ret = i915_gem_check_olr(ring, seqno);
if (ret)
return ret;
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (readonly) {
struct drm_i915_gem_request *req;
 
req = obj->last_write_req;
if (req == NULL)
return 0;
 
requests[n++] = i915_gem_request_reference(req);
} else {
for (i = 0; i < I915_NUM_RINGS; i++) {
struct drm_i915_gem_request *req;
 
req = obj->last_read_req[i];
if (req == NULL)
continue;
 
requests[n++] = i915_gem_request_reference(req);
}
}
 
mutex_unlock(&dev->struct_mutex);
ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
file_priv);
for (i = 0; ret == 0 && i < n; i++)
ret = __i915_wait_request(requests[i], reset_counter, true,
NULL, rps);
mutex_lock(&dev->struct_mutex);
if (ret)
 
for (i = 0; i < n; i++) {
if (ret == 0)
i915_gem_object_retire_request(obj, requests[i]);
i915_gem_request_unreference(requests[i]);
}
 
return ret;
}
 
return i915_gem_object_wait_rendering__tail(obj);
static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
struct drm_i915_file_private *fpriv = file->driver_priv;
return &fpriv->rps;
}
 
/**
1225,24 → 1404,21
* to catch cases where we are gazumped.
*/
ret = i915_gem_object_wait_rendering__nonblocking(obj,
file->driver_priv,
to_rps_client(file),
!write_domain);
if (ret)
goto unref;
 
if (read_domains & I915_GEM_DOMAIN_GTT) {
if (read_domains & I915_GEM_DOMAIN_GTT)
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 
/* Silently promote "you're not bound, there was nothing to do"
* to success, since the client was just asking us to
* make sure everything was done.
*/
if (ret == -EINVAL)
ret = 0;
} else {
else
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
 
if (write_domain != 0)
intel_fb_obj_invalidate(obj,
write_domain == I915_GEM_DOMAIN_GTT ?
ORIGIN_GTT : ORIGIN_CPU);
 
unref:
drm_gem_object_unreference(&obj->base);
unlock:
1273,7 → 1449,7
 
/* Pinned buffers may be scanout, so flush the cache */
if (obj->pin_display)
i915_gem_object_flush_cpu_write_domain(obj, true);
i915_gem_object_flush_cpu_write_domain(obj);
 
drm_gem_object_unreference(&obj->base);
unlock:
1306,6 → 1482,8
struct drm_gem_object *obj;
unsigned long addr;
 
// if (args->flags & ~(I915_MMAP_WC))
// return -EINVAL;
obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
return -ENOENT;
1366,6 → 1544,15
obj->fault_mappable = false;
}
 
void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj;
 
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
i915_gem_release_mmap(obj);
}
 
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
1437,11 → 1624,6
goto unlock;
}
 
if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
}
 
if (obj->madv != I915_MADV_WILLNEED) {
DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
ret = -EFAULT;
1513,12 → 1695,6
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
 
static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
return obj->madv == I915_MADV_DONTNEED;
}
 
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1573,6 → 1749,7
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
 
i915_gem_gtt_finish_object(obj);
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
 
1613,23 → 1790,18
return 0;
}
 
 
 
 
 
 
 
 
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int page_count, i;
struct address_space *mapping;
struct sg_table *st;
struct scatterlist *sg;
struct sg_page_iter sg_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
int ret;
gfp_t gfp;
 
/* Assert that the object is not currently in any GPU domain. As it
1686,6 → 1858,9
sg_mark_end(sg);
obj->pages = st;
 
ret = i915_gem_gtt_prepare_object(obj);
if (ret)
goto err_pages;
 
if (obj->tiling_mode != I915_TILING_NONE &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
1732,86 → 1907,76
return ret;
 
list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
obj->get_page.sg = obj->pages->sgl;
obj->get_page.last = 0;
 
return 0;
}
 
static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring)
void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_request *req)
{
u32 seqno = intel_ring_get_seqno(ring);
struct drm_i915_gem_object *obj = vma->obj;
struct intel_engine_cs *ring;
 
BUG_ON(ring == NULL);
if (obj->ring != ring && obj->last_write_seqno) {
/* Keep the seqno relative to the current ring */
obj->last_write_seqno = seqno;
}
obj->ring = ring;
ring = i915_gem_request_get_ring(req);
 
/* Add a reference if we're newly entering the active list. */
if (!obj->active) {
if (obj->active == 0)
drm_gem_object_reference(&obj->base);
obj->active = 1;
}
obj->active |= intel_ring_flag(ring);
 
list_move_tail(&obj->ring_list, &ring->active_list);
list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
i915_gem_request_assign(&obj->last_read_req[ring->id], req);
 
obj->last_read_seqno = seqno;
list_move_tail(&vma->mm_list, &vma->vm->active_list);
}
 
void i915_vma_move_to_active(struct i915_vma *vma,
struct intel_engine_cs *ring)
static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
{
list_move_tail(&vma->mm_list, &vma->vm->active_list);
return i915_gem_object_move_to_active(vma->obj, ring);
RQ_BUG_ON(obj->last_write_req == NULL);
RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
 
i915_gem_request_assign(&obj->last_write_req, NULL);
intel_fb_obj_flush(obj, true, ORIGIN_CS);
}
 
static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_address_space *vm;
struct i915_vma *vma;
 
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
RQ_BUG_ON(obj->last_read_req[ring] == NULL);
RQ_BUG_ON(!(obj->active & (1 << ring)));
 
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
vma = i915_gem_obj_to_vma(obj, vm);
if (vma && !list_empty(&vma->mm_list))
list_move_tail(&vma->mm_list, &vm->inactive_list);
}
list_del_init(&obj->ring_list[ring]);
i915_gem_request_assign(&obj->last_read_req[ring], NULL);
 
intel_fb_obj_flush(obj, true);
if (obj->last_write_req && obj->last_write_req->ring->id == ring)
i915_gem_object_retire__write(obj);
 
list_del_init(&obj->ring_list);
obj->ring = NULL;
obj->active &= ~(1 << ring);
if (obj->active)
return;
 
obj->last_read_seqno = 0;
obj->last_write_seqno = 0;
obj->base.write_domain = 0;
/* Bump our place on the bound list to keep it roughly in LRU order
* so that we don't steal from recently used but inactive objects
* (unless we are forced to, of course!)
*/
list_move_tail(&obj->global_list,
&to_i915(obj->base.dev)->mm.bound_list);
 
obj->last_fenced_seqno = 0;
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (!list_empty(&vma->mm_list))
list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
}
 
obj->active = 0;
i915_gem_request_assign(&obj->last_fenced_req, NULL);
drm_gem_object_unreference(&obj->base);
 
WARN_ON(i915_verify_lists(dev));
}
 
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj)
{
struct intel_engine_cs *ring = obj->ring;
 
if (ring == NULL)
return;
 
if (i915_seqno_passed(ring->get_seqno(ring, true),
obj->last_read_seqno))
i915_gem_object_move_to_inactive(obj);
}
 
static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
1882,27 → 2047,35
return 0;
}
 
int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
/*
* NB: This function is not allowed to fail. Doing so would mean the
* request is not being tracked for completion but the work itself is
* going to happen on the hardware. This would be a Bad Thing(tm).
*/
void __i915_add_request(struct drm_i915_gem_request *request,
struct drm_i915_gem_object *obj,
u32 *out_seqno)
bool flush_caches)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
struct intel_engine_cs *ring;
struct drm_i915_private *dev_priv;
struct intel_ringbuffer *ringbuf;
u32 request_ring_position, request_start;
u32 request_start;
int ret;
 
request = ring->preallocated_lazy_request;
if (WARN_ON(request == NULL))
return -ENOMEM;
return;
 
if (i915.enable_execlists) {
struct intel_context *ctx = request->ctx;
ringbuf = ctx->engine[ring->id].ringbuf;
} else
ringbuf = ring->buffer;
ring = request->ring;
dev_priv = ring->dev->dev_private;
ringbuf = request->ringbuf;
 
/*
* To ensure that this call will not fail, space for its emissions
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
intel_ring_reserved_space_use(ringbuf);
 
request_start = intel_ring_get_tail(ringbuf);
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
1911,14 → 2084,13
* is that the flush _must_ happen before the next request, no matter
* what.
*/
if (i915.enable_execlists) {
ret = logical_ring_flush_all_caches(ringbuf);
if (ret)
return ret;
} else {
ret = intel_ring_flush_all_caches(ring);
if (ret)
return ret;
if (flush_caches) {
if (i915.enable_execlists)
ret = logical_ring_flush_all_caches(request);
else
ret = intel_ring_flush_all_caches(request);
/* Not allowed to fail! */
WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
}
 
/* Record the position of the start of the request so that
1926,22 → 2098,19
* GPU processing the request, we never over-estimate the
* position of the head.
*/
request_ring_position = intel_ring_get_tail(ringbuf);
request->postfix = intel_ring_get_tail(ringbuf);
 
if (i915.enable_execlists) {
ret = ring->emit_request(ringbuf);
if (ret)
return ret;
} else {
ret = ring->add_request(ring);
if (ret)
return ret;
if (i915.enable_execlists)
ret = ring->emit_request(request);
else {
ret = ring->add_request(request);
 
request->tail = intel_ring_get_tail(ringbuf);
}
/* Not allowed to fail! */
WARN(ret, "emit|add_request failed: %d!\n", ret);
 
request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
request->head = request_start;
request->tail = request_ring_position;
 
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
1951,33 → 2120,13
*/
request->batch_obj = obj;
 
if (!i915.enable_execlists) {
/* Hold a reference to the current context so that we can inspect
* it later in case a hangcheck error event fires.
*/
request->ctx = ring->last_context;
if (request->ctx)
i915_gem_context_reference(request->ctx);
}
 
request->emitted_jiffies = jiffies;
request->previous_seqno = ring->last_submitted_seqno;
ring->last_submitted_seqno = request->seqno;
list_add_tail(&request->list, &ring->request_list);
request->file_priv = NULL;
 
if (file) {
struct drm_i915_file_private *file_priv = file->driver_priv;
trace_i915_gem_request_add(request);
 
spin_lock(&file_priv->mm.lock);
request->file_priv = file_priv;
list_add_tail(&request->client_list,
&file_priv->mm.request_list);
spin_unlock(&file_priv->mm.lock);
}
 
trace_i915_gem_request_add(ring, request->seqno);
ring->outstanding_lazy_seqno = 0;
ring->preallocated_lazy_request = NULL;
 
// i915_queue_hangcheck(ring->dev);
 
queue_delayed_work(dev_priv->wq,
1985,25 → 2134,10
round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
 
if (out_seqno)
*out_seqno = request->seqno;
return 0;
/* Sanity check that the reserved size was large enough. */
intel_ring_reserved_space_end(ringbuf);
}
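
/*
 * Editor's note (assumption): the intel_ring_reserved_space_use()/_end()
 * calls above consume the reservation made by
 * intel_{logical_,}ring_reserve_space() during request allocation; that
 * reservation is what guarantees this function cannot run out of ring
 * space half-way through emitting the request.
 */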
 
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
struct drm_i915_file_private *file_priv = request->file_priv;
 
if (!file_priv)
return;
 
spin_lock(&file_priv->mm.lock);
list_del(&request->client_list);
request->file_priv = NULL;
spin_unlock(&file_priv->mm.lock);
}
 
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
const struct intel_context *ctx)
{
2014,7 → 2148,8
if (ctx->hang_stats.banned)
return true;
 
if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
if (ctx->hang_stats.ban_period_seconds &&
elapsed <= ctx->hang_stats.ban_period_seconds) {
if (!i915_gem_context_is_default(ctx)) {
DRM_DEBUG("context hanging too fast, banning!\n");
return true;
2048,35 → 2183,107
}
}
 
static void i915_gem_free_request(struct drm_i915_gem_request *request)
void i915_gem_request_free(struct kref *req_ref)
{
struct intel_context *ctx = request->ctx;
struct drm_i915_gem_request *req = container_of(req_ref,
typeof(*req), ref);
struct intel_context *ctx = req->ctx;
 
list_del(&request->list);
i915_gem_request_remove_from_client(request);
if (req->file_priv)
i915_gem_request_remove_from_client(req);
 
if (ctx) {
if (i915.enable_execlists) {
struct intel_engine_cs *ring = request->ring;
if (ctx != req->ring->default_context)
intel_lr_context_unpin(req);
}
 
if (ctx != ring->default_context)
intel_lr_context_unpin(ring, ctx);
}
i915_gem_context_unreference(ctx);
}
kfree(request);
 
kfree(req);
}
 
int i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
struct drm_i915_gem_request *req;
int ret;
 
if (!req_out)
return -EINVAL;
 
*req_out = NULL;
 
// req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (req == NULL)
return -ENOMEM;
 
ret = i915_gem_get_seqno(ring->dev, &req->seqno);
if (ret)
goto err;
 
kref_init(&req->ref);
req->i915 = dev_priv;
req->ring = ring;
req->ctx = ctx;
i915_gem_context_reference(req->ctx);
 
if (i915.enable_execlists)
ret = intel_logical_ring_alloc_request_extras(req);
else
ret = intel_ring_alloc_request_extras(req);
if (ret) {
i915_gem_context_unreference(req->ctx);
goto err;
}
 
/*
* Reserve space in the ring buffer for all the commands required to
* eventually emit this request. This is to guarantee that the
* i915_add_request() call can't fail. Note that the reserve may need
* to be redone if the request is not actually submitted straight
* away, e.g. because a GPU scheduler has deferred it.
*/
if (i915.enable_execlists)
ret = intel_logical_ring_reserve_space(req);
else
ret = intel_ring_reserve_space(req);
if (ret) {
/*
* At this point, the request is fully allocated even if not
* fully prepared. Thus it can be cleaned up using the proper
* free code.
*/
i915_gem_request_cancel(req);
return ret;
}
 
*req_out = req;
return 0;
 
err:
kfree(req);
return ret;
}
 
void i915_gem_request_cancel(struct drm_i915_gem_request *req)
{
intel_ring_reserved_space_cancel(req->ringbuf);
 
i915_gem_request_unreference(req);
}
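
/*
 * Editor's usage sketch (assumption, not part of this revision): a
 * caller that fails part-way after allocating a request must cancel it
 * so the ring-space reservation is returned rather than leaked:
 *
 *	struct drm_i915_gem_request *req;
 *	int ret;
 *
 *	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 *	if (ret)
 *		return ret;
 *	ret = emit_commands(req);	(hypothetical helper)
 *	if (ret) {
 *		i915_gem_request_cancel(req);
 *		return ret;
 *	}
 *	__i915_add_request(req, NULL, true);
 */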
 
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *request;
u32 completed_seqno;
 
completed_seqno = ring->get_seqno(ring, false);
 
list_for_each_entry(request, &ring->request_list, list) {
if (i915_seqno_passed(completed_seqno, request->seqno))
if (i915_gem_request_completed(request, false))
continue;
 
return request;
2112,9 → 2319,9
 
obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
ring_list);
ring_list[ring->id]);
 
i915_gem_object_move_to_inactive(obj);
i915_gem_object_retire__read(obj, ring->id);
}
 
/*
2123,15 → 2330,17
* pinned in place.
*/
while (!list_empty(&ring->execlist_queue)) {
struct intel_ctx_submit_request *submit_req;
struct drm_i915_gem_request *submit_req;
 
submit_req = list_first_entry(&ring->execlist_queue,
struct intel_ctx_submit_request,
struct drm_i915_gem_request,
execlist_link);
list_del(&submit_req->execlist_link);
intel_runtime_pm_put(dev_priv);
i915_gem_context_unreference(submit_req->ctx);
kfree(submit_req);
 
if (submit_req->ctx != ring->default_context)
intel_lr_context_unpin(submit_req);
 
i915_gem_request_unreference(submit_req);
}
 
/*
2148,36 → 2357,10
struct drm_i915_gem_request,
list);
 
i915_gem_free_request(request);
i915_gem_request_retire(request);
}
 
/* These may not have been flush before the reset, do so now */
kfree(ring->preallocated_lazy_request);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
}
 
void i915_gem_restore_fences(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
 
/*
* Commit delayed tiling changes if we have an object still
* attached to the fence, otherwise just clear the fence.
*/
if (reg->obj) {
i915_gem_object_update_fence(reg->obj, reg,
reg->obj->tiling_mode);
} else {
i915_gem_write_fence(dev, i, NULL);
}
}
}
 
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
2198,6 → 2381,8
i915_gem_context_reset(dev);
 
i915_gem_restore_fences(dev);
 
WARN_ON(i915_verify_lists(dev));
}
 
/**
2206,15 → 2391,26
void
i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
uint32_t seqno;
WARN_ON(i915_verify_lists(ring->dev));
 
if (list_empty(&ring->request_list))
return;
/* Retire requests first as we use it above for the early return.
* If we retire requests last, we may use a later seqno and so clear
* the requests lists without clearing the active list, leading to
* confusion.
*/
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
 
WARN_ON(i915_verify_lists(ring->dev));
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
list);
 
seqno = ring->get_seqno(ring, true);
if (!i915_gem_request_completed(request, true))
break;
 
i915_gem_request_retire(request);
}
 
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate,
* before we free the context associated with the requests.
2224,53 → 2420,18
 
obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
ring_list);
ring_list[ring->id]);
 
if (!i915_seqno_passed(seqno, obj->last_read_seqno))
if (!list_empty(&obj->last_read_req[ring->id]->list))
break;
 
i915_gem_object_move_to_inactive(obj);
i915_gem_object_retire__read(obj, ring->id);
}
 
 
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
struct intel_ringbuffer *ringbuf;
 
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
list);
 
if (!i915_seqno_passed(seqno, request->seqno))
break;
 
trace_i915_gem_request_retire(ring, request->seqno);
 
/* This is one of the few common intersection points
* between legacy ringbuffer submission and execlists:
* we need to tell them apart in order to find the correct
* ringbuffer to which the request belongs to.
*/
if (i915.enable_execlists) {
struct intel_context *ctx = request->ctx;
ringbuf = ctx->engine[ring->id].ringbuf;
} else
ringbuf = ring->buffer;
 
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
* of tail of the request to update the last known position
* of the GPU head.
*/
ringbuf->last_retired_head = request->tail;
 
i915_gem_free_request(request);
}
 
if (unlikely(ring->trace_irq_seqno &&
i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
if (unlikely(ring->trace_irq_req &&
i915_gem_request_completed(ring->trace_irq_req, true))) {
ring->irq_put(ring);
ring->trace_irq_seqno = 0;
i915_gem_request_assign(&ring->trace_irq_req, NULL);
}
 
WARN_ON(i915_verify_lists(ring->dev));
2330,9 → 2491,26
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), mm.idle_work.work);
struct drm_device *dev = dev_priv->dev;
struct intel_engine_cs *ring;
int i;
 
intel_mark_idle(dev_priv->dev);
for_each_ring(ring, dev_priv, i)
if (!list_empty(&ring->request_list))
return;
 
intel_mark_idle(dev);
 
if (mutex_trylock(&dev->struct_mutex)) {
struct intel_engine_cs *ring;
int i;
 
for_each_ring(ring, dev_priv, i)
i915_gem_batch_pool_fini(&ring->batch_pool);
 
mutex_unlock(&dev->struct_mutex);
}
}
 
/**
* Ensures that an object will eventually get non-busy by flushing any required
2342,15 → 2520,27
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
int ret;
int i;
 
if (obj->active) {
ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
if (ret)
return ret;
if (!obj->active)
return 0;
 
i915_gem_retire_requests_ring(obj->ring);
for (i = 0; i < I915_NUM_RINGS; i++) {
struct drm_i915_gem_request *req;
 
req = obj->last_read_req[i];
if (req == NULL)
continue;
 
if (list_empty(&req->list))
goto retire;
 
if (i915_gem_request_completed(req, true)) {
__i915_gem_request_retire__upto(req);
retire:
i915_gem_object_retire__read(obj, i);
}
}
 
return 0;
}
2383,10 → 2573,10
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
struct intel_engine_cs *ring = NULL;
struct drm_i915_gem_request *req[I915_NUM_RINGS];
unsigned reset_counter;
u32 seqno = 0;
int ret = 0;
int i, n = 0;
int ret;
 
if (args->flags != 0)
return -EINVAL;
2406,18 → 2596,13
if (ret)
goto out;
 
if (obj->active) {
seqno = obj->last_read_seqno;
ring = obj->ring;
}
 
if (seqno == 0)
if (!obj->active)
goto out;
 
/* Do this after OLR check to make sure we make forward progress polling
* on this IOCTL with a timeout <=0 (like busy ioctl)
* on this IOCTL with a timeout == 0 (like busy ioctl)
*/
if (args->timeout_ns <= 0) {
if (args->timeout_ns == 0) {
ret = -ETIME;
goto out;
}
2424,10 → 2609,24
 
drm_gem_object_unreference(&obj->base);
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
for (i = 0; i < I915_NUM_RINGS; i++) {
if (obj->last_read_req[i] == NULL)
continue;
 
req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
}
 
mutex_unlock(&dev->struct_mutex);
 
return __i915_wait_seqno(ring, seqno, reset_counter, true,
&args->timeout_ns, file->driver_priv);
for (i = 0; i < n; i++) {
if (ret == 0)
ret = __i915_wait_request(req[i], reset_counter, true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
file->driver_priv);
i915_gem_request_unreference__unlocked(req[i]);
}
return ret;
 
out:
drm_gem_object_unreference(&obj->base);
2435,54 → 2634,130
return ret;
}
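
/*
 * Editor's sketch (assumed userspace view, using libdrm's drmIoctl(),
 * not from this revision): a timeout of zero turns the wait ioctl into
 * a non-blocking busy probe:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 0,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) && errno == ETIME)
 *		... the object is still busy ...
 */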
 
static int
__i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to,
struct drm_i915_gem_request *from_req,
struct drm_i915_gem_request **to_req)
{
struct intel_engine_cs *from;
int ret;
 
from = i915_gem_request_get_ring(from_req);
if (to == from)
return 0;
 
if (i915_gem_request_completed(from_req, true))
return 0;
 
if (!i915_semaphore_is_enabled(obj->base.dev)) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(from_req,
atomic_read(&i915->gpu_error.reset_counter),
i915->mm.interruptible,
NULL,
&i915->rps.semaphores);
if (ret)
return ret;
 
i915_gem_object_retire_request(obj, from_req);
} else {
int idx = intel_ring_sync_index(from, to);
u32 seqno = i915_gem_request_get_seqno(from_req);
 
WARN_ON(!to_req);
 
if (seqno <= from->semaphore.sync_seqno[idx])
return 0;
 
if (*to_req == NULL) {
ret = i915_gem_request_alloc(to, to->default_context, to_req);
if (ret)
return ret;
}
 
trace_i915_gem_ring_sync_to(*to_req, from, from_req);
ret = to->semaphore.sync_to(*to_req, from, seqno);
if (ret)
return ret;
 
/* We use last_read_req because sync_to()
* might have just caused seqno wrap under
* the radar.
*/
from->semaphore.sync_seqno[idx] =
i915_gem_request_get_seqno(obj->last_read_req[from->id]);
}
 
return 0;
}
 
/**
* i915_gem_object_sync - sync an object to a ring.
*
* @obj: object which may be in use on another ring.
* @to: ring we wish to use the object on. May be NULL.
* @to_req: request we wish to use the object for. See below.
* This will be allocated and returned if a request is
* required but not passed in.
*
* This code is meant to abstract object synchronization with the GPU.
* Calling with NULL implies synchronizing the object with the CPU
* rather than a particular GPU ring.
* rather than a particular GPU ring. Conceptually we serialise writes
* between engines inside the GPU. We only allow one engine to write
* into a buffer at any time, but multiple readers. To ensure each has
* a coherent view of memory, we must:
*
* - If there is an outstanding write request to the object, the new
* request must wait for it to complete (either CPU or in hw, requests
* on the same ring will be naturally ordered).
*
* - If we are a write request (pending_write_domain is set), the new
* request must wait for outstanding read requests to complete.
*
* For CPU synchronisation (NULL to) no request is required. For syncing with
* rings to_req must be non-NULL. However, a request does not have to be
* pre-allocated. If *to_req is NULL and sync commands will be emitted then a
* request will be allocated automatically and returned through *to_req. Note
* that it is not guaranteed that commands will be emitted (because the system
* might already be idle). Hence there is no need to create a request that
* might never have any work submitted. Note further that if a request is
* returned in *to_req, it is the responsibility of the caller to submit
* that request (after potentially adding more work to it).
*
* Returns 0 if successful, else propagates up the lower layer error.
*/
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to)
struct intel_engine_cs *to,
struct drm_i915_gem_request **to_req)
{
struct intel_engine_cs *from = obj->ring;
u32 seqno;
int ret, idx;
const bool readonly = obj->base.pending_write_domain == 0;
struct drm_i915_gem_request *req[I915_NUM_RINGS];
int ret, i, n;
 
if (from == NULL || to == from)
if (!obj->active)
return 0;
 
if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
return i915_gem_object_wait_rendering(obj, false);
if (to == NULL)
return i915_gem_object_wait_rendering(obj, readonly);
 
idx = intel_ring_sync_index(from, to);
 
seqno = obj->last_read_seqno;
/* Optimization: Avoid semaphore sync when we are sure we already
* waited for an object with higher seqno */
if (seqno <= from->semaphore.sync_seqno[idx])
return 0;
 
ret = i915_gem_check_olr(obj->ring, seqno);
n = 0;
if (readonly) {
if (obj->last_write_req)
req[n++] = obj->last_write_req;
} else {
for (i = 0; i < I915_NUM_RINGS; i++)
if (obj->last_read_req[i])
req[n++] = obj->last_read_req[i];
}
for (i = 0; i < n; i++) {
ret = __i915_gem_object_sync(obj, to, req[i], to_req);
if (ret)
return ret;
}
 
trace_i915_gem_ring_sync_to(from, to, seqno);
ret = to->semaphore.sync_to(to, from, seqno);
if (!ret)
/* We use last_read_seqno because sync_to()
* might have just caused seqno wrap under
* the radar.
*/
from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
 
return ret;
return 0;
}
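
/*
 * Editor's usage sketch (assumption): an execbuffer-style caller passes
 * a request pointer that may be lazily allocated by the sync itself and
 * is then responsible for submitting whatever comes back:
 *
 *	struct drm_i915_gem_request *to_req = NULL;
 *
 *	ret = i915_gem_object_sync(obj, to, &to_req);
 *	if (ret)
 *		return ret;
 *	... queue more work against to_req ...
 *	if (to_req)
 *		__i915_add_request(to_req, NULL, true);
 */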
 
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2490,7 → 2765,7
u32 old_write_domain, old_read_domains;
 
/* Force a pagefault for domain tracking on next user access */
// i915_gem_release_mmap(obj);
i915_gem_release_mmap(obj);
 
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
return;
2509,7 → 2784,7
old_write_domain);
}
 
int i915_vma_unbind(struct i915_vma *vma)
static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
{
struct drm_i915_gem_object *obj = vma->obj;
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2533,18 → 2808,14
 
BUG_ON(obj->pages == NULL);
 
ret = i915_gem_object_finish_gpu(obj);
if (wait) {
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
/* Continue on if we fail due to EIO, the GPU is hung so we
* should be safe and we need to cleanup or else we might
* cause memory corruption through use-after-free.
*/
}
 
/* Throw away the active reference before moving to the unbound list */
i915_gem_object_retire(obj);
 
if (i915_is_ggtt(vma->vm)) {
if (i915_is_ggtt(vma->vm) &&
vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
i915_gem_object_finish_gtt(obj);
 
/* release the fence reg _after_ flushing */
2555,11 → 2826,19
 
trace_i915_vma_unbind(vma);
 
vma->unbind_vma(vma);
vma->vm->unbind_vma(vma);
vma->bound = 0;
 
list_del_init(&vma->mm_list);
if (i915_is_ggtt(vma->vm))
if (i915_is_ggtt(vma->vm)) {
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
obj->map_and_fenceable = false;
} else if (vma->ggtt_view.pages) {
sg_free_table(vma->ggtt_view.pages);
kfree(vma->ggtt_view.pages);
}
vma->ggtt_view.pages = NULL;
}
 
drm_mm_remove_node(&vma->node);
i915_gem_vma_destroy(vma);
2566,10 → 2845,8
 
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
if (list_empty(&obj->vma_list)) {
i915_gem_gtt_finish_object(obj);
if (list_empty(&obj->vma_list))
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
}
 
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
2580,6 → 2857,16
return 0;
}
 
int i915_vma_unbind(struct i915_vma *vma)
{
return __i915_vma_unbind(vma, true);
}
 
int __i915_vma_unbind_no_wait(struct i915_vma *vma)
{
return __i915_vma_unbind(vma, false);
}
 
int i915_gpu_idle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
2589,354 → 2876,30
/* Flush everything onto the inactive list. */
for_each_ring(ring, dev_priv, i) {
if (!i915.enable_execlists) {
ret = i915_switch_context(ring, ring->default_context);
if (ret)
return ret;
}
struct drm_i915_gem_request *req;
 
ret = intel_ring_idle(ring);
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
}
 
return 0;
}
 
static void i965_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int fence_reg;
int fence_pitch_shift;
 
if (INTEL_INFO(dev)->gen >= 6) {
fence_reg = FENCE_REG_SANDYBRIDGE_0;
fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
} else {
fence_reg = FENCE_REG_965_0;
fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
}
 
fence_reg += reg * 8;
 
/* To w/a incoherency with non-atomic 64-bit register updates,
* we split the 64-bit update into two 32-bit writes. In order
* for a partial fence not to be evaluated between writes, we
* precede the update with write to turn off the fence register,
* and only enable the fence as the last step.
*
* For extra levels of paranoia, we make sure each step lands
* before applying the next step.
*/
I915_WRITE(fence_reg, 0);
POSTING_READ(fence_reg);
 
if (obj) {
u32 size = i915_gem_obj_ggtt_size(obj);
uint64_t val;
 
val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
 
I915_WRITE(fence_reg + 4, val >> 32);
POSTING_READ(fence_reg + 4);
 
I915_WRITE(fence_reg + 0, val);
POSTING_READ(fence_reg);
} else {
I915_WRITE(fence_reg + 4, 0);
POSTING_READ(fence_reg + 4);
}
}
 
static void i915_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
 
if (obj) {
u32 size = i915_gem_obj_ggtt_size(obj);
int pitch_val;
int tile_width;
 
WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
(size & -size) != size ||
(i915_gem_obj_ggtt_offset(obj) & (size - 1)),
"object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
 
if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128;
else
tile_width = 512;
 
/* Note: pitch better be a power of two tile widths */
pitch_val = obj->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
 
val = i915_gem_obj_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I915_FENCE_SIZE_BITS(size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
} else
val = 0;
 
if (reg < 8)
reg = FENCE_REG_830_0 + reg * 4;
else
reg = FENCE_REG_945_8 + (reg - 8) * 4;
 
I915_WRITE(reg, val);
POSTING_READ(reg);
}
 
static void i830_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val;
 
if (obj) {
u32 size = i915_gem_obj_ggtt_size(obj);
uint32_t pitch_val;
 
WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
(size & -size) != size ||
(i915_gem_obj_ggtt_offset(obj) & (size - 1)),
"object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
i915_gem_obj_ggtt_offset(obj), size);
 
pitch_val = obj->stride / 128;
pitch_val = ffs(pitch_val) - 1;
 
val = i915_gem_obj_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I830_FENCE_SIZE_BITS(size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
} else
val = 0;
 
I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
POSTING_READ(FENCE_REG_830_0 + reg * 4);
}
 
inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
{
return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
}
 
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Ensure that all CPU reads are completed before installing a fence
* and all writes before removing the fence.
*/
if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
mb();
 
WARN(obj && (!obj->stride || !obj->tiling_mode),
"bogus fence setup with stride: 0x%x, tiling mode: %i\n",
obj->stride, obj->tiling_mode);
 
switch (INTEL_INFO(dev)->gen) {
case 9:
case 8:
case 7:
case 6:
case 5:
case 4: i965_write_fence_reg(dev, reg, obj); break;
case 3: i915_write_fence_reg(dev, reg, obj); break;
case 2: i830_write_fence_reg(dev, reg, obj); break;
default: BUG();
}
 
/* And similarly be paranoid that no direct access to this region
* is reordered to before the fence is installed.
*/
if (i915_gem_object_needs_mb(obj))
mb();
}
 
static inline int fence_number(struct drm_i915_private *dev_priv,
struct drm_i915_fence_reg *fence)
{
return fence - dev_priv->fence_regs;
}
 
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int reg = fence_number(dev_priv, fence);
 
i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
 
if (enable) {
obj->fence_reg = reg;
fence->obj = obj;
list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
} else {
obj->fence_reg = I915_FENCE_REG_NONE;
fence->obj = NULL;
list_del_init(&fence->lru_list);
}
obj->fence_dirty = false;
}
 
static int
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
if (obj->last_fenced_seqno) {
int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
if (ret)
ret = i915_switch_context(req);
if (ret) {
i915_gem_request_cancel(req);
return ret;
 
obj->last_fenced_seqno = 0;
}
 
return 0;
i915_add_request_no_flush(req);
}
 
int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct drm_i915_fence_reg *fence;
int ret;
 
ret = i915_gem_object_wait_fence(obj);
ret = intel_ring_idle(ring);
if (ret)
return ret;
 
if (obj->fence_reg == I915_FENCE_REG_NONE)
return 0;
 
fence = &dev_priv->fence_regs[obj->fence_reg];
 
if (WARN_ON(fence->pin_count))
return -EBUSY;
 
i915_gem_object_fence_lost(obj);
i915_gem_object_update_fence(obj, fence, false);
 
return 0;
}
 
static struct drm_i915_fence_reg *
i915_find_fence_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_fence_reg *reg, *avail;
int i;
 
/* First try to find a free reg */
avail = NULL;
for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
reg = &dev_priv->fence_regs[i];
if (!reg->obj)
return reg;
 
if (!reg->pin_count)
avail = reg;
}
 
if (avail == NULL)
goto deadlock;
 
/* None available, try to steal one or wait for a user to finish */
list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
if (reg->pin_count)
continue;
 
return reg;
}
 
deadlock:
/* Wait for completion of pending flips which consume fences */
// if (intel_has_pending_fb_unpin(dev))
// return ERR_PTR(-EAGAIN);
 
return ERR_PTR(-EDEADLK);
}
 
/**
* i915_gem_object_get_fence - set up fencing for an object
* @obj: object to map through a fence reg
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
* This function walks the fence regs looking for a free one for @obj,
* stealing one if it can't find any.
*
* It then sets up the reg based on the object's properties: address, pitch
* and tiling format.
*
* For an untiled surface, this removes any existing fence.
*/
int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool enable = obj->tiling_mode != I915_TILING_NONE;
struct drm_i915_fence_reg *reg;
int ret;
 
/* Have we updated the tiling parameters upon the object and so
* will need to serialise the write to the associated fence register?
*/
if (obj->fence_dirty) {
ret = i915_gem_object_wait_fence(obj);
if (ret)
return ret;
}
 
/* Just update our place in the LRU if our fence is getting reused. */
if (obj->fence_reg != I915_FENCE_REG_NONE) {
reg = &dev_priv->fence_regs[obj->fence_reg];
if (!obj->fence_dirty) {
list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list);
WARN_ON(i915_verify_lists(dev));
return 0;
}
} else if (enable) {
if (WARN_ON(!obj->map_and_fenceable))
return -EINVAL;
 
reg = i915_find_fence_reg(dev);
if (IS_ERR(reg))
return PTR_ERR(reg);
 
if (reg->obj) {
struct drm_i915_gem_object *old = reg->obj;
 
ret = i915_gem_object_wait_fence(old);
if (ret)
return ret;
 
i915_gem_object_fence_lost(old);
}
} else
return 0;
 
i915_gem_object_update_fence(obj, reg, enable);
 
return 0;
}
 
static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
unsigned long cache_level)
{
2971,51 → 2934,86
}
 
/**
* Finds free space in the GTT aperture and binds the object there.
* Finds free space in the GTT aperture and binds the object or a view of it
* there.
*/
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *ggtt_view,
unsigned alignment,
uint64_t flags)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 size, fence_size, fence_alignment, unfenced_alignment;
unsigned long start =
flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
unsigned long end =
flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
u32 fence_alignment, unfenced_alignment;
u32 search_flag, alloc_flag;
u64 start, end;
u64 size, fence_size;
struct i915_vma *vma;
int ret;
 
if (i915_is_ggtt(vm)) {
u32 view_size;
 
if (WARN_ON(!ggtt_view))
return ERR_PTR(-EINVAL);
 
view_size = i915_ggtt_view_size(obj, ggtt_view);
 
fence_size = i915_gem_get_gtt_size(dev,
view_size,
obj->tiling_mode);
fence_alignment = i915_gem_get_gtt_alignment(dev,
view_size,
obj->tiling_mode,
true);
unfenced_alignment = i915_gem_get_gtt_alignment(dev,
view_size,
obj->tiling_mode,
false);
size = flags & PIN_MAPPABLE ? fence_size : view_size;
} else {
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
fence_alignment = i915_gem_get_gtt_alignment(dev,
obj->base.size,
obj->tiling_mode, true);
obj->tiling_mode,
true);
unfenced_alignment =
i915_gem_get_gtt_alignment(dev,
obj->base.size,
obj->tiling_mode, false);
obj->tiling_mode,
false);
size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
}
 
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
end = vm->total;
if (flags & PIN_MAPPABLE)
end = min_t(u64, end, dev_priv->gtt.mappable_end);
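/* Editor's note (assumption): PIN_ZONE_4G keeps the binding below 4GiB
 * for objects that must remain addressable with 32-bit offsets. */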
if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32));
 
if (alignment == 0)
alignment = flags & PIN_MAPPABLE ? fence_alignment :
unfenced_alignment;
if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
ggtt_view ? ggtt_view->type : 0,
alignment);
return ERR_PTR(-EINVAL);
}
 
size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
 
/* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space.
/* If binding the object/GGTT view requires more space than the entire
* aperture has, reject it early before evicting everything in a vain
* attempt to find space.
*/
if (obj->base.size > end) {
DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
obj->base.size,
if (size > end) {
DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
ggtt_view ? ggtt_view->type : 0,
size,
flags & PIN_MAPPABLE ? "mappable" : "total",
end);
return ERR_PTR(-E2BIG);
3027,17 → 3025,27
 
i915_gem_object_pin_pages(obj);
 
vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
i915_gem_obj_lookup_or_create_vma(obj, vm);
 
if (IS_ERR(vma))
goto err_unpin;
 
if (flags & PIN_HIGH) {
search_flag = DRM_MM_SEARCH_BELOW;
alloc_flag = DRM_MM_CREATE_TOP;
} else {
search_flag = DRM_MM_SEARCH_DEFAULT;
alloc_flag = DRM_MM_CREATE_DEFAULT;
}
 
search_free:
ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
obj->cache_level,
start, end,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
search_flag,
alloc_flag);
if (ret) {
 
goto err_free_vma;
3047,7 → 3055,8
goto err_remove_node;
}
 
ret = i915_gem_gtt_prepare_object(obj);
trace_i915_vma_bind(vma, flags);
ret = i915_vma_bind(vma, obj->cache_level, flags);
if (ret)
goto err_remove_node;
 
3054,10 → 3063,6
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
 
trace_i915_vma_bind(vma, flags);
vma->bind_vma(vma, obj->cache_level,
flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
 
return vma;
 
err_remove_node:
3096,11 → 3101,14
* snooping behaviour occurs naturally as the result of our domain
* tracking.
*/
if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
obj->cache_dirty = true;
return false;
}
 
trace_i915_gem_object_clflush(obj);
drm_clflush_sg(obj->pages);
obj->cache_dirty = false;
 
return true;
}
3127,7 → 3135,7
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
 
intel_fb_obj_flush(obj, false);
intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
3136,8 → 3144,7
 
/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
bool force)
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
uint32_t old_write_domain;
 
3144,13 → 3151,13
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
 
if (i915_gem_clflush_object(obj, force))
if (i915_gem_clflush_object(obj, obj->pin_display))
i915_gem_chipset_flush(obj->base.dev);
 
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
 
intel_fb_obj_flush(obj, false);
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
3166,15 → 3173,10
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
uint32_t old_write_domain, old_read_domains;
struct i915_vma *vma;
int ret;
 
/* Not valid to be called on unbound objects. */
if (vma == NULL)
return -EINVAL;
 
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
return 0;
 
3182,9 → 3184,20
if (ret)
return ret;
 
i915_gem_object_retire(obj);
i915_gem_object_flush_cpu_write_domain(obj, false);
/* Flush and acquire obj->pages so that we are coherent through
* direct access in memory with previous cached writes through
* shmemfs and that our cache domain tracking remains valid.
* For example, if the obj->filp was moved to swap without us
* being notified and releasing the pages, we would mistakenly
* continue to assume that the obj remained out of the CPU cached
* domain.
*/
ret = i915_gem_object_get_pages(obj);
if (ret)
return ret;
 
i915_gem_object_flush_cpu_write_domain(obj);
 
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
* GTT domain upon first access.
3206,92 → 3219,135
obj->dirty = 1;
}
 
if (write)
intel_fb_obj_invalidate(obj, NULL);
 
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
 
/* And bump the LRU for this access */
if (i915_gem_object_is_inactive(obj))
vma = i915_gem_obj_to_ggtt(obj);
if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
list_move_tail(&vma->mm_list,
&dev_priv->gtt.base.inactive_list);
&to_i915(obj->base.dev)->gtt.base.inactive_list);
 
return 0;
}
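
/*
 * Editor's note (assumption): this path is typically reached from
 * userspace via DRM_IOCTL_I915_GEM_SET_DOMAIN with
 * read_domains = I915_GEM_DOMAIN_GTT, as in the set-domain ioctl
 * handling earlier in this file.
 */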
 
/**
* Changes the cache-level of an object across all VMA.
*
* After this function returns, the object will be in the new cache-level
* across all GTT and the contents of the backing storage will be coherent,
* with respect to the new cache-level. In order to keep the backing storage
* coherent for all users, we only allow a single cache level to be set
* globally on the object and prevent it from being changed whilst the
* hardware is reading from the object. That is if the object is currently
* on the scanout it will be set to uncached (or equivalent display
* cache coherency) and all non-MOCS GPU access will also be uncached so
* that all direct access to the scanout remains coherent.
*/
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
struct i915_vma *vma, *next;
int ret;
bool bound = false;
int ret = 0;
 
if (obj->cache_level == cache_level)
return 0;
goto out;
 
if (i915_gem_obj_is_pinned(obj)) {
/* Inspect the list of currently bound VMA and unbind any that would
* be invalid given the new cache-level. This is principally to
* catch the issue of the CS prefetch crossing page boundaries and
* reading an invalid PTE on older architectures.
*/
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
 
if (vma->pin_count) {
DRM_DEBUG("can not change the cache level of pinned objects\n");
return -EBUSY;
}
 
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
if (!i915_gem_valid_gtt_space(vma, cache_level)) {
ret = i915_vma_unbind(vma);
if (ret)
return ret;
} else
bound = true;
}
}
 
if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_finish_gpu(obj);
/* We can reuse the existing drm_mm nodes but need to change the
* cache-level on the PTE. We could simply unbind them all and
* rebind with the correct cache-level on next use. However since
* we already have a valid slot, dma mapping, pages etc, we may as well
* rewrite the PTE in the belief that doing so tramples upon less
* state and so involves less work.
*/
if (bound) {
/* Before we change the PTE, the GPU must not be accessing it.
* If we wait upon the object, we know that all the bound
* VMA are no longer active.
*/
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
 
i915_gem_object_finish_gtt(obj);
if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
/* Access to snoopable pages through the GTT is
* incoherent and on some machines causes a hard
* lockup. Relinquish the CPU mmapping to force
* userspace to refault in the pages and we can
* then double check if the GTT mapping is still
* valid for that pointer access.
*/
i915_gem_release_mmap(obj);
 
/* Before SandyBridge, you could not use tiling or fence
* registers with snooped memory, so relinquish any fences
* currently pointing to our region in the aperture.
/* As we no longer need a fence for GTT access,
* we can relinquish it now (and so prevent having
* to steal a fence from someone else on the next
* fence request). Note GPU activity would have
* dropped the fence as all snoopable access is
* supposed to be linear.
*/
if (INTEL_INFO(dev)->gen < 6) {
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
} else {
/* We either have incoherent backing store and
* so no GTT access or the architecture is fully
* coherent. In such cases, existing GTT mmaps
* ignore the cache bit in the PTE and we can
* rewrite it without confusing the GPU or having
* to force userspace to fault back in its mmaps.
*/
}
 
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (drm_mm_node_allocated(&vma->node))
vma->bind_vma(vma, cache_level,
vma->bound & GLOBAL_BIND);
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
 
ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
if (ret)
return ret;
}
}
 
list_for_each_entry(vma, &obj->vma_list, vma_link)
vma->node.color = cache_level;
obj->cache_level = cache_level;
 
if (cpu_write_needs_clflush(obj)) {
u32 old_read_domains, old_write_domain;
 
/* If we're coming from LLC cached, then we haven't
* actually been tracking whether the data is in the
* CPU cache or not, since we only allow one bit set
* in obj->write_domain and have been skipping the clflushes.
* Just set it to the CPU cache for now.
out:
/* Flush the dirty CPU caches to the backing storage so that the
* object is now coherent at its new cache level (with respect
* to the access domain).
*/
i915_gem_object_retire(obj);
WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
 
old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;
 
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
if (obj->cache_dirty &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
cpu_write_needs_clflush(obj)) {
if (i915_gem_clflush_object(obj, true))
i915_gem_chipset_flush(obj->base.dev);
}
 
return 0;
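
/*
 * Editor's usage sketch (assumption): display code moves a scanout
 * buffer to an uncached (or write-through) level before pinning it:
 *
 *	ret = i915_gem_object_set_cache_level(obj,
 *					      HAS_WT(obj->base.dev) ?
 *					      I915_CACHE_WT : I915_CACHE_NONE);
 */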
3302,17 → 3358,10
{
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
int ret;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
 
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
if (&obj->base == NULL)
return -ENOENT;
 
switch (obj->cache_level) {
case I915_CACHE_LLC:
3329,15 → 3378,14
break;
}
 
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
drm_gem_object_unreference_unlocked(&obj->base);
return 0;
}
 
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
3348,6 → 3396,15
level = I915_CACHE_NONE;
break;
case I915_CACHING_CACHED:
/*
* Due to a HW issue on BXT A stepping, GPU stores via a
* snooped mapping may leave stale data in a corresponding CPU
* cacheline, whereas normally such cachelines would get
* invalidated.
*/
if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
return -ENODEV;
 
level = I915_CACHE_LLC;
break;
case I915_CACHING_DISPLAY:
3357,9 → 3414,11
return -EINVAL;
}
 
intel_runtime_pm_get(dev_priv);
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
goto rpm_put;
 
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
3372,31 → 3431,12
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
rpm_put:
intel_runtime_pm_put(dev_priv);
 
return ret;
}
 
static bool is_pin_display(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
 
vma = i915_gem_obj_to_ggtt(obj);
if (!vma)
return false;
 
/* There are 3 sources that pin objects:
* 1. The display engine (scanouts, sprites, cursors);
* 2. Reservations for execbuffer;
* 3. The user.
*
* We can ignore reservations as we hold the struct_mutex and
* are only called outside of the reservation path. The user
* can only increment pin_count once, and so if after
* subtracting the potential reference by the user, any pin_count
* remains, it must be due to another use by the display engine.
*/
return vma->pin_count - !!obj->user_pin_count;
}
 
/*
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
3405,23 → 3445,21
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_engine_cs *pipelined)
struct intel_engine_cs *pipelined,
struct drm_i915_gem_request **pipelined_request,
const struct i915_ggtt_view *view)
{
u32 old_read_domains, old_write_domain;
bool was_pin_display;
int ret;
 
if (pipelined != obj->ring) {
ret = i915_gem_object_sync(obj, pipelined);
ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
if (ret)
return ret;
}
 
/* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains.
*/
was_pin_display = obj->pin_display;
obj->pin_display = true;
obj->pin_display++;
 
/* The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
3441,11 → 3479,13
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
ret = i915_gem_object_ggtt_pin(obj, view, alignment,
view->type == I915_GGTT_VIEW_NORMAL ?
PIN_MAPPABLE : 0);
if (ret)
goto err_unpin_display;
 
i915_gem_object_flush_cpu_write_domain(obj, true);
i915_gem_object_flush_cpu_write_domain(obj);
 
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
3463,33 → 3503,20
return 0;
 
err_unpin_display:
WARN_ON(was_pin_display != is_pin_display(obj));
obj->pin_display = was_pin_display;
obj->pin_display--;
return ret;
}
 
void
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
i915_gem_object_ggtt_unpin(obj);
obj->pin_display = is_pin_display(obj);
}
if (WARN_ON(obj->pin_display == 0))
return;
 
int
i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
{
int ret;
i915_gem_object_ggtt_unpin_view(obj, view);
 
if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
return 0;
 
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
 
/* Ensure that we invalidate the GPU's caches and TLBs. */
obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
return 0;
obj->pin_display--;
}
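
/*
 * Editor's sketch (assumption): pin and unpin must be paired on the
 * same GGTT view, as in the modesetting code:
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
 *						   pipelined, &request,
 *						   &view);
 *	...
 *	i915_gem_object_unpin_from_display_plane(obj, &view);
 */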
 
/**
3511,7 → 3538,6
if (ret)
return ret;
 
i915_gem_object_retire(obj);
i915_gem_object_flush_gtt_write_domain(obj);
 
old_write_domain = obj->base.write_domain;
3537,9 → 3563,6
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
 
if (write)
intel_fb_obj_invalidate(obj, NULL);
 
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
3562,11 → 3585,9
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
struct drm_i915_gem_request *request;
struct intel_engine_cs *ring = NULL;
unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_gem_request *request, *target = NULL;
unsigned reset_counter;
u32 seqno = 0;
int ret;
 
ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3582,19 → 3603,29
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
 
ring = request->ring;
seqno = request->seqno;
/*
* Note that the request might not have been submitted yet.
* In which case emitted_jiffies will be zero.
*/
if (!request->emitted_jiffies)
continue;
 
target = request;
}
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (target)
i915_gem_request_reference(target);
spin_unlock(&file_priv->mm.lock);
 
if (seqno == 0)
if (target == NULL)
return 0;
 
ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
i915_gem_request_unreference__unlocked(target);
 
return ret;
}
 
3617,9 → 3648,33
return false;
}
 
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
bool mappable, fenceable;
u32 fence_size, fence_alignment;
 
fence_size = i915_gem_get_gtt_size(obj->base.dev,
obj->base.size,
obj->tiling_mode);
fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
obj->base.size,
obj->tiling_mode,
true);
 
fenceable = (vma->node.size == fence_size &&
(vma->node.start & (fence_alignment - 1)) == 0);
 
mappable = (vma->node.start + fence_size <=
to_i915(obj->base.dev)->gtt.mappable_end);
 
obj->map_and_fenceable = mappable && fenceable;
}
 
static int
i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *ggtt_view,
uint32_t alignment,
uint64_t flags)
{
3637,7 → 3692,15
if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
return -EINVAL;
 
vma = i915_gem_obj_to_vma(obj, vm);
if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
return -EINVAL;
 
vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
i915_gem_obj_to_vma(obj, vm);
 
if (IS_ERR(vma))
return PTR_ERR(vma);
 
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
3644,10 → 3707,13
 
if (i915_vma_misplaced(vma, alignment, flags)) {
WARN(vma->pin_count,
"bo is already pinned with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
"bo is already pinned in %s with incorrect alignment:"
" offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
i915_gem_obj_offset(obj, vm), alignment,
ggtt_view ? "ggtt" : "ppgtt",
upper_32_bits(vma->node.start),
lower_32_bits(vma->node.start),
alignment,
!!(flags & PIN_MAPPABLE),
obj->map_and_fenceable);
ret = i915_vma_unbind(vma);
3660,180 → 3726,64
 
bound = vma ? vma->bound : 0;
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
} else {
ret = i915_vma_bind(vma, obj->cache_level, flags);
if (ret)
return ret;
}
 
if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
 
if ((bound ^ vma->bound) & GLOBAL_BIND) {
bool mappable, fenceable;
u32 fence_size, fence_alignment;
 
fence_size = i915_gem_get_gtt_size(obj->base.dev,
obj->base.size,
obj->tiling_mode);
fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
obj->base.size,
obj->tiling_mode,
true);
 
fenceable = (vma->node.size == fence_size &&
(vma->node.start & (fence_alignment - 1)) == 0);
 
mappable = (vma->node.start + obj->base.size <=
dev_priv->gtt.mappable_end);
 
obj->map_and_fenceable = mappable && fenceable;
if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
(bound ^ vma->bound) & GLOBAL_BIND) {
__i915_vma_set_map_and_fenceable(vma);
WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
}
 
WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
 
vma->pin_count++;
if (flags & PIN_MAPPABLE)
obj->pin_mappable |= true;
 
return 0;
}
 
void
i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
uint64_t flags)
{
struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
 
BUG_ON(!vma);
BUG_ON(vma->pin_count == 0);
BUG_ON(!i915_gem_obj_ggtt_bound(obj));
 
if (--vma->pin_count == 0)
obj->pin_mappable = false;
return i915_gem_object_do_pin(obj, vm,
i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
alignment, flags);
}
 
bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
int
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
uint32_t alignment,
uint64_t flags)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
if (WARN_ONCE(!view, "no view specified"))
return -EINVAL;
 
WARN_ON(!ggtt_vma ||
dev_priv->fence_regs[obj->fence_reg].pin_count >
ggtt_vma->pin_count);
dev_priv->fence_regs[obj->fence_reg].pin_count++;
return true;
} else
return false;
return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
alignment, flags | PIN_GLOBAL);
}
 
void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
dev_priv->fence_regs[obj->fence_reg].pin_count--;
}
}
struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
 
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_pin *args = data;
struct drm_i915_gem_object *obj;
int ret;
BUG_ON(!vma);
WARN_ON(vma->pin_count == 0);
WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
 
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
--vma->pin_count;
}
 
if (obj->madv != I915_MADV_WILLNEED) {
DRM_DEBUG("Attempting to pin a purgeable buffer\n");
ret = -EFAULT;
goto out;
}
 
if (obj->pin_filp != NULL && obj->pin_filp != file) {
DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
args->handle);
ret = -EINVAL;
goto out;
}
 
if (obj->user_pin_count == ULONG_MAX) {
ret = -EBUSY;
goto out;
}
 
if (obj->user_pin_count == 0) {
ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
if (ret)
goto out;
}
 
obj->user_pin_count++;
obj->pin_filp = file;
 
args->offset = i915_gem_obj_ggtt_offset(obj);
out:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
 
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_pin *args = data;
struct drm_i915_gem_object *obj;
int ret;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
 
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
 
if (obj->pin_filp != file) {
DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
ret = -EINVAL;
goto out;
}
obj->user_pin_count--;
if (obj->user_pin_count == 0) {
obj->pin_filp = NULL;
i915_gem_object_ggtt_unpin(obj);
}
 
out:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
 
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
3857,13 → 3807,15
* necessary flushes here.
*/
ret = i915_gem_object_flush_active(obj);
if (ret)
goto unref;
 
args->busy = obj->active;
if (obj->ring) {
BUILD_BUG_ON(I915_NUM_RINGS > 16);
args->busy |= intel_ring_flag(obj->ring) << 16;
}
args->busy = obj->active << 16;
if (obj->last_write_req)
args->busy |= obj->last_write_req->ring->id;
 
unref:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
3924,7 → 3876,7
obj->madv = args->madv;
 
/* if the object is no longer attached, discard its backing storage */
if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
i915_gem_object_truncate(obj);
 
args->retained = obj->madv != __I915_MADV_PURGED;
3940,10 → 3892,14
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
int i;
 
INIT_LIST_HEAD(&obj->global_list);
INIT_LIST_HEAD(&obj->ring_list);
for (i = 0; i < I915_NUM_RINGS; i++)
INIT_LIST_HEAD(&obj->ring_list[i]);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
 
obj->ops = ops;
 
4069,10 → 4025,29
struct i915_address_space *vm)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link)
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (i915_is_ggtt(vma->vm) &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm)
return vma;
}
return NULL;
}
 
struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
struct i915_vma *vma;
 
if (WARN_ONCE(!view, "no view specified"))
return ERR_PTR(-EINVAL);
 
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma;
return NULL;
}
 
4095,6 → 4070,17
kfree(vma);
}
 
static void
i915_gem_stop_ringbuffers(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
int i;
 
for_each_ring(ring, dev_priv, i)
dev_priv->gt.stop_ring(ring);
}
 
#if 0
int
i915_gem_suspend(struct drm_device *dev)
4109,17 → 4095,18
 
i915_gem_retire_requests(dev);
 
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
 
i915_gem_stop_ringbuffers(dev);
mutex_unlock(&dev->struct_mutex);
 
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
flush_delayed_work(&dev_priv->mm.idle_work);
 
/* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
WARN_ON(dev_priv->mm.busy);
 
return 0;
 
err:
4128,8 → 4115,9
}
#endif
 
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
{
struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4139,7 → 4127,7
if (!HAS_L3_DPF(dev) || !remap_info)
return 0;
 
ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
if (ret)
return ret;
 
4184,22 → 4172,6
BUG();
}
 
static bool
intel_enable_blt(struct drm_device *dev)
{
if (!HAS_BLT(dev))
return false;
 
/* The blitter was dysfunctional on early prototypes */
if (IS_GEN6(dev) && dev->pdev->revision < 8) {
DRM_INFO("BLT not supported on this pre-production hardware;"
" graphics performance will be degraded.\n");
return false;
}
 
return true;
}
 
static void init_unused_ring(struct drm_device *dev, u32 base)
{
struct drm_i915_private *dev_priv = dev->dev_private;
4232,14 → 4204,6
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
/*
* At least 830 can leave some of the unused rings
* "active" (ie. head != tail) after resume which
* will prevent c3 entry. Make sure all unused rings
* are totally idle.
*/
init_unused_rings(dev);
 
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
4250,7 → 4214,7
goto cleanup_render_ring;
}
 
if (intel_enable_blt(dev)) {
if (HAS_BLT(dev)) {
ret = intel_init_blt_ring_buffer(dev);
if (ret)
goto cleanup_bsd_ring;
4268,14 → 4232,8
goto cleanup_vebox_ring;
}
 
ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
if (ret)
goto cleanup_bsd2_ring;
 
return 0;
 
cleanup_bsd2_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
4292,11 → 4250,15
i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
struct intel_engine_cs *ring;
int ret, i, j;
 
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
 
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
if (dev_priv->ellc_size)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
4318,34 → 4280,94
 
i915_gem_init_swizzling(dev);
 
ret = dev_priv->gt.init_rings(dev);
/*
* At least 830 can leave some of the unused rings
* "active" (ie. head != tail) after resume which
* will prevent c3 entry. Make sure all unused rings
* are totally idle.
*/
init_unused_rings(dev);
 
BUG_ON(!dev_priv->ring[RCS].default_context);
 
ret = i915_ppgtt_init_hw(dev);
if (ret) {
DRM_ERROR("PPGTT enable HW failed %d\n", ret);
goto out;
}
 
/* Need to do basic initialisation of all rings first: */
for_each_ring(ring, dev_priv, i) {
ret = ring->init_hw(ring);
if (ret)
return ret;
goto out;
}
 
for (i = 0; i < NUM_L3_SLICES(dev); i++)
i915_gem_l3_remap(&dev_priv->ring[RCS], i);
/* We can't enable contexts until all firmware is loaded */
if (HAS_GUC_UCODE(dev)) {
ret = intel_guc_ucode_load(dev);
if (ret) {
/*
* If we got an error and GuC submission is enabled, map
* the error to -EIO so the GPU will be declared wedged.
* OTOH, if we didn't intend to use the GuC anyway, just
* discard the error and carry on.
*/
DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
i915.enable_guc_submission ? "" :
" (ignored)");
ret = i915.enable_guc_submission ? -EIO : 0;
if (ret)
goto out;
}
}
 
/*
* XXX: Contexts should only be initialized once. Doing a switch to the
* default context switch however is something we'd like to do after
* reset or thaw (the latter may not actually be necessary for HW, but
* goes with our code better). Context switching requires rings (for
* the do_switch), but before enabling PPGTT. So don't move this.
* Increment the next seqno by 0x100 so we have a visible break
* on re-initialisation
*/
ret = i915_gem_context_enable(dev_priv);
if (ret && ret != -EIO) {
DRM_ERROR("Context enable failed %d\n", ret);
ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
if (ret)
goto out;
 
/* Now it is safe to go back round and do everything else: */
for_each_ring(ring, dev_priv, i) {
struct drm_i915_gem_request *req;
 
WARN_ON(!ring->default_context);
 
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret) {
i915_gem_cleanup_ringbuffer(dev);
goto out;
}
 
return ret;
if (ring->id == RCS) {
for (j = 0; j < NUM_L3_SLICES(dev); j++)
i915_gem_l3_remap(req, j);
}
 
ret = i915_ppgtt_init_hw(dev);
ret = i915_ppgtt_init_ring(req);
if (ret && ret != -EIO) {
DRM_ERROR("PPGTT enable failed %d\n", ret);
DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
i915_gem_request_cancel(req);
i915_gem_cleanup_ringbuffer(dev);
goto out;
}
 
ret = i915_gem_context_enable(req);
if (ret && ret != -EIO) {
DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
i915_gem_request_cancel(req);
i915_gem_cleanup_ringbuffer(dev);
goto out;
}
 
i915_add_request_no_flush(req);
}
 
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
 
4368,31 → 4390,39
}
 
if (!i915.enable_execlists) {
dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
dev_priv->gt.init_rings = i915_gem_init_rings;
dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
dev_priv->gt.stop_ring = intel_stop_ring_buffer;
} else {
dev_priv->gt.do_execbuf = intel_execlists_submission;
dev_priv->gt.execbuf_submit = intel_execlists_submission;
dev_priv->gt.init_rings = intel_logical_rings_init;
dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
dev_priv->gt.stop_ring = intel_logical_ring_stop;
}
 
/* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
* used by the CS may be stale, despite us poking the TLB reset. If
* we hold the forcewake during initialisation these problems
* just magically go away.
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
// ret = i915_gem_init_userptr(dev);
// if (ret) {
// mutex_unlock(&dev->struct_mutex);
// return ret;
// }
// if (ret)
// goto out_unlock;
 
i915_gem_init_global_gtt(dev);
 
ret = i915_gem_context_init(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
if (ret)
goto out_unlock;
 
ret = dev_priv->gt.init_rings(dev);
if (ret)
goto out_unlock;
 
ret = i915_gem_init_hw(dev);
if (ret == -EIO) {
/* Allow ring initialisation to fail by marking the GPU as
4400,9 → 4430,12
* for all other failure, such as an allocation failure, bail.
*/
DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
ret = 0;
}
 
out_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
 
return ret;
4426,18 → 4459,6
INIT_LIST_HEAD(&ring->request_list);
}
 
void i915_init_vm(struct drm_i915_private *dev_priv,
struct i915_address_space *vm)
{
if (!i915_is_ggtt(vm))
drm_mm_init(&vm->mm, vm->start, vm->total);
vm->dev = dev_priv->dev;
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
INIT_LIST_HEAD(&vm->global_link);
list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
 
void
i915_gem_load(struct drm_device *dev)
{
4445,8 → 4466,6
int i;
 
INIT_LIST_HEAD(&dev_priv->vm_list);
i915_init_vm(dev_priv, &dev_priv->gtt.base);
 
INIT_LIST_HEAD(&dev_priv->context_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4461,18 → 4480,8
i915_gem_idle_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
I915_WRITE(MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
}
 
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
/* Old X drivers will take 0-2 for front, back, depth buffers */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
dev_priv->fence_reg_start = 3;
 
if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
dev_priv->num_fence_regs = 32;
else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4480,6 → 4489,18
else
dev_priv->num_fence_regs = 8;
 
if (intel_vgpu_active(dev))
dev_priv->num_fence_regs =
I915_READ(vgtif_reg(avail_rs.fence_num));
 
/*
* Set initial sequence number for requests.
* Using this number allows the wraparound to happen early,
* catching any obvious problems.
*/
dev_priv->next_seqno = ((u32)~0 - 0x1100);
dev_priv->last_seqno = ((u32)~0 - 0x1101);
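/* Arithmetic note (added for clarity): next_seqno starts at 0xffffeeff and
 * last_seqno at 0xffffeefe, so the u32 seqno space wraps after roughly
 * 0x1100 (~4350) requests rather than ~4 billion.
 */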
 
/* Initialize fence registers to zero */
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
i915_gem_restore_fences(dev);
4491,6 → 4512,33
mutex_init(&dev_priv->fb_tracking.lock);
}
 
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
 
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
* file_priv.
*/
spin_lock(&file_priv->mm.lock);
while (!list_empty(&file_priv->mm.request_list)) {
struct drm_i915_gem_request *request;
 
request = list_first_entry(&file_priv->mm.request_list,
struct drm_i915_gem_request,
client_list);
list_del(&request->client_list);
request->file_priv = NULL;
}
spin_unlock(&file_priv->mm.lock);
 
if (!list_empty(&file_priv->rps.link)) {
spin_lock(&to_i915(dev)->rps.client_lock);
list_del(&file_priv->rps.link);
spin_unlock(&to_i915(dev)->rps.client_lock);
}
}
 
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
4505,11 → 4553,10
file->driver_priv = file_priv;
file_priv->dev_priv = dev->dev_private;
file_priv->file = file;
INIT_LIST_HEAD(&file_priv->rps.link);
 
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
// INIT_DELAYED_WORK(&file_priv->mm.idle_work,
// i915_gem_file_idle_work_handler);
 
ret = i915_gem_context_open(dev, file);
if (ret)
4520,9 → 4567,9
 
/**
* i915_gem_track_fb - update frontbuffer tracking
* old: current GEM buffer for the frontbuffer slots
* new: new GEM buffer for the frontbuffer slots
* frontbuffer_bits: bitmask of frontbuffer slots
* @old: current GEM buffer for the frontbuffer slots
* @new: new GEM buffer for the frontbuffer slots
* @frontbuffer_bits: bitmask of frontbuffer slots
*
* This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
* from @old and setting them in @new. Both @old and @new can be NULL.
4544,21 → 4591,8
}
}
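/*
 * Example (descriptive note, an assumed caller): a page flip of pipe A's
 * primary plane would pass INTEL_FRONTBUFFER_PRIMARY(PIPE_A) as
 * @frontbuffer_bits, moving that slot's bit from the old object to the new.
 */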
 
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
if (!mutex_is_locked(mutex))
return false;
 
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
return false;
#endif
}
 
/* All the new VM stuff */
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
struct i915_address_space *vm)
{
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4567,27 → 4601,64
WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (i915_is_ggtt(vma->vm) &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm)
return vma->node.start;
}
 
}
WARN(1, "%s vma for this object not found.\n",
i915_is_ggtt(vm) ? "global" : "ppgtt");
return -1;
}
 
u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
 
list_for_each_entry(vma, &o->vma_list, vma_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start;
 
WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
return -1;
}
 
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
struct i915_address_space *vm)
{
struct i915_vma *vma;
 
list_for_each_entry(vma, &o->vma_list, vma_link)
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (i915_is_ggtt(vma->vm) &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
return true;
}
 
return false;
}
 
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
 
list_for_each_entry(vma, &o->vma_list, vma_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
return true;
 
return false;
}
 
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
struct i915_vma *vma;
4609,22 → 4680,62
 
BUG_ON(list_empty(&o->vma_list));
 
list_for_each_entry(vma, &o->vma_list, vma_link)
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (i915_is_ggtt(vma->vm) &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm)
return vma->node.size;
 
}
return 0;
}
 
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (vma->pin_count > 0)
return true;
 
return false;
}
 
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
const void *data, size_t size)
{
struct i915_vma *vma;
struct drm_i915_gem_object *obj;
struct sg_table *sg;
size_t bytes;
int ret;
 
vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
if (vma->vm != i915_obj_to_ggtt(obj))
return NULL;
obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
if (IS_ERR_OR_NULL(obj))
return obj;
 
return vma;
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret)
goto fail;
 
ret = i915_gem_object_get_pages(obj);
if (ret)
goto fail;
 
i915_gem_object_pin_pages(obj);
sg = obj->pages;
bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
i915_gem_object_unpin_pages(obj);
 
if (WARN_ON(bytes != size)) {
DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
ret = -EFAULT;
goto fail;
}
 
return obj;
 
fail:
drm_gem_object_unreference(&obj->base);
return ERR_PTR(ret);
}
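/*
 * Usage sketch (illustrative only; the firmware names are assumptions):
 * callers such as firmware loaders can wrap a blob in a GEM object in one
 * step.
 *
 *	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */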
/drivers/video/drm/i915/i915_gem_batch_pool.c
0,0 → 1,151
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
 
#include "i915_drv.h"
#include "i915_gem_batch_pool.h"
 
/**
* DOC: batch pool
*
* In order to submit batch buffers as 'secure', the software command parser
* must ensure that a batch buffer cannot be modified after parsing. It does
* this by copying the user provided batch buffer contents to a kernel owned
* buffer from which the hardware will actually execute, and by carefully
* managing the address space bindings for such buffers.
*
* The batch pool framework provides a mechanism for the driver to manage a
* set of scratch buffers to use for this purpose. The framework can be
* extended to support other use cases should they arise.
*/
 
/**
* i915_gem_batch_pool_init() - initialize a batch buffer pool
* @dev: the drm device
* @pool: the batch buffer pool
*/
void i915_gem_batch_pool_init(struct drm_device *dev,
struct i915_gem_batch_pool *pool)
{
int n;
 
pool->dev = dev;
 
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
INIT_LIST_HEAD(&pool->cache_list[n]);
}
 
/**
* i915_gem_batch_pool_fini() - clean up a batch buffer pool
* @pool: the pool to clean up
*
* Note: Callers must hold the struct_mutex.
*/
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
int n;
 
WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
 
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
while (!list_empty(&pool->cache_list[n])) {
struct drm_i915_gem_object *obj =
list_first_entry(&pool->cache_list[n],
struct drm_i915_gem_object,
batch_pool_link);
 
list_del(&obj->batch_pool_link);
drm_gem_object_unreference(&obj->base);
}
}
}
 
/**
* i915_gem_batch_pool_get() - allocate a buffer from the pool
* @pool: the batch buffer pool
* @size: the minimum desired size of the returned buffer
*
* Returns an inactive buffer from @pool with at least @size bytes,
* with the pages pinned. The caller must call i915_gem_object_unpin_pages()
* on the returned object.
*
* Note: Callers must hold the struct_mutex
*
* Return: the buffer object or an error pointer
*/
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
size_t size)
{
struct drm_i915_gem_object *obj = NULL;
struct drm_i915_gem_object *tmp, *next;
struct list_head *list;
int n;
 
WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
 
/* Compute a power-of-two bucket, but throw everything greater than
* 16KiB into the same bucket: i.e. the buckets hold objects of
* (1 page, 2 pages, 4 pages, 8+ pages).
*/
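/* Worked example (added for clarity): a 3-page request gives
 * fls(3) - 1 = 1, the 2-page bucket; a 16-page request gives
 * fls(16) - 1 = 4, clamped below to the last (8+ pages) bucket.
 */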
n = fls(size >> PAGE_SHIFT) - 1;
if (n >= ARRAY_SIZE(pool->cache_list))
n = ARRAY_SIZE(pool->cache_list) - 1;
list = &pool->cache_list[n];
 
list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
/* The batches are strictly LRU ordered */
if (tmp->active)
break;
 
/* While we're looping, do some clean up */
if (tmp->madv == __I915_MADV_PURGED) {
list_del(&tmp->batch_pool_link);
drm_gem_object_unreference(&tmp->base);
continue;
}
 
if (tmp->base.size >= size) {
obj = tmp;
break;
}
}
 
if (obj == NULL) {
int ret;
 
obj = i915_gem_alloc_object(pool->dev, size);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
 
ret = i915_gem_object_get_pages(obj);
if (ret)
return ERR_PTR(ret);
 
obj->madv = I915_MADV_DONTNEED;
}
 
list_move_tail(&obj->batch_pool_link, list);
i915_gem_object_pin_pages(obj);
return obj;
}
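/*
 * A minimal usage sketch (illustrative only, not driver code): the pool
 * lifecycle described in the DOC comment above. The 4096-byte size is an
 * assumption for the example.
 */
#if 0
static int example_batch_pool_usage(struct drm_device *dev)
{
	struct i915_gem_batch_pool pool;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	i915_gem_batch_pool_init(dev, &pool);

	mutex_lock(&dev->struct_mutex);	/* both get() and fini() need it */
	obj = i915_gem_batch_pool_get(&pool, 4096);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
	} else {
		/* ... copy the parsed batch into obj and submit it ... */
		i915_gem_object_unpin_pages(obj);
	}
	i915_gem_batch_pool_fini(&pool);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
#endif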
/drivers/video/drm/i915/i915_gem_batch_pool.h
0,0 → 1,42
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
 
#ifndef I915_GEM_BATCH_POOL_H
#define I915_GEM_BATCH_POOL_H
 
#include "i915_drv.h"
 
struct i915_gem_batch_pool {
struct drm_device *dev;
struct list_head cache_list[4];
};
 
/* i915_gem_batch_pool.c */
void i915_gem_batch_pool_init(struct drm_device *dev,
struct i915_gem_batch_pool *pool);
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
struct drm_i915_gem_object*
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
 
#endif /* I915_GEM_BATCH_POOL_H */
/drivers/video/drm/i915/i915_gem_context.c
133,10 → 133,24
return ret;
}
 
static void i915_gem_context_clean(struct intel_context *ctx)
{
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
struct i915_vma *vma, *next;
 
if (!ppgtt)
return;
 
list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
mm_list) {
if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
break;
}
}
 
void i915_gem_context_free(struct kref *ctx_ref)
{
struct intel_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
 
trace_i915_context_free(ctx);
 
143,6 → 157,13
if (i915.enable_execlists)
intel_lr_context_free(ctx);
 
/*
* This context is going away and we need to remove all VMAs still
* around. This is to handle imported shared objects for which the
* destructor did not run when their handles were closed.
*/
i915_gem_context_clean(ctx);
 
i915_ppgtt_put(ctx->ppgtt);
 
if (ctx->legacy_hw_ctx.rcs_state)
195,6 → 216,7
 
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->context_list);
ctx->i915 = dev_priv;
 
if (dev_priv->hw_context_size) {
struct drm_i915_gem_object *obj =
222,6 → 244,8
* is no remap info, it will be a NOP. */
ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
 
ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
 
return ctx;
 
err_out:
285,6 → 309,7
if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
idr_remove(&file_priv->context_idr, ctx->user_handle);
i915_gem_context_unreference(ctx);
return ERR_PTR(ret);
}
294,11 → 319,15
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
/* In execlists mode we will unreference the context when the execlist
* queue is cleared and the requests destroyed.
*/
if (i915.enable_execlists)
if (i915.enable_execlists) {
struct intel_context *ctx;
 
list_for_each_entry(ctx, &dev_priv->context_list, link) {
intel_lr_context_reset(dev, ctx);
}
 
return;
}
 
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
325,6 → 354,13
if (WARN_ON(dev_priv->ring[RCS].default_context))
return 0;
 
if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
if (!i915.enable_execlists) {
DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
return -EINVAL;
}
}
 
if (i915.enable_execlists) {
/* NB: intentionally left blank. We will allocate our own
* backing objects as we need them, thank you very much */
401,19 → 437,21
i915_gem_context_unreference(dctx);
}
 
int i915_gem_context_enable(struct drm_i915_private *dev_priv)
int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring;
int ret, i;
struct intel_engine_cs *ring = req->ring;
int ret;
 
BUG_ON(!dev_priv->ring[RCS].default_context);
 
if (i915.enable_execlists)
if (i915.enable_execlists) {
if (ring->init_context == NULL)
return 0;
 
for_each_ring(ring, dev_priv, i) {
ret = i915_switch_context(ring, ring->default_context);
if (ret)
ret = ring->init_context(req);
} else
ret = i915_switch_context(req);
 
if (ret) {
DRM_ERROR("ring init context: %d\n", ret);
return ret;
}
 
468,10 → 506,9
}
 
static inline int
mi_set_context(struct intel_engine_cs *ring,
struct intel_context *new_context,
u32 hw_flags)
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
struct intel_engine_cs *ring = req->ring;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
486,13 → 523,15
* itlb_before_ctx_switch.
*/
if (IS_GEN6(ring->dev)) {
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
 
/* These flags are for resource streamer on HSW+ */
if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
else if (INTEL_INFO(ring->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
500,7 → 539,7
if (INTEL_INFO(ring->dev)->gen >= 7)
len += 2 + (num_rings ? 4*num_rings + 2 : 0);
 
ret = intel_ring_begin(ring, len);
ret = intel_ring_begin(req, len);
if (ret)
return ret;
 
523,7 → 562,7
 
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
flags);
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
552,14 → 591,66
return ret;
}
 
static int do_switch(struct intel_engine_cs *ring,
static inline bool should_skip_switch(struct intel_engine_cs *ring,
struct intel_context *from,
struct intel_context *to)
{
if (to->remap_slice)
return false;
 
if (to->ppgtt && from == to &&
!(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
return true;
 
return false;
}
 
static bool
needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
if (!to->ppgtt)
return false;
 
if (INTEL_INFO(ring->dev)->gen < 8)
return true;
 
if (ring != &dev_priv->ring[RCS])
return true;
 
return false;
}
 
static bool
needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
u32 hw_flags)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
if (!to->ppgtt)
return false;
 
if (!IS_GEN8(ring->dev))
return false;
 
if (ring != &dev_priv->ring[RCS])
return false;
 
if (hw_flags & MI_RESTORE_INHIBIT)
return true;
 
return false;
}
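/*
 * Summary of the two predicates above (descriptive note): a PD load before
 * MI_SET_CONTEXT is needed on gen < 8 and on non-render rings, while a PD
 * load after it is only needed on the gen8 render ring when the restore was
 * inhibited, i.e. on first use of the context.
 */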
 
static int do_switch(struct drm_i915_gem_request *req)
{
struct intel_context *to = req->ctx;
struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_context *from = ring->last_context;
u32 hw_flags = 0;
bool uninitialized = false;
struct i915_vma *vma;
int ret, i;
 
if (from != NULL && ring == &dev_priv->ring[RCS]) {
567,7 → 658,7
BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
}
 
if (from == to && !to->remap_slice)
if (should_skip_switch(ring, from, to))
return 0;
 
/* Trying to pin first makes error handling easier. */
585,11 → 676,18
*/
from = ring->last_context;
 
if (to->ppgtt) {
if (needs_pd_load_pre(ring, to)) {
/* Older GENs and non render rings still want the load first,
* "PP_DCLV followed by PP_DIR_BASE register through Load
* Register Immediate commands in Ring Buffer before submitting
* a context."*/
trace_switch_mm(ring, to);
ret = to->ppgtt->switch_mm(to->ppgtt, ring);
ret = to->ppgtt->switch_mm(to->ppgtt, req);
if (ret)
goto unpin_out;
 
/* Doing a PD load always reloads the page dirs */
to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
}
 
if (ring != &dev_priv->ring[RCS]) {
610,23 → 708,48
if (ret)
goto unpin_out;
 
vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
if (!(vma->bound & GLOBAL_BIND))
vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
GLOBAL_BIND);
 
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
if (!to->legacy_hw_ctx.initialized) {
hw_flags |= MI_RESTORE_INHIBIT;
/* NB: If we inhibit the restore, the context is not allowed to
* die because future work may end up depending on valid address
* space. This means we must enforce that a page table load
* occurs in this case. */
} else if (to->ppgtt &&
(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
hw_flags |= MI_FORCE_RESTORE;
to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
}
 
ret = mi_set_context(ring, to, hw_flags);
/* We should never emit switch_mm more than once */
WARN_ON(needs_pd_load_pre(ring, to) &&
needs_pd_load_post(ring, to, hw_flags));
 
ret = mi_set_context(req, hw_flags);
if (ret)
goto unpin_out;
 
/* GEN8 does *not* require an explicit reload if the PDPs have been
* setup, and we do not wish to move them.
*/
if (needs_pd_load_post(ring, to, hw_flags)) {
trace_switch_mm(ring, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req);
/* The hardware context switch is emitted, but we haven't
* actually changed the state - so it's probably safe to bail
* here. Still, let the user know something dangerous has
* happened.
*/
if (ret) {
DRM_ERROR("Failed to change address space on context switch\n");
goto unpin_out;
}
}
 
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
continue;
 
ret = i915_gem_l3_remap(ring, i);
ret = i915_gem_l3_remap(req, i);
/* If it failed, try again next round */
if (ret)
DRM_DEBUG_DRIVER("L3 remapping failed\n");
642,7 → 765,7
*/
if (from != NULL) {
from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
651,7 → 774,6
* swapped, but there is no way to do that yet.
*/
from->legacy_hw_ctx.rcs_state->dirty = 1;
BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
 
/* obj is kept alive until the next request by its active ref */
i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
658,7 → 780,7
i915_gem_context_unreference(from);
}
 
uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
uninitialized = !to->legacy_hw_ctx.initialized;
to->legacy_hw_ctx.initialized = true;
 
done:
667,14 → 789,10
 
if (uninitialized) {
if (ring->init_context) {
ret = ring->init_context(ring, to);
ret = ring->init_context(req);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
 
ret = i915_gem_render_state_init(ring);
if (ret)
DRM_ERROR("init render state: %d\n", ret);
}
 
return 0;
687,8 → 805,7
 
/**
* i915_switch_context() - perform a GPU context switch.
* @ring: ring for which we'll execute the context switch
* @to: the context to switch to
* @req: request for which we'll execute the context switch
*
* The context life cycle is simple. The context refcount is incremented and
* decremented by 1 on create and destroy. If the context is in use by the GPU,
699,25 → 816,25
* switched by writing to the ELSP and requests keep a reference to their
* context.
*/
int i915_switch_context(struct intel_engine_cs *ring,
struct intel_context *to)
int i915_switch_context(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
WARN_ON(i915.enable_execlists);
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
if (to != ring->last_context) {
i915_gem_context_reference(to);
if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
if (req->ctx != ring->last_context) {
i915_gem_context_reference(req->ctx);
if (ring->last_context)
i915_gem_context_unreference(ring->last_context);
ring->last_context = to;
ring->last_context = req->ctx;
}
return 0;
}
 
return do_switch(ring, to);
return do_switch(req);
}
 
static bool contexts_enabled(struct drm_device *dev)
779,3 → 896,82
DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
return 0;
}
 
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_context_param *args = data;
struct intel_context *ctx;
int ret;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
 
ctx = i915_gem_context_get(file_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
}
 
args->size = 0;
switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
args->value = ctx->hang_stats.ban_period_seconds;
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
break;
default:
ret = -EINVAL;
break;
}
mutex_unlock(&dev->struct_mutex);
 
return ret;
}
 
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_context_param *args = data;
struct intel_context *ctx;
int ret;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
 
ctx = i915_gem_context_get(file_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
}
 
switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
if (args->size)
ret = -EINVAL;
else if (args->value < ctx->hang_stats.ban_period_seconds)
ret = -EPERM;
else
ctx->hang_stats.ban_period_seconds = args->value;
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
if (args->size) {
ret = -EINVAL;
} else {
ctx->flags &= ~CONTEXT_NO_ZEROMAP;
ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
}
break;
default:
ret = -EINVAL;
break;
}
mutex_unlock(&dev->struct_mutex);
 
return ret;
}
/drivers/video/drm/i915/i915_gem_evict.c
50,11 → 50,12
* i915_gem_evict_something - Evict vmas to make room for binding a new one
* @dev: drm_device
* @vm: address space to evict from
* @size: size of the desired free space
* @min_size: size of the desired free space
* @alignment: alignment constraint of the desired free space
* @cache_level: cache_level for the desired space
* @mappable: whether the free space must be mappable
* @nonblocking: whether evicting active objects is allowed or not
* @start: start (inclusive) of the range from which to evict objects
* @end: end (exclusive) of the range from which to evict objects
* @flags: additional flags to control the eviction algorithm
*
* This function will try to evict vmas until a free space satisfying the
* requirements is found. Callers must check first whether any such hole exists
62,6 → 63,10
*
* This function is used by the object/vma binding code.
*
* Since this function is only used to free up virtual address space it only
* ignores pinned vmas, and not objects whose backing storage itself is
* pinned. Hence obj->pages_pin_count does not protect against eviction.
*
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
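/*
 * Typical caller pattern (a sketch under the parameters documented above,
 * not a verbatim copy of driver code): attempt the allocation first and
 * only evict on -ENOSPC.
 *
 *	ret = drm_mm_insert_node_in_range_generic(&vm->mm, node, size,
 *						  alignment, cache_level,
 *						  start, end, ...);
 *	if (ret == -ENOSPC) {
 *		ret = i915_gem_evict_something(dev, vm, size, alignment,
 *					       cache_level, start, end, flags);
 *		if (ret == 0)
 *			goto retry;	// hypothetical retry label
 *	}
 */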
196,7 → 201,6
 
/**
* i915_gem_evict_vm - Evict all idle vmas from a vm
*
* @vm: Address space to cleanse
* @do_idle: Boolean directing whether to idle first.
*
214,6 → 218,7
struct i915_vma *vma, *next;
int ret;
 
WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
trace_i915_gem_evict_vm(vm);
 
if (do_idle) {
222,6 → 227,8
return ret;
 
i915_gem_retire_requests(vm->dev);
 
WARN_ON(!list_empty(&vm->active_list));
}
 
list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
230,48 → 237,3
 
return 0;
}
 
/**
* i915_gem_evict_everything - Try to evict all objects
* @dev: Device to evict objects for
*
* This functions tries to evict all gem objects from all address spaces. Used
* by the shrinker as a last-ditch effort and for suspend, before releasing the
* backing storage of all unbound objects.
*/
int
i915_gem_evict_everything(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm, *v;
bool lists_empty = true;
int ret;
 
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
lists_empty = (list_empty(&vm->inactive_list) &&
list_empty(&vm->active_list));
if (!lists_empty)
lists_empty = false;
}
 
if (lists_empty)
return -ENOSPC;
 
trace_i915_gem_evict_everything(dev);
 
/* The gpu_idle will flush everything in the write domain to the
* active list. Then we must move everything off the active list
* with retire requests.
*/
ret = i915_gpu_idle(dev);
if (ret)
return ret;
 
i915_gem_retire_requests(dev);
 
/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
WARN_ON(i915_gem_evict_vm(vm, false));
 
return 0;
}
/drivers/video/drm/i915/i915_gem_execbuffer.c
252,7 → 252,6
{
return (HAS_LLC(obj->base.dev) ||
obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
!obj->map_and_fenceable ||
obj->cache_level != I915_CACHE_NONE);
}
 
320,8 → 319,53
reloc_page = dev_priv->gtt.mappable;
iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 
// io_mapping_unmap_atomic(reloc_page);
 
return 0;
}
 
static void
clflush_write32(void *addr, uint32_t value)
{
/* This is not a fast path, so KISS. */
drm_clflush_virt_range(addr, sizeof(uint32_t));
*(uint32_t *)addr = value;
drm_clflush_virt_range(addr, sizeof(uint32_t));
}
 
static int
relocate_entry_clflush(struct drm_i915_gem_object *obj,
struct drm_i915_gem_relocation_entry *reloc,
uint64_t target_offset)
{
struct drm_device *dev = obj->base.dev;
uint32_t page_offset = offset_in_page(reloc->offset);
uint64_t delta = (int)reloc->delta + target_offset;
char *vaddr;
int ret;
 
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
return ret;
 
vaddr = kmap_atomic(i915_gem_object_get_page(obj,
reloc->offset >> PAGE_SHIFT));
clflush_write32(vaddr + page_offset, lower_32_bits(delta));
 
if (INTEL_INFO(dev)->gen >= 8) {
page_offset = offset_in_page(page_offset + sizeof(uint32_t));
 
if (page_offset == 0) {
kunmap_atomic(vaddr);
vaddr = kmap_atomic(i915_gem_object_get_page(obj,
(reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
}
 
clflush_write32(vaddr + page_offset, upper_32_bits(delta));
}
 
kunmap_atomic(vaddr);
 
return 0;
}
 
350,10 → 394,12
* pipe_control writes because the gpu doesn't properly redirect them
* through the ppgtt for non_secure batchbuffers. */
if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
!(target_vma->bound & GLOBAL_BIND)))
target_vma->bind_vma(target_vma, target_i915_obj->cache_level,
GLOBAL_BIND);
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
PIN_GLOBAL);
if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
return ret;
}
 
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
409,8 → 455,14
 
if (use_cpu_reloc(obj))
ret = relocate_entry_cpu(obj, reloc, target_offset);
else
else if (obj->map_and_fenceable)
ret = relocate_entry_gtt(obj, reloc, target_offset);
else if (1)
ret = relocate_entry_clflush(obj, reloc, target_offset);
else {
WARN_ONCE(1, "Impossible case in relocation handling\n");
ret = -ENODEV;
}
 
if (ret)
return ret;
507,6 → 559,12
return ret;
}
 
static bool only_mappable_for_reloc(unsigned int flags)
{
return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
__EXEC_OBJECT_NEEDS_MAP;
}
 
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct intel_engine_cs *ring,
517,15 → 575,30
uint64_t flags;
int ret;
 
flags = 0;
flags = PIN_USER;
if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
flags |= PIN_GLOBAL;
 
if (!drm_mm_node_allocated(&vma->node)) {
/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
* limit address to the first 4GBs for unflagged objects.
*/
if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
flags |= PIN_ZONE_4G;
if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
flags |= PIN_GLOBAL | PIN_MAPPABLE;
if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
flags |= PIN_GLOBAL;
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
if ((flags & PIN_MAPPABLE) == 0)
flags |= PIN_HIGH;
}
 
ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
if ((ret == -ENOSPC || ret == -E2BIG) &&
only_mappable_for_reloc(entry->flags))
ret = i915_gem_object_pin(obj, vma->vm,
entry->alignment,
flags & ~PIN_MAPPABLE);
if (ret)
return ret;
 
587,13 → 660,18
vma->node.start & (entry->alignment - 1))
return true;
 
if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
return true;
 
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
vma->node.start < BATCH_OFFSET_BIAS)
return true;
 
/* avoid costly ping-pong once a batch bo ended up non-mappable */
if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
return !only_mappable_for_reloc(entry->flags);
 
if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
(vma->node.start + vma->node.size - 1) >> 32)
return true;
 
return false;
}
 
600,6 → 678,7
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
struct list_head *vmas,
struct intel_context *ctx,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
622,6 → 701,9
obj = vma->obj;
entry = vma->exec_entry;
 
if (ctx->flags & CONTEXT_NO_ZEROMAP)
entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
 
if (!has_fenced_gpu_access)
entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
need_fence =
699,7 → 781,8
struct drm_file *file,
struct intel_engine_cs *ring,
struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec)
struct drm_i915_gem_exec_object2 *exec,
struct intel_context *ctx)
{
struct drm_i915_gem_relocation_entry *reloc;
struct i915_address_space *vm;
725,8 → 808,8
for (i = 0; i < count; i++)
total += exec[i].relocation_count;
 
reloc_offset = malloc(count * sizeof(*reloc_offset));
reloc = malloc(total * sizeof(*reloc));
reloc_offset = __builtin_malloc(count * sizeof(*reloc_offset));
reloc = __builtin_malloc(total * sizeof(*reloc));
if (reloc == NULL || reloc_offset == NULL) {
kfree(reloc);
kfree(reloc_offset);
785,7 → 868,7
goto err;
 
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
if (ret)
goto err;
 
810,9 → 893,10
}
 
static int
i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
const unsigned other_rings = ~intel_ring_flag(req->ring);
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
820,9 → 904,12
 
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
ret = i915_gem_object_sync(obj, ring);
 
if (obj->active & other_rings) {
ret = i915_gem_object_sync(obj, req->ring, &req);
if (ret)
return ret;
}
 
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
flush_chipset |= i915_gem_clflush_object(obj, false);
831,7 → 918,7
}
 
if (flush_chipset)
i915_gem_chipset_flush(ring->dev);
i915_gem_chipset_flush(req->ring->dev);
 
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
839,7 → 926,7
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
return intel_ring_invalidate_all_caches(ring);
return intel_ring_invalidate_all_caches(req);
}
 
static bool
848,9 → 935,23
if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
return false;
 
return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
/* Kernel clipping was a DRI1 misfeature */
if (exec->num_cliprects || exec->cliprects_ptr)
return false;
 
if (exec->DR4 == 0xffffffff) {
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
exec->DR4 = 0;
}
if (exec->DR1 || exec->DR4)
return false;
 
if ((exec->batch_start_offset | exec->batch_len) & 0x7)
return false;
 
return true;
}
 
static int
validate_exec_list(struct drm_device *dev,
struct drm_i915_gem_exec_object2 *exec,
872,6 → 973,9
if (exec[i].flags & invalid_flags)
return -EINVAL;
 
if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
return -EINVAL;
 
/* First check for malicious input causing overflow in
* the worst case where we need to allocate the entire
* relocation tree as a single array.
913,7 → 1017,7
}
 
if (i915.enable_execlists && !ctx->engine[ring->id].state) {
int ret = intel_lr_context_deferred_create(ctx, ring);
int ret = intel_lr_context_deferred_alloc(ctx, ring);
if (ret) {
DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
return ERR_PTR(ret);
925,9 → 1029,9
 
void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_engine_cs *ring)
struct drm_i915_gem_request *req)
{
u32 seqno = intel_ring_get_seqno(ring);
struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
struct i915_vma *vma;
 
list_for_each_entry(vma, vmas, exec_list) {
936,23 → 1040,23
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
 
obj->dirty = 1; /* be paranoid */
obj->base.write_domain = obj->base.pending_write_domain;
if (obj->base.write_domain == 0)
obj->base.pending_read_domains |= obj->base.read_domains;
obj->base.read_domains = obj->base.pending_read_domains;
 
i915_vma_move_to_active(vma, ring);
i915_vma_move_to_active(vma, req);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = seqno;
i915_gem_request_assign(&obj->last_write_req, req);
 
intel_fb_obj_invalidate(obj, ring);
intel_fb_obj_invalidate(obj, ORIGIN_CS);
 
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
}
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
obj->last_fenced_seqno = seqno;
i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
struct drm_i915_private *dev_priv = to_i915(ring->dev);
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
965,22 → 1069,20
}
 
void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_engine_cs *ring,
struct drm_i915_gem_object *obj)
i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
{
/* Unconditionally force add_request to emit a full flush. */
ring->gpu_caches_dirty = true;
params->ring->gpu_caches_dirty = true;
 
/* Add a breadcrumb for the completion of the batch buffer */
(void)__i915_add_request(ring, file, obj, NULL);
__i915_add_request(params->request, params->batch_obj, true);
}
 
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct intel_engine_cs *ring)
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
 
989,7 → 1091,7
return -EINVAL;
}
 
ret = intel_ring_begin(ring, 4 * 3);
ret = intel_ring_begin(req, 4 * 3);
if (ret)
return ret;
 
1004,114 → 1106,83
return 0;
}
 
static int
i915_emit_box(struct intel_engine_cs *ring,
struct drm_clip_rect *box,
int DR1, int DR4)
static struct drm_i915_gem_object*
i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
struct drm_i915_gem_exec_object2 *shadow_exec_entry,
struct eb_vmas *eb,
struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset,
u32 batch_len,
bool is_master)
{
struct drm_i915_gem_object *shadow_batch_obj;
struct i915_vma *vma;
int ret;
 
if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
box->y2 <= 0 || box->x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
box->x1, box->y1, box->x2, box->y2);
return -EINVAL;
}
shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
PAGE_ALIGN(batch_len));
if (IS_ERR(shadow_batch_obj))
return shadow_batch_obj;
 
if (INTEL_INFO(ring->dev)->gen >= 4) {
ret = intel_ring_begin(ring, 4);
ret = i915_parse_cmds(ring,
batch_obj,
shadow_batch_obj,
batch_start_offset,
batch_len,
is_master);
if (ret)
return ret;
goto err;
 
intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
intel_ring_emit(ring, DR4);
} else {
ret = intel_ring_begin(ring, 6);
ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
if (ret)
return ret;
goto err;
 
intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
intel_ring_emit(ring, DR1);
intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
intel_ring_emit(ring, DR4);
intel_ring_emit(ring, 0);
}
intel_ring_advance(ring);
i915_gem_object_unpin_pages(shadow_batch_obj);
 
return 0;
memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
 
vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
vma->exec_entry = shadow_exec_entry;
vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
drm_gem_object_reference(&shadow_batch_obj->base);
list_add_tail(&vma->exec_list, &eb->vmas);
 
shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
 
return shadow_batch_obj;
 
err:
i915_gem_object_unpin_pages(shadow_batch_obj);
if (ret == -EACCES) /* unhandled chained batch */
return batch_obj;
else
return ERR_PTR(ret);
}
 
 
int
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags)
struct list_head *vmas)
{
struct drm_clip_rect *cliprects = NULL;
struct drm_device *dev = params->dev;
struct intel_engine_cs *ring = params->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
u64 exec_len;
u64 exec_start, exec_len;
int instp_mode;
u32 instp_mask;
int i, ret = 0;
int ret;
 
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
 
if (INTEL_INFO(dev)->gen >= 5) {
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
return -EINVAL;
}
 
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
return -EINVAL;
}
 
cliprects = kcalloc(args->num_cliprects,
sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto error;
}
 
if (copy_from_user(cliprects,
to_user_ptr(args->cliprects_ptr),
sizeof(*cliprects)*args->num_cliprects)) {
ret = -EFAULT;
goto error;
}
} else {
if (args->DR4 == 0xffffffff) {
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
args->DR4 = 0;
}
 
if (args->DR1 || args->DR4 || args->cliprects_ptr) {
DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
return -EINVAL;
}
}
 
ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
if (ret)
goto error;
return ret;
 
ret = i915_switch_context(ring, ctx);
ret = i915_switch_context(params->request);
if (ret)
goto error;
return ret;
 
WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
"%s didn't clear reload\n", ring->name);
 
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
switch (instp_mode) {
1120,22 → 1191,19
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
ret = -EINVAL;
goto error;
return -EINVAL;
}
 
if (instp_mode != dev_priv->relative_constants_mode) {
if (INTEL_INFO(dev)->gen < 4) {
DRM_DEBUG("no rel constants on pre-gen4\n");
ret = -EINVAL;
goto error;
return -EINVAL;
}
 
if (INTEL_INFO(dev)->gen > 5 &&
instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
ret = -EINVAL;
goto error;
return -EINVAL;
}
 
/* The HW changed the meaning on this bit on gen6 */
1145,15 → 1213,14
break;
default:
DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
ret = -EINVAL;
goto error;
return -EINVAL;
}
 
if (ring == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(params->request, 4);
if (ret)
goto error;
return ret;
 
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1165,41 → 1232,27
}
 
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(dev, ring);
ret = i915_reset_gen7_sol_offsets(dev, params->request);
if (ret)
goto error;
return ret;
}
 
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(ring, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto error;
exec_start = params->batch_obj_vm_offset +
params->args_batch_start_offset;
 
ret = ring->dispatch_execbuffer(ring,
ret = ring->dispatch_execbuffer(params->request,
exec_start, exec_len,
flags);
params->dispatch_flags);
if (ret)
goto error;
}
} else {
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
flags);
if (ret)
return ret;
}
 
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
i915_gem_execbuffer_move_to_active(vmas, params->request);
i915_gem_execbuffer_retire_commands(params);
 
error:
kfree(cliprects);
return ret;
return 0;
}
 
/**
1261,12 → 1314,14
struct drm_i915_private *dev_priv = dev->dev_private;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
struct intel_engine_cs *ring;
struct intel_context *ctx;
struct i915_address_space *vm;
struct i915_execbuffer_params params_master; /* XXX: will be removed later */
struct i915_execbuffer_params *params = &params_master;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u64 exec_start = args->batch_start_offset;
u32 flags;
u32 dispatch_flags;
int ret;
bool need_relocs;
 
1277,13 → 1332,13
if (ret)
return ret;
 
flags = 0;
dispatch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
 
flags |= I915_DISPATCH_SECURE;
dispatch_flags |= I915_DISPATCH_SECURE;
}
if (args->flags & I915_EXEC_IS_PINNED)
flags |= I915_DISPATCH_PINNED;
dispatch_flags |= I915_DISPATCH_PINNED;
 
if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
DRM_DEBUG("execbuf with unknown ring: %d\n",
1291,13 → 1346,35
return -EINVAL;
}
 
if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
((args->flags & I915_EXEC_BSD_MASK) != 0)) {
DRM_DEBUG("execbuf with non bsd ring but with invalid "
"bsd dispatch flags: %d\n", (int)(args->flags));
return -EINVAL;
}
 
if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
ring = &dev_priv->ring[RCS];
else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
if (HAS_BSD2(dev)) {
int ring_id;
 
switch (args->flags & I915_EXEC_BSD_MASK) {
case I915_EXEC_BSD_DEFAULT:
ring_id = gen8_dispatch_bsd_ring(dev, file);
ring = &dev_priv->ring[ring_id];
break;
case I915_EXEC_BSD_RING1:
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BSD_RING2:
ring = &dev_priv->ring[VCS2];
break;
default:
DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
(int)(args->flags & I915_EXEC_BSD_MASK));
return -EINVAL;
}
} else
ring = &dev_priv->ring[VCS];
} else
1314,6 → 1391,20
return -EINVAL;
}
 
if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
if (!HAS_RESOURCE_STREAMER(dev)) {
DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
return -EINVAL;
}
if (ring->id != RCS) {
DRM_DEBUG("RS is not available on %s\n",
ring->name);
return -EINVAL;
}
 
dispatch_flags |= I915_DISPATCH_RS;
}
 
intel_runtime_pm_get(dev_priv);
 
ret = i915_mutex_lock_interruptible(dev);
1334,6 → 1425,8
else
vm = &dev_priv->gtt.base;
 
memset(&params_master, 0x00, sizeof(params_master));
 
eb = eb_create(args);
if (eb == NULL) {
i915_gem_context_unreference(ctx);
1352,7 → 1445,7
 
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
if (ret)
goto err;
 
1362,7 → 1455,7
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
eb, exec);
eb, exec, ctx);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
1375,33 → 1468,53
ret = -EINVAL;
goto err;
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
params->args_batch_start_offset = args->batch_start_offset;
 
#if 0
if (i915_needs_cmd_parser(ring)) {
ret = i915_parse_cmds(ring,
if (i915_needs_cmd_parser(ring) && args->batch_len) {
struct drm_i915_gem_object *parsed_batch_obj;
 
parsed_batch_obj = i915_gem_execbuffer_parse(ring,
&shadow_exec_entry,
eb,
batch_obj,
args->batch_start_offset,
args->batch_len,
file->is_master);
if (ret) {
if (ret != -EACCES)
if (IS_ERR(parsed_batch_obj)) {
ret = PTR_ERR(parsed_batch_obj);
goto err;
} else {
}
 
/*
* XXX: Actually do this when enabling batch copy...
* parsed_batch_obj == batch_obj means batch not fully parsed:
* Accept, but don't promote to secure.
*/
 
if (parsed_batch_obj != batch_obj) {
/*
* Batch parsed and accepted:
*
* Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
* from MI_BATCH_BUFFER_START commands issued in the
* dispatch_execbuffer implementations. We specifically don't
* want that set when the command parser is enabled.
* Set the DISPATCH_SECURE bit to remove the NON_SECURE
* bit from MI_BATCH_BUFFER_START commands issued in
* the dispatch_execbuffer implementations. We
* specifically don't want that set on batches the
* command parser has accepted.
*/
dispatch_flags |= I915_DISPATCH_SECURE;
params->args_batch_start_offset = 0;
batch_obj = parsed_batch_obj;
}
}
#endif
 
#endif
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
if (flags & I915_DISPATCH_SECURE) {
if (dispatch_flags & I915_DISPATCH_SECURE) {
/*
* So on first glance it looks freaky that we pin the batch here
* outside of the reservation loop. But:
1408,7 → 1521,7
* - The batch is already pinned into the relevant ppgtt, so we
* already have the backing storage fully allocated.
* - No other BO uses the global gtt (well contexts, but meh),
* so we don't really have issues with mutliple objects not
* so we don't really have issues with multiple objects not
* fitting due to fragmentation.
* So this is actually safe.
*/
1416,26 → 1529,57
if (ret)
goto err;
 
exec_start += i915_gem_obj_ggtt_offset(batch_obj);
params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
} else
exec_start += i915_gem_obj_offset(batch_obj, vm);
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
&eb->vmas, batch_obj, exec_start, flags);
/* Allocate a request for this batch buffer nice and early. */
ret = i915_gem_request_alloc(ring, ctx, &params->request);
if (ret)
goto err_batch_unpin;
 
ret = i915_gem_request_add_to_client(params->request, file);
if (ret)
goto err_batch_unpin;
 
/*
* Save assorted stuff away to pass through to *_submission().
* NB: This data should be 'persistent' and not local as it will
* be kept around beyond the duration of the IOCTL once the GPU
* scheduler arrives.
*/
params->dev = dev;
params->file = file;
params->ring = ring;
params->dispatch_flags = dispatch_flags;
params->batch_obj = batch_obj;
params->ctx = ctx;
 
ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
 
err_batch_unpin:
/*
* FIXME: We crucially rely upon the active tracking for the (ppgtt)
* batch vma for correctness. For less ugliness and fragility this
* needs to be adjusted to also track the ggtt batch vma properly as
* active.
*/
if (flags & I915_DISPATCH_SECURE)
if (dispatch_flags & I915_DISPATCH_SECURE)
i915_gem_object_ggtt_unpin(batch_obj);
 
err:
/* the request owns the ref now */
i915_gem_context_unreference(ctx);
eb_destroy(eb);
 
/*
* If the request was created but not successfully submitted then it
* must be freed again. If it was submitted then it is being tracked
* on the active request list and no clean up is required here.
*/
if (ret && params->request)
i915_gem_request_cancel(params->request);
 
mutex_unlock(&dev->struct_mutex);
 
pre_mutex_err:
/drivers/video/drm/i915/i915_gem_fence.c
0,0 → 1,799
/*
* Copyright © 2008-2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
 
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
/**
* DOC: fence register handling
*
* Important to avoid confusion: "fences" in the i915 driver are not execution
* fences used to track command completion but hardware detiler objects which
* wrap a given range of the global GTT. Each platform has only a fairly limited
* set of these objects.
*
* Fences are used to detile GTT memory mappings. They're also connected to the
* hardware frontbuffer render tracking and hence interact with frontbuffer
* compression. Furthermore on older platforms fences are required for tiled
* objects used by the display engine. They can also be used by the render
* engine - they're required for blitter commands and are optional for render
* commands. But on gen4+ both display (with the exception of fbc) and rendering
* have their own tiling state bits and don't need fences.
*
* Also note that fences only support X and Y tiling and hence can't be used for
* the fancier new tiling formats like W, Ys and Yf.
*
* Finally note that because fences are such a restricted resource they're
* dynamically associated with objects. Furthermore fence state is committed to
* the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
* explicitly call i915_gem_object_get_fence() to synchronize fencing status
* for cpu access. Also note that some code wants an unfenced view, for those
* cases the fence can be removed forcefully with i915_gem_object_put_fence().
*
* Internally these functions will synchronize with userspace access by removing
* CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
*/
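 
/*
 * Illustrative sketch, not driver code: the typical flow a caller
 * follows before touching a tiled object through a GTT mmap, using the
 * three helpers defined below in this file. Error handling is trimmed
 * and the function name is hypothetical.
 */
static int __maybe_unused example_fenced_cpu_access(struct drm_i915_gem_object *obj)
{
	int ret;

	/* Synchronize the fence register with the object's tiling state. */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		return ret;

	/* Pin the fencing state while the mapping is in active use. */
	if (i915_gem_object_pin_fence(obj)) {
		/* ... access the object through its GTT mmap ... */
		i915_gem_object_unpin_fence(obj);
	}

	return 0;
}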
 
static void i965_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int fence_reg_lo, fence_reg_hi;
int fence_pitch_shift;
 
if (INTEL_INFO(dev)->gen >= 6) {
fence_reg_lo = FENCE_REG_GEN6_LO(reg);
fence_reg_hi = FENCE_REG_GEN6_HI(reg);
fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
} else {
fence_reg_lo = FENCE_REG_965_LO(reg);
fence_reg_hi = FENCE_REG_965_HI(reg);
fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
}
 
/* To w/a incoherency with non-atomic 64-bit register updates,
* we split the 64-bit update into two 32-bit writes. In order
* for a partial fence not to be evaluated between writes, we
* precede the update with write to turn off the fence register,
* and only enable the fence as the last step.
*
* For extra levels of paranoia, we make sure each step lands
* before applying the next step.
*/
I915_WRITE(fence_reg_lo, 0);
POSTING_READ(fence_reg_lo);
 
if (obj) {
u32 size = i915_gem_obj_ggtt_size(obj);
uint64_t val;
 
/* Adjust fence size to match tiled area */
if (obj->tiling_mode != I915_TILING_NONE) {
uint32_t row_size = obj->stride *
(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
size = (size / row_size) * row_size;
}
 
val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
 
I915_WRITE(fence_reg_hi, val >> 32);
POSTING_READ(fence_reg_hi);
 
I915_WRITE(fence_reg_lo, val);
POSTING_READ(fence_reg_lo);
} else {
I915_WRITE(fence_reg_hi, 0);
POSTING_READ(fence_reg_hi);
}
}
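 
/*
 * Illustrative worked example, not driver code: for a 1 MiB Y-tiled
 * object at GGTT offset 0x100000 with a 512-byte stride, the value
 * composed above becomes
 *
 *   end = (0x100000 + 0x100000 - 4096) & 0xfffff000 = 0x1ff000
 *   val = (u64)end << 32
 *       | 0x100000
 *       | ((512 / 128) - 1) << fence_pitch_shift
 *       | 1 << I965_FENCE_TILING_Y_SHIFT
 *       | I965_FENCE_REG_VALID;
 *
 * i.e. the upper dword carries the fence end, the lower dword the start
 * address, the pitch in 128-byte units (minus one) and the tiling and
 * valid bits.
 */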
 
static void i915_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
 
if (obj) {
u32 size = i915_gem_obj_ggtt_size(obj);
int pitch_val;
int tile_width;
 
WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
(size & -size) != size ||
(i915_gem_obj_ggtt_offset(obj) & (size - 1)),
"object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
 
if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128;
else
tile_width = 512;
 
/* Note: pitch better be a power of two tile widths */
pitch_val = obj->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
 
val = i915_gem_obj_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I915_FENCE_SIZE_BITS(size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
} else
val = 0;
 
I915_WRITE(FENCE_REG(reg), val);
POSTING_READ(FENCE_REG(reg));
}
 
static void i830_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val;
 
if (obj) {
u32 size = i915_gem_obj_ggtt_size(obj);
uint32_t pitch_val;
 
WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
(size & -size) != size ||
(i915_gem_obj_ggtt_offset(obj) & (size - 1)),
"object 0x%08llx not 512K or pot-size 0x%08x aligned\n",
i915_gem_obj_ggtt_offset(obj), size);
 
pitch_val = obj->stride / 128;
pitch_val = ffs(pitch_val) - 1;
 
val = i915_gem_obj_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I830_FENCE_SIZE_BITS(size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
} else
val = 0;
 
I915_WRITE(FENCE_REG(reg), val);
POSTING_READ(FENCE_REG(reg));
}
 
inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
{
return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
}
 
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Ensure that all CPU reads are completed before installing a fence
* and all writes before removing the fence.
*/
if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
mb();
 
WARN(obj && (!obj->stride || !obj->tiling_mode),
"bogus fence setup with stride: 0x%x, tiling mode: %i\n",
obj->stride, obj->tiling_mode);
 
if (IS_GEN2(dev))
i830_write_fence_reg(dev, reg, obj);
else if (IS_GEN3(dev))
i915_write_fence_reg(dev, reg, obj);
else if (INTEL_INFO(dev)->gen >= 4)
i965_write_fence_reg(dev, reg, obj);
 
/* And similarly be paranoid that no direct access to this region
* is reordered to before the fence is installed.
*/
if (i915_gem_object_needs_mb(obj))
mb();
}
 
static inline int fence_number(struct drm_i915_private *dev_priv,
struct drm_i915_fence_reg *fence)
{
return fence - dev_priv->fence_regs;
}
 
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int reg = fence_number(dev_priv, fence);
 
i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
 
if (enable) {
obj->fence_reg = reg;
fence->obj = obj;
list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
} else {
obj->fence_reg = I915_FENCE_REG_NONE;
fence->obj = NULL;
list_del_init(&fence->lru_list);
}
obj->fence_dirty = false;
}
 
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
if (obj->tiling_mode)
i915_gem_release_mmap(obj);
 
/* As we do not have an associated fence register, we will force
* a tiling change if we ever need to acquire one.
*/
obj->fence_dirty = false;
obj->fence_reg = I915_FENCE_REG_NONE;
}
 
static int
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
if (obj->last_fenced_req) {
int ret = i915_wait_request(obj->last_fenced_req);
if (ret)
return ret;
 
i915_gem_request_assign(&obj->last_fenced_req, NULL);
}
 
return 0;
}
 
/**
* i915_gem_object_put_fence - force-remove fence for an object
* @obj: object to map through a fence reg
*
* This function force-removes any fence from the given object, which is useful
* if the kernel wants to do untiled GTT access.
*
* Returns:
*
* 0 on success, negative error code on failure.
*/
int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct drm_i915_fence_reg *fence;
int ret;
 
ret = i915_gem_object_wait_fence(obj);
if (ret)
return ret;
 
if (obj->fence_reg == I915_FENCE_REG_NONE)
return 0;
 
fence = &dev_priv->fence_regs[obj->fence_reg];
 
if (WARN_ON(fence->pin_count))
return -EBUSY;
 
i915_gem_object_fence_lost(obj);
i915_gem_object_update_fence(obj, fence, false);
 
return 0;
}
 
static struct drm_i915_fence_reg *
i915_find_fence_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_fence_reg *reg, *avail;
int i;
 
/* First try to find a free reg */
avail = NULL;
for (i = 0; i < dev_priv->num_fence_regs; i++) {
reg = &dev_priv->fence_regs[i];
if (!reg->obj)
return reg;
 
if (!reg->pin_count)
avail = reg;
}
 
if (avail == NULL)
goto deadlock;
 
/* None available, try to steal one or wait for a user to finish */
list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
if (reg->pin_count)
continue;
 
return reg;
}
 
deadlock:
/* Wait for completion of pending flips which consume fences */
if (intel_has_pending_fb_unpin(dev))
return ERR_PTR(-EAGAIN);
 
return ERR_PTR(-EDEADLK);
}
 
/**
* i915_gem_object_get_fence - set up fencing for an object
* @obj: object to map through a fence reg
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
* This function walks the fence regs looking for a free one for @obj,
* stealing one if it can't find any.
*
* It then sets up the reg based on the object's properties: address, pitch
* and tiling format.
*
* For an untiled surface, this removes any existing fence.
*
* Returns:
*
* 0 on success, negative error code on failure.
*/
int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool enable = obj->tiling_mode != I915_TILING_NONE;
struct drm_i915_fence_reg *reg;
int ret;
 
/* Have we updated the tiling parameters upon the object and so
* will need to serialise the write to the associated fence register?
*/
if (obj->fence_dirty) {
ret = i915_gem_object_wait_fence(obj);
if (ret)
return ret;
}
 
/* Just update our place in the LRU if our fence is getting reused. */
if (obj->fence_reg != I915_FENCE_REG_NONE) {
reg = &dev_priv->fence_regs[obj->fence_reg];
if (!obj->fence_dirty) {
list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list);
return 0;
}
} else if (enable) {
if (WARN_ON(!obj->map_and_fenceable))
return -EINVAL;
 
reg = i915_find_fence_reg(dev);
if (IS_ERR(reg))
return PTR_ERR(reg);
 
if (reg->obj) {
struct drm_i915_gem_object *old = reg->obj;
 
ret = i915_gem_object_wait_fence(old);
if (ret)
return ret;
 
i915_gem_object_fence_lost(old);
}
} else
return 0;
 
i915_gem_object_update_fence(obj, reg, enable);
 
return 0;
}
 
/**
* i915_gem_object_pin_fence - pin fencing state
* @obj: object to pin fencing for
*
* This pins the fencing state (whether tiled or untiled) to make sure the
* object is ready to be used as a scanout target. Fencing status must be
* synchronized first by calling i915_gem_object_get_fence().
*
* The resulting fence pin reference must be released again with
* i915_gem_object_unpin_fence().
*
* Returns:
*
* True if the object has a fence, false otherwise.
*/
bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
 
WARN_ON(!ggtt_vma ||
dev_priv->fence_regs[obj->fence_reg].pin_count >
ggtt_vma->pin_count);
dev_priv->fence_regs[obj->fence_reg].pin_count++;
return true;
} else
return false;
}
 
/**
* i915_gem_object_unpin_fence - unpin fencing state
* @obj: object to unpin fencing for
*
* This releases the fence pin reference acquired through
* i915_gem_object_pin_fence. It will handle both objects with and without an
* attached fence correctly, callers do not need to distinguish this.
*/
void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
dev_priv->fence_regs[obj->fence_reg].pin_count--;
}
}
 
/**
* i915_gem_restore_fences - restore fence state
* @dev: DRM device
*
* Restore the hw fence state to match the software tracking again, to be called
* after a gpu reset and on resume.
*/
void i915_gem_restore_fences(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
 
/*
* Commit delayed tiling changes if we have an object still
* attached to the fence, otherwise just clear the fence.
*/
if (reg->obj) {
i915_gem_object_update_fence(reg->obj, reg,
reg->obj->tiling_mode);
} else {
i915_gem_write_fence(dev, i, NULL);
}
}
}
 
/**
* DOC: tiling swizzling details
*
* The idea behind tiling is to increase cache hit rates by rearranging
* pixel data so that a group of pixel accesses are in the same cacheline.
* Performance improvement from doing this on the back/depth buffer are on
* the order of 30%.
*
* Intel architectures make this somewhat more complicated, though, by
* adjustments made to addressing of data when the memory is in interleaved
* mode (matched pairs of DIMMS) to improve memory bandwidth.
* For interleaved memory, the CPU sends every sequential 64 bytes
* to an alternate memory channel so it can get the bandwidth from both.
*
* The GPU also rearranges its accesses for increased bandwidth to interleaved
* memory, and it matches what the CPU does for non-tiled. However, when tiled
* it does it a little differently, since one walks addresses not just in the
* X direction but also Y. So, along with alternating channels when bit
* 6 of the address flips, it also alternates when other bits flip -- Bits 9
* (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
* are common to both the 915 and 965-class hardware.
*
* The CPU also sometimes XORs in higher bits as well, to improve
* bandwidth doing strided access like we do so frequently in graphics. This
* is called "Channel XOR Randomization" in the MCH documentation. The result
* is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
* decode.
*
* All of this bit 6 XORing has an effect on our memory management,
* as we need to make sure that the 3d driver can correctly address object
* contents.
*
* If we don't have interleaved memory, all tiling is safe and no swizzling is
* required.
*
* When bit 17 is XORed in, we simply refuse to tile at all. Bit
* 17 is not just a page offset, so as we page an object out and back in,
* individual pages in it will have different bit 17 addresses, resulting in
* each 64 bytes being swapped with its neighbor!
*
* Otherwise, if interleaved, we have to tell the 3d driver what the address
* swizzling it needs to do is, since it's writing with the CPU to the pages
* (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
* pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
* required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
* to match what the GPU expects.
*/
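 
/*
 * Illustrative sketch, not driver code: what the common
 * I915_BIT_6_SWIZZLE_9_10 pattern described above means for a CPU copy.
 * Bits 9 and 10 of the page offset are folded into bit 6 before
 * addressing the backing page.
 */
static inline unsigned long __maybe_unused
example_swizzle_bit_6_9_10(unsigned long offset)
{
	unsigned long bit6 = (offset >> 6) ^ (offset >> 9) ^ (offset >> 10);

	return (offset & ~(1UL << 6)) | ((bit6 & 1) << 6);
}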
 
/**
* i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
* @dev: DRM device
*
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
/*
* On BDW+, swizzling is not used. We leave the CPU memory
* controller in charge of optimizing memory accesses without
* the extra address manipulation GPU side.
*
* VLV and CHV don't have GPU swizzling.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (INTEL_INFO(dev)->gen >= 6) {
if (dev_priv->preserve_bios_swizzle) {
if (I915_READ(DISP_ARB_CTL) &
DISP_TILE_SURFACE_SWIZZLING) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
} else {
uint32_t dimm_c0, dimm_c1;
dimm_c0 = I915_READ(MAD_DIMM_C0);
dimm_c1 = I915_READ(MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
/* Enable swizzling when the channels are populated
* with identically sized dimms. We don't need to check
* the 3rd channel because no cpu with gpu attached
* ships in that configuration. Also, swizzling only
* makes sense for 2 channels anyway. */
if (dimm_c0 == dimm_c1) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
} else if (IS_GEN5(dev)) {
/* On Ironlake, whatever the DRAM config, the GPU always does
* the same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if (IS_GEN2(dev)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
uint32_t dcc;
 
/* On 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
* the GPU's interleave is bit 9 and 10 for X tiled, and bit
* 9 for Y tiled. The CPU's interleave is independent, and
* can be based on either bit 11 (haven't seen this yet) or
* bit 17 (common).
*/
dcc = I915_READ(DCC);
switch (dcc & DCC_ADDRESSING_MODE_MASK) {
case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
break;
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
if (dcc & DCC_CHANNEL_XOR_DISABLE) {
/* This is the base swizzling by the GPU for
* tiled buffers.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
/* Bit 11 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
} else {
/* Bit 17 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
swizzle_y = I915_BIT_6_SWIZZLE_9_17;
}
break;
}
 
/* check for L-shaped memory aka modified enhanced addressing */
if (IS_GEN4(dev) &&
!(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
 
if (dcc == 0xffffffff) {
DRM_ERROR("Couldn't read from MCHBAR. "
"Disabling tiling.\n");
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
} else {
/* The 965, G33, and newer, have a very flexible memory
* configuration. It will enable dual-channel mode
* (interleaving) on as much memory as it can, and the GPU
* will additionally sometimes enable different bit 6
* swizzling for tiled objects from the CPU.
*
* Here's what I found on the G965:
* slot fill memory size swizzling
* 0A 0B 1A 1B 1-ch 2-ch
* 512 0 0 0 512 0 O
* 512 0 512 0 16 1008 X
* 512 0 0 512 16 1008 X
* 0 512 0 512 16 1008 X
* 1024 1024 1024 0 2048 1024 O
*
* We could probably detect this based on either the DRB
* matching, which was the case for the swizzling required in
* the table above, or from the 1-ch value being less than
* the minimum size of a rank.
*
* Reports indicate that the swizzling actually
* varies depending upon page placement inside the
* channels, i.e. we see swizzled pages where the
* banks of memory are paired and unswizzled on the
* uneven portion, so leave that as unknown.
*/
if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
}
 
if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
/* Userspace likes to explode if it sees unknown swizzling,
* so lie. We will finish the lie when reporting through
* the get-tiling-ioctl by reporting the physical swizzle
* mode as unknown instead.
*
* As we don't strictly know what the swizzling is, it may be
* bit17 dependent, and so we need to also prevent the pages
* from being moved.
*/
dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
 
dev_priv->mm.bit_6_swizzle_x = swizzle_x;
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
 
/*
* Swap every 64 bytes of this page around, to account for it having a new
* bit 17 of its physical address and therefore being interpreted differently
* by the GPU: the swizzle XORs bit 17 into bit 6, so flipping bit 17 moves
* each 64-byte block to offset ^ 64 within the page.
*/
static void
i915_gem_swizzle_page(struct page *page)
{
char temp[64];
char *vaddr;
int i;
 
vaddr = kmap(page);
 
for (i = 0; i < PAGE_SIZE; i += 128) {
memcpy(temp, &vaddr[i], 64);
memcpy(&vaddr[i], &vaddr[i + 64], 64);
memcpy(&vaddr[i + 64], temp, 64);
}
 
kunmap(page);
}
 
/**
* i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
* @obj: i915 GEM buffer object
*
* This function fixes up the swizzling in case any page frame number for this
* object has changed in bit 17 since that state has been saved with
* i915_gem_object_save_bit_17_swizzle().
*
* This is called when pinning backing storage again, since the kernel is free
* to move unpinned backing storage around (either by directly moving pages or
* by swapping them out and back in again).
*/
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct sg_page_iter sg_iter;
int i;
 
if (obj->bit_17 == NULL)
return;
 
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
struct page *page = sg_page_iter_page(&sg_iter);
char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(page);
}
i++;
}
}
 
/**
* i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
* @obj: i915 GEM buffer object
*
* This function saves the bit 17 of each page frame number so that swizzling
* can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
* be called before the backing storage can be unpinned.
*/
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct sg_page_iter sg_iter;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
 
if (obj->bit_17 == NULL) {
obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
sizeof(long), GFP_KERNEL);
if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
return;
}
}
 
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
i++;
}
}
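 
/*
 * Illustrative sketch, not driver code: how the two helpers above are
 * meant to pair around releasing and reacquiring the backing storage.
 * The elided middle step stands for whatever code actually unpins and
 * later repins the pages.
 */
static void __maybe_unused example_bit_17_pairing(struct drm_i915_gem_object *obj)
{
	/* Before the pages may be moved or swapped out: record bit 17. */
	i915_gem_object_save_bit_17_swizzle(obj);

	/* ... backing storage unpinned, moved around, repinned ... */

	/* After repinning: fix up pages whose bit 17 flipped. */
	i915_gem_object_do_bit_17_swizzle(obj);
}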
/drivers/video/drm/i915/i915_gem_gtt.c
27,14 → 27,79
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
 
#include <asm/cacheflush.h>
/**
* DOC: Global GTT views
*
* Background and previous state
*
* Historically objects could exist (be bound) in global GTT space only as
* singular instances with a view representing all of the object's backing pages
* in a linear fashion. This view will be called a normal view.
*
* To support multiple views of the same object, where the number of mapped
* pages is not equal to the backing store, or where the layout of the pages
* is not linear, the concept of a GGTT view was added.
*
* One example of an alternative view is a stereo display driven by a single
* image. In this case we would have a framebuffer looking like this
* (2x2 pages):
*
* 12
* 34
*
* Above would represent a normal GGTT view as normally mapped for GPU or CPU
* rendering. In contrast, fed to the display engine would be an alternative
* view which could look something like this:
*
* 1212
* 3434
*
* In this example both the size and layout of pages in the alternative view are
* different from the normal view.
*
* Implementation and usage
*
* GGTT views are implemented using VMAs and are distinguished via enum
* i915_ggtt_view_type and struct i915_ggtt_view.
*
* A new flavour of core GEM functions which work with GGTT bound objects was
* added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
* renaming in large amounts of code. They take the struct i915_ggtt_view
* parameter encapsulating all metadata required to implement a view.
*
* As a helper for callers which are only interested in the normal view,
* a globally const i915_ggtt_view_normal singleton instance exists. All old core
* GEM API functions, the ones not taking the view parameter, operate on,
* or with, the normal GGTT view.
*
* Code wanting to add or use a new GGTT view needs to:
*
* 1. Add a new enum with a suitable name.
* 2. Extend the metadata in the i915_ggtt_view structure if required.
* 3. Add support to i915_get_vma_pages().
*
* New views are required to build a scatter-gather table from within the
* i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
* exists for the lifetime of a VMA.
*
* The core API is designed to have copy semantics, which means that a passed-in
* struct i915_ggtt_view does not need to be persistent (left around after
* calling the core API functions).
*
*/
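 
/*
 * Illustrative sketch, not driver code: binding the same object twice,
 * once with the normal view and once with the rotated view singleton
 * defined below. The i915_gem_object_ggtt_pin() signature is assumed
 * from this kernel's _ggtt_/_view API flavour; error handling and the
 * matching unpins are trimmed.
 */
static int __maybe_unused example_bind_two_views(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal, 0, 0);
	if (ret)
		return ret;

	return i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_rotated, 0, 0);
}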
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
 
const struct i915_ggtt_view i915_ggtt_view_normal;
const struct i915_ggtt_view i915_ggtt_view_rotated = {
.type = I915_GGTT_VIEW_ROTATED
};
 
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
bool has_aliasing_ppgtt;
42,9 → 107,10
 
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
if (IS_GEN8(dev))
has_full_ppgtt = false; /* XXX why? */
 
if (intel_vgpu_active(dev))
has_full_ppgtt = false; /* emulation is too hard */
 
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
* execlists, the sole mechanism available to submit work.
74,20 → 140,41
return 0;
}
 
if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
return 2;
else
return has_aliasing_ppgtt ? 1 : 0;
}
 
 
static void ppgtt_bind_vma(struct i915_vma *vma,
static int ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
static void ppgtt_unbind_vma(struct i915_vma *vma);
u32 unused)
{
u32 pte_flags = 0;
 
static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
/* Currently applicable only to VLV */
if (vma->obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
 
vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
cache_level, pte_flags);
 
return 0;
}
 
static void ppgtt_unbind_vma(struct i915_vma *vma)
{
vma->vm->clear_range(vma->vm,
vma->node.start,
vma->obj->base.size,
true);
}
 
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid)
{
gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
pte |= addr;
 
switch (level) {
105,11 → 192,10
return pte;
}
 
static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
dma_addr_t addr,
enum i915_cache_level level)
static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
const enum i915_cache_level level)
{
gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
if (level != I915_CACHE_NONE)
pde |= PPAT_CACHED_PDE_INDEX;
118,11 → 204,14
return pde;
}
 
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode
 
static gen6_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
switch (level) {
134,17 → 223,17
pte |= GEN6_PTE_UNCACHED;
break;
default:
WARN_ON(1);
MISSING_CASE(level);
}
 
return pte;
}
 
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
switch (level) {
158,17 → 247,17
pte |= GEN6_PTE_UNCACHED;
break;
default:
WARN_ON(1);
MISSING_CASE(level);
}
 
return pte;
}
 
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
static gen6_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 flags)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
if (!(flags & PTE_READ_ONLY))
180,11 → 269,11
return pte;
}
 
static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
 
if (level != I915_CACHE_NONE)
193,11 → 282,11
return pte;
}
 
static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
static gen6_pte_t iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
 
switch (level) {
214,40 → 303,376
return pte;
}
 
static int __setup_page_dma(struct drm_device *dev,
struct i915_page_dma *p, gfp_t flags)
{
struct device *device = &dev->pdev->dev;
 
p->page = alloc_page(flags);
if (!p->page)
return -ENOMEM;
 
p->daddr = page_to_phys(p->page);
 
return 0;
}
 
static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
return __setup_page_dma(dev, p, GFP_KERNEL);
}
 
static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
if (WARN_ON(!p->page))
return;
 
__free_page(p->page);
memset(p, 0, sizeof(*p));
}
 
static void *kmap_page_dma(struct i915_page_dma *p)
{
return kmap_atomic(p->page);
}
 
/* We use the flushing unmap only with ppgtt structures:
* page directories, page tables and scratch pages.
*/
static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
{
/* There are only a few exceptions for gen >= 6: chv and bxt.
* And we are not sure about the latter, so play safe for now.
*/
if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
drm_clflush_virt_range(vaddr, PAGE_SIZE);
 
kunmap_atomic(vaddr);
}
 
#define kmap_px(px) kmap_page_dma(px_base(px))
#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
 
#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
 
static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
const uint64_t val)
{
int i;
uint64_t * const vaddr = kmap_page_dma(p);
 
for (i = 0; i < 512; i++)
vaddr[i] = val;
 
kunmap_page_dma(dev, vaddr);
}
 
static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
const uint32_t val32)
{
uint64_t v = val32;
 
v = v << 32 | val32;
 
fill_page_dma(dev, p, v);
}
 
static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
{
struct i915_page_scratch *sp;
int ret;
 
sp = kzalloc(sizeof(*sp), GFP_KERNEL);
if (sp == NULL)
return ERR_PTR(-ENOMEM);
 
ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
if (ret) {
kfree(sp);
return ERR_PTR(ret);
}
 
// set_pages_uc(px_page(sp), 1);
 
return sp;
}
 
static void free_scratch_page(struct drm_device *dev,
struct i915_page_scratch *sp)
{
// set_pages_wb(px_page(sp), 1);
 
cleanup_px(dev, sp);
kfree(sp);
}
 
static struct i915_page_table *alloc_pt(struct drm_device *dev)
{
struct i915_page_table *pt;
const size_t count = INTEL_INFO(dev)->gen >= 8 ?
GEN8_PTES : GEN6_PTES;
int ret = -ENOMEM;
 
pt = kzalloc(sizeof(*pt), GFP_KERNEL);
if (!pt)
return ERR_PTR(-ENOMEM);
 
pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
GFP_KERNEL);
 
if (!pt->used_ptes)
goto fail_bitmap;
 
ret = setup_px(dev, pt);
if (ret)
goto fail_page_m;
 
return pt;
 
fail_page_m:
kfree(pt->used_ptes);
fail_bitmap:
kfree(pt);
 
return ERR_PTR(ret);
}
 
static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
{
cleanup_px(dev, pt);
kfree(pt->used_ptes);
kfree(pt);
}
 
static void gen8_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
gen8_pte_t scratch_pte;
 
scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, true);
 
fill_px(vm->dev, pt, scratch_pte);
}
 
static void gen6_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
gen6_pte_t scratch_pte;
 
WARN_ON(px_dma(vm->scratch_page) == 0);
 
scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, true, 0);
 
fill32_px(vm->dev, pt, scratch_pte);
}
 
static struct i915_page_directory *alloc_pd(struct drm_device *dev)
{
struct i915_page_directory *pd;
int ret = -ENOMEM;
 
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
 
pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
sizeof(*pd->used_pdes), GFP_KERNEL);
if (!pd->used_pdes)
goto fail_bitmap;
 
ret = setup_px(dev, pd);
if (ret)
goto fail_page_m;
 
return pd;
 
fail_page_m:
kfree(pd->used_pdes);
fail_bitmap:
kfree(pd);
 
return ERR_PTR(ret);
}
 
static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
{
if (px_page(pd)) {
cleanup_px(dev, pd);
kfree(pd->used_pdes);
kfree(pd);
}
}
 
static void gen8_initialize_pd(struct i915_address_space *vm,
struct i915_page_directory *pd)
{
gen8_pde_t scratch_pde;
 
scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
 
fill_px(vm->dev, pd, scratch_pde);
}
 
static int __pdp_init(struct drm_device *dev,
struct i915_page_directory_pointer *pdp)
{
size_t pdpes = I915_PDPES_PER_PDP(dev);
 
pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
sizeof(unsigned long),
GFP_KERNEL);
if (!pdp->used_pdpes)
return -ENOMEM;
 
pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
GFP_KERNEL);
if (!pdp->page_directory) {
kfree(pdp->used_pdpes);
/* the PDP might be the statically allocated top level. Keep it
* as clean as possible */
pdp->used_pdpes = NULL;
return -ENOMEM;
}
 
return 0;
}
 
static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
kfree(pdp->used_pdpes);
kfree(pdp->page_directory);
pdp->page_directory = NULL;
}
 
static struct
i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
{
struct i915_page_directory_pointer *pdp;
int ret = -ENOMEM;
 
WARN_ON(!USES_FULL_48BIT_PPGTT(dev));
 
pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
if (!pdp)
return ERR_PTR(-ENOMEM);
 
ret = __pdp_init(dev, pdp);
if (ret)
goto fail_bitmap;
 
ret = setup_px(dev, pdp);
if (ret)
goto fail_page_m;
 
return pdp;
 
fail_page_m:
__pdp_fini(pdp);
fail_bitmap:
kfree(pdp);
 
return ERR_PTR(ret);
}
 
static void free_pdp(struct drm_device *dev,
struct i915_page_directory_pointer *pdp)
{
__pdp_fini(pdp);
if (USES_FULL_48BIT_PPGTT(dev)) {
cleanup_px(dev, pdp);
kfree(pdp);
}
}
 
static void gen8_initialize_pdp(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp)
{
gen8_ppgtt_pdpe_t scratch_pdpe;
 
scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
 
fill_px(vm->dev, pdp, scratch_pdpe);
}
 
static void gen8_initialize_pml4(struct i915_address_space *vm,
struct i915_pml4 *pml4)
{
gen8_ppgtt_pml4e_t scratch_pml4e;
 
scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
I915_CACHE_LLC);
 
fill_px(vm->dev, pml4, scratch_pml4e);
}
 
static void
gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
struct i915_page_directory_pointer *pdp,
struct i915_page_directory *pd,
int index)
{
gen8_ppgtt_pdpe_t *page_directorypo;
 
if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
return;
 
page_directorypo = kmap_px(pdp);
page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
kunmap_px(ppgtt, page_directorypo);
}
 
static void
gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
struct i915_pml4 *pml4,
struct i915_page_directory_pointer *pdp,
int index)
{
gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
 
WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
kunmap_px(ppgtt, pagemap);
}
 
/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
uint64_t val)
static int gen8_write_pdp(struct drm_i915_gem_request *req,
unsigned entry,
dma_addr_t addr)
{
struct intel_engine_cs *ring = req->ring;
int ret;
 
BUG_ON(entry >= 4);
 
ret = intel_ring_begin(ring, 6);
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
 
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
intel_ring_emit(ring, (u32)(val >> 32));
intel_ring_emit(ring, upper_32_bits(addr));
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
intel_ring_emit(ring, (u32)(val));
intel_ring_emit(ring, lower_32_bits(addr));
intel_ring_advance(ring);
 
return 0;
}
 
static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring)
static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
int i, ret;
 
/* bit of a hack to find the actual last used pd */
int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
for (i = used_pd - 1; i >= 0; i--) {
dma_addr_t addr = ppgtt->pd_dma_addr[i];
ret = gen8_write_pdp(ring, i, addr);
ret = gen8_write_pdp(req, i, pd_daddr);
if (ret)
return ret;
}
255,31 → 680,52
return 0;
}
 
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}
 
static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
uint64_t start,
uint64_t length,
bool use_scratch)
gen8_pte_t scratch_pte)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen8_gtt_pte_t *pt_vaddr, scratch_pte;
unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
gen8_pte_t *pt_vaddr;
unsigned pdpe = gen8_pdpe_index(start);
unsigned pde = gen8_pde_index(start);
unsigned pte = gen8_pte_index(start);
unsigned num_entries = length >> PAGE_SHIFT;
unsigned last_pte, i;
 
scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
I915_CACHE_LLC, use_scratch);
if (WARN_ON(!pdp))
return;
 
while (num_entries) {
struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
struct i915_page_directory *pd;
struct i915_page_table *pt;
 
if (WARN_ON(!pdp->page_directory[pdpe]))
break;
 
pd = pdp->page_directory[pdpe];
 
if (WARN_ON(!pd->page_table[pde]))
break;
 
pt = pd->page_table[pde];
 
if (WARN_ON(!px_page(pt)))
break;
 
last_pte = pte + num_entries;
if (last_pte > GEN8_PTES_PER_PAGE)
last_pte = GEN8_PTES_PER_PAGE;
if (last_pte > GEN8_PTES)
last_pte = GEN8_PTES;
 
pt_vaddr = kmap_atomic(page_table);
pt_vaddr = kmap_px(pt);
 
for (i = pte; i < last_pte; i++) {
pt_vaddr[i] = scratch_pte;
286,108 → 732,240
num_entries--;
}
 
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
kunmap_px(ppgtt, pt);
 
pte = 0;
if (++pde == GEN8_PDES_PER_PAGE) {
pdpe++;
if (++pde == I915_PDES) {
if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
break;
pde = 0;
}
}
}
 
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level cache_level, u32 unused)
uint64_t length,
bool use_scratch)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen8_gtt_pte_t *pt_vaddr;
unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
struct sg_page_iter sg_iter;
gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, use_scratch);
 
if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
scratch_pte);
} else {
uint64_t templ4, pml4e;
struct i915_page_directory_pointer *pdp;
 
gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
scratch_pte);
}
}
}
 
static void
gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
struct sg_page_iter *sg_iter,
uint64_t start,
enum i915_cache_level cache_level)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen8_pte_t *pt_vaddr;
unsigned pdpe = gen8_pdpe_index(start);
unsigned pde = gen8_pde_index(start);
unsigned pte = gen8_pte_index(start);
 
pt_vaddr = NULL;
 
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
break;
while (__sg_page_iter_next(sg_iter)) {
if (pt_vaddr == NULL) {
struct i915_page_directory *pd = pdp->page_directory[pdpe];
struct i915_page_table *pt = pd->page_table[pde];
pt_vaddr = kmap_px(pt);
}
 
if (pt_vaddr == NULL)
pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
 
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
cache_level, true);
if (++pte == GEN8_PTES_PER_PAGE) {
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
if (++pte == GEN8_PTES) {
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
if (++pde == GEN8_PDES_PER_PAGE) {
pdpe++;
if (++pde == I915_PDES) {
if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
break;
pde = 0;
}
pte = 0;
}
}
if (pt_vaddr) {
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
 
if (pt_vaddr)
kunmap_px(ppgtt, pt_vaddr);
}
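 
/*
 * Illustrative index breakdown, not driver code: the 4-level walk above
 * assumes 4 KiB pages and 512 entries per level, which is what the
 * gen8_*_index() helpers encode:
 *
 *   pml4e = (addr >> 39) & 0x1ff;  (48-bit mode only, 512 GiB apiece)
 *   pdpe  = (addr >> 30) & 0x1ff;  (1 GiB per page directory)
 *   pde   = (addr >> 21) & 0x1ff;  (2 MiB per page table)
 *   pte   = (addr >> 12) & 0x1ff;  (4 KiB per entry)
 */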
 
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
uint64_t start,
enum i915_cache_level cache_level,
u32 unused)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
struct sg_page_iter sg_iter;
 
__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
 
if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
cache_level);
} else {
struct i915_page_directory_pointer *pdp;
uint64_t templ4, pml4e;
uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;
 
gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
start, cache_level);
}
}
}
 
static void gen8_free_page_tables(struct page **pt_pages)
static void gen8_free_page_tables(struct drm_device *dev,
struct i915_page_directory *pd)
{
int i;
 
if (pt_pages == NULL)
if (!px_page(pd))
return;
 
// for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
// if (pt_pages[i])
// __free_pages(pt_pages[i], 0);
for_each_set_bit(i, pd->used_pdes, I915_PDES) {
if (WARN_ON(!pd->page_table[i]))
continue;
 
free_pt(dev, pd->page_table[i]);
pd->page_table[i] = NULL;
}
}
 
static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
static int gen8_init_scratch(struct i915_address_space *vm)
{
struct drm_device *dev = vm->dev;
 
vm->scratch_page = alloc_scratch_page(dev);
if (IS_ERR(vm->scratch_page))
return PTR_ERR(vm->scratch_page);
 
vm->scratch_pt = alloc_pt(dev);
if (IS_ERR(vm->scratch_pt)) {
free_scratch_page(dev, vm->scratch_page);
return PTR_ERR(vm->scratch_pt);
}
 
vm->scratch_pd = alloc_pd(dev);
if (IS_ERR(vm->scratch_pd)) {
free_pt(dev, vm->scratch_pt);
free_scratch_page(dev, vm->scratch_page);
return PTR_ERR(vm->scratch_pd);
}
 
if (USES_FULL_48BIT_PPGTT(dev)) {
vm->scratch_pdp = alloc_pdp(dev);
if (IS_ERR(vm->scratch_pdp)) {
free_pd(dev, vm->scratch_pd);
free_pt(dev, vm->scratch_pt);
free_scratch_page(dev, vm->scratch_page);
return PTR_ERR(vm->scratch_pdp);
}
}
 
gen8_initialize_pt(vm, vm->scratch_pt);
gen8_initialize_pd(vm, vm->scratch_pd);
if (USES_FULL_48BIT_PPGTT(dev))
gen8_initialize_pdp(vm, vm->scratch_pdp);
 
return 0;
}
 
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
enum vgt_g2v_type msg;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int offset = vgtif_reg(pdp0_lo);
int i;
 
for (i = 0; i < ppgtt->num_pd_pages; i++) {
gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
kfree(ppgtt->gen8_pt_pages[i]);
kfree(ppgtt->gen8_pt_dma_addr[i]);
if (USES_FULL_48BIT_PPGTT(dev)) {
u64 daddr = px_dma(&ppgtt->pml4);
 
I915_WRITE(offset, lower_32_bits(daddr));
I915_WRITE(offset + 4, upper_32_bits(daddr));
 
msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
} else {
for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
 
I915_WRITE(offset, lower_32_bits(daddr));
I915_WRITE(offset + 4, upper_32_bits(daddr));
 
offset += 8;
}
 
// __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
}
 
static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
I915_WRITE(vgtif_reg(g2v_notify), msg);
 
return 0;
}
 
static void gen8_free_scratch(struct i915_address_space *vm)
{
struct pci_dev *hwdev = ppgtt->base.dev->pdev;
int i, j;
struct drm_device *dev = vm->dev;
 
for (i = 0; i < ppgtt->num_pd_pages; i++) {
/* TODO: In the future we'll support sparse mappings, so this
* will have to change. */
if (!ppgtt->pd_dma_addr[i])
if (USES_FULL_48BIT_PPGTT(dev))
free_pdp(dev, vm->scratch_pdp);
free_pd(dev, vm->scratch_pd);
free_pt(dev, vm->scratch_pt);
free_scratch_page(dev, vm->scratch_page);
}
 
static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
struct i915_page_directory_pointer *pdp)
{
int i;
 
for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
if (WARN_ON(!pdp->page_directory[i]))
continue;
 
pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
gen8_free_page_tables(dev, pdp->page_directory[i]);
free_pd(dev, pdp->page_directory[i]);
}
 
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
if (addr)
pci_unmap_page(hwdev, addr, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
free_pdp(dev, pdp);
}
 
static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
int i;
 
for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
if (WARN_ON(!ppgtt->pml4.pdps[i]))
continue;
 
gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
}
 
cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
}
 
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
395,277 → 973,684
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
 
gen8_ppgtt_unmap_pages(ppgtt);
gen8_ppgtt_free(ppgtt);
if (intel_vgpu_active(vm->dev))
gen8_ppgtt_notify_vgt(ppgtt, false);
 
if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
else
gen8_ppgtt_cleanup_4lvl(ppgtt);
 
gen8_free_scratch(vm);
}
 
static struct page **__gen8_alloc_page_tables(void)
/**
* gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
* @vm: Master vm structure.
* @pd: Page directory for this address range.
* @start: Starting virtual address to begin allocations.
* @length: Size of the allocations.
* @new_pts: Bitmap set by function with new allocations. Likely used by the
* caller to free on error.
*
* Allocate the required number of page tables. Extremely similar to
* gen8_ppgtt_alloc_page_directories(). The main difference is that here we are
* limited by the page directory boundary (instead of the page directory
* pointer). That boundary is 1GB virtual. Therefore, unlike
* gen8_ppgtt_alloc_page_directories(), it is possible, and likely, that the
* caller will need to make multiple calls to this function to achieve the
* appropriate allocation.
*
* Return: 0 if success; negative error code otherwise.
*/
static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
struct i915_page_directory *pd,
uint64_t start,
uint64_t length,
unsigned long *new_pts)
{
struct page **pt_pages;
int i;
struct drm_device *dev = vm->dev;
struct i915_page_table *pt;
uint64_t temp;
uint32_t pde;
 
pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
if (!pt_pages)
return ERR_PTR(-ENOMEM);
gen8_for_each_pde(pt, pd, start, length, temp, pde) {
/* Don't reallocate page tables */
if (test_bit(pde, pd->used_pdes)) {
/* Scratch is never allocated this way */
WARN_ON(pt == vm->scratch_pt);
continue;
}
 
for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
pt_pages[i] = alloc_page(GFP_KERNEL);
if (!pt_pages[i])
goto bail;
pt = alloc_pt(dev);
if (IS_ERR(pt))
goto unwind_out;
 
gen8_initialize_pt(vm, pt);
pd->page_table[pde] = pt;
__set_bit(pde, new_pts);
trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
}
 
return pt_pages;
return 0;
 
bail:
gen8_free_page_tables(pt_pages);
kfree(pt_pages);
return ERR_PTR(-ENOMEM);
unwind_out:
for_each_set_bit(pde, new_pts, I915_PDES)
free_pt(dev, pd->page_table[pde]);
 
return -ENOMEM;
}
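/*
 * Standalone sketch (not part of this file): the new_pts bookkeeping
 * used by gen8_ppgtt_alloc_pagetabs() above. Only entries allocated by
 * this call are marked in the local bitmap, so the unwind path frees
 * exactly those and leaves pre-existing tables untouched. A 64-entry
 * directory and a plain uint64_t bitmap stand in for the kernel types;
 * assumes first + count <= 64.
 */
#include <stdint.h>
#include <stdlib.h>

static int alloc_range_sketch(void *tbl[64], uint64_t *used,
                              unsigned int first, unsigned int count)
{
        uint64_t new_bits = 0;
        unsigned int i;

        for (i = first; i < first + count; i++) {
                if (*used & (1ULL << i))        /* already allocated: skip */
                        continue;

                tbl[i] = malloc(4096);
                if (!tbl[i])
                        goto unwind_out;

                new_bits |= 1ULL << i;
        }

        *used |= new_bits;
        return 0;

unwind_out:                     /* free only this call's allocations */
        for (i = 0; i < 64; i++)
                if (new_bits & (1ULL << i))
                        free(tbl[i]);
        return -1;
}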
 
static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
const int max_pdp)
/**
* gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
* @vm: Master vm structure.
* @pdp: Page directory pointer for this address range.
* @start: Starting virtual address to begin allocations.
* @length: Size of the allocations.
* @new_pds: Bitmap set by function with new allocations. Likely used by the
* caller to free on error.
*
* Allocate the required number of page directories starting at the pdpe index
* of @start, and ending at the pdpe index of @start + @length. This function will skip
* over already allocated page directories within the range, and only allocate
* new ones, setting the appropriate pointer within the pdp as well as the
* correct position in the bitmap @new_pds.
*
* The function will only allocate the pages within the range for a given page
* directory pointer. In other words, if @start + @length straddles a virtually
* addressed PDP boundary (512GB for 4k pages), there will be more allocations
* required by the caller. This is not currently possible, and the BUG in the
* code will prevent it.
*
* Return: 0 if success; negative error code otherwise.
*/
static int
gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
uint64_t start,
uint64_t length,
unsigned long *new_pds)
{
struct page **pt_pages[GEN8_LEGACY_PDPS];
int i, ret;
struct drm_device *dev = vm->dev;
struct i915_page_directory *pd;
uint64_t temp;
uint32_t pdpe;
uint32_t pdpes = I915_PDPES_PER_PDP(dev);
 
for (i = 0; i < max_pdp; i++) {
pt_pages[i] = __gen8_alloc_page_tables();
if (IS_ERR(pt_pages[i])) {
ret = PTR_ERR(pt_pages[i]);
WARN_ON(!bitmap_empty(new_pds, pdpes));
 
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
if (test_bit(pdpe, pdp->used_pdpes))
continue;
 
pd = alloc_pd(dev);
if (IS_ERR(pd))
goto unwind_out;
 
gen8_initialize_pd(vm, pd);
pdp->page_directory[pdpe] = pd;
__set_bit(pdpe, new_pds);
trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
}
}
 
/* NB: Avoid touching gen8_pt_pages until last to keep the allocation
* "atomic" - for cleanup purposes.
*/
for (i = 0; i < max_pdp; i++)
ppgtt->gen8_pt_pages[i] = pt_pages[i];
 
return 0;
 
unwind_out:
while (i--) {
gen8_free_page_tables(pt_pages[i]);
kfree(pt_pages[i]);
}
for_each_set_bit(pdpe, new_pds, pdpes)
free_pd(dev, pdp->page_directory[pdpe]);
 
return ret;
return -ENOMEM;
}
 
static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
/**
* gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
* @vm: Master vm structure.
* @pml4: Page map level 4 for this address range.
* @start: Starting virtual address to begin allocations.
* @length: Size of the allocations.
* @new_pdps: Bitmap set by function with new allocations. Likely used by the
* caller to free on error.
*
* Allocate the required number of page directory pointers. Extremely similar to
* gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
* The main difference is that here we are limited by the pml4 boundary (instead of
* the page directory pointer).
*
* Return: 0 if success; negative error code otherwise.
*/
static int
gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
struct i915_pml4 *pml4,
uint64_t start,
uint64_t length,
unsigned long *new_pdps)
{
int i;
struct drm_device *dev = vm->dev;
struct i915_page_directory_pointer *pdp;
uint64_t temp;
uint32_t pml4e;
 
for (i = 0; i < ppgtt->num_pd_pages; i++) {
ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
sizeof(dma_addr_t),
GFP_KERNEL);
if (!ppgtt->gen8_pt_dma_addr[i])
return -ENOMEM;
WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
 
gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
if (!test_bit(pml4e, pml4->used_pml4es)) {
pdp = alloc_pdp(dev);
if (IS_ERR(pdp))
goto unwind_out;
 
gen8_initialize_pdp(vm, pdp);
pml4->pdps[pml4e] = pdp;
__set_bit(pml4e, new_pdps);
trace_i915_page_directory_pointer_entry_alloc(vm,
pml4e,
start,
GEN8_PML4E_SHIFT);
}
}
 
return 0;
 
unwind_out:
for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
free_pdp(dev, pml4->pdps[pml4e]);
 
return -ENOMEM;
}
 
static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
const int max_pdp)
static void
free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
{
// ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
if (!ppgtt->pd_pages)
kfree(new_pts);
kfree(new_pds);
}
 
/* Fills in the page directory bitmap, and the array of page table bitmaps.
* Both of these are sized based on the number of PDPEs in the system.
*/
static
int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
unsigned long **new_pts,
uint32_t pdpes)
{
unsigned long *pds;
unsigned long *pts;
 
pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
if (!pds)
return -ENOMEM;
 
// ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
GFP_TEMPORARY);
if (!pts)
goto err_out;
 
*new_pds = pds;
*new_pts = pts;
 
return 0;
 
err_out:
free_gen8_temp_bitmaps(pds, pts);
return -ENOMEM;
}
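/*
 * Standalone sketch (not part of this file): the sizing arithmetic
 * behind alloc_gen8_temp_bitmaps() above. Bit counts are rounded up to
 * whole longs; with pdpes = 4 and I915_PDES = 512 on a 64-bit build
 * that is 1 long for the page-directory bitmap and 4 * 8 = 32 longs
 * for the per-pdpe page-table bitmaps.
 */
#include <limits.h>
#include <stdio.h>

#define SKETCH_BITS_PER_LONG (sizeof(long) * CHAR_BIT)
#define SKETCH_BITS_TO_LONGS(n) \
        (((n) + SKETCH_BITS_PER_LONG - 1) / SKETCH_BITS_PER_LONG)

int main(void)
{
        unsigned int pdpes = 4, pdes = 512;

        printf("pd bitmap: %zu longs\n", SKETCH_BITS_TO_LONGS(pdpes));
        printf("pt bitmaps: %zu longs\n", pdpes * SKETCH_BITS_TO_LONGS(pdes));
        return 0;
}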
 
static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
const int max_pdp)
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
* the page table structures, we mark them dirty so that
* context switching/execlist queuing code takes extra steps
* to ensure that tlbs are flushed.
*/
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
}
 
static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
uint64_t start,
uint64_t length)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
unsigned long *new_page_dirs, *new_page_tables;
struct drm_device *dev = vm->dev;
struct i915_page_directory *pd;
const uint64_t orig_start = start;
const uint64_t orig_length = length;
uint64_t temp;
uint32_t pdpe;
uint32_t pdpes = I915_PDPES_PER_PDP(dev);
int ret;
 
ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
/* Wrap is never okay since we can only represent 48b, and we don't
* actually use the other side of the canonical address space.
*/
if (WARN_ON(start + length < start))
return -ENODEV;
 
if (WARN_ON(start + length > vm->total))
return -ENODEV;
 
ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
if (ret)
return ret;
 
ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
/* Do the allocations first so we can easily bail out */
ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
new_page_dirs);
if (ret) {
// __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
return ret;
}
 
ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
 
ret = gen8_ppgtt_allocate_dma(ppgtt);
/* For every page directory referenced, allocate page tables */
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
if (ret)
gen8_ppgtt_free(ppgtt);
goto err_out;
}
 
start = orig_start;
length = orig_length;
 
/* Allocations have completed successfully, so set the bitmaps, and do
* the mappings. */
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
gen8_pde_t *const page_directory = kmap_px(pd);
struct i915_page_table *pt;
uint64_t pd_len = length;
uint64_t pd_start = start;
uint32_t pde;
 
/* Every pd should be allocated; we just did that above. */
WARN_ON(!pd);
 
gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
/* Same reasoning as pd */
WARN_ON(!pt);
WARN_ON(!pd_len);
WARN_ON(!gen8_pte_count(pd_start, pd_len));
 
/* Set our used ptes within the page table */
bitmap_set(pt->used_ptes,
gen8_pte_index(pd_start),
gen8_pte_count(pd_start, pd_len));
 
/* Our pde is now pointing to the pagetable, pt */
__set_bit(pde, pd->used_pdes);
 
/* Map the PDE to the page table */
page_directory[pde] = gen8_pde_encode(px_dma(pt),
I915_CACHE_LLC);
trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
gen8_pte_index(start),
gen8_pte_count(start, length),
GEN8_PTES);
 
/* NB: We haven't yet mapped ptes to pages. At this
* point we're still relying on insert_entries() */
}
 
kunmap_px(ppgtt, page_directory);
__set_bit(pdpe, pdp->used_pdpes);
gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
}
 
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
mark_tlbs_dirty(ppgtt);
return 0;
 
err_out:
while (pdpe--) {
for_each_set_bit(temp, new_page_tables + pdpe *
BITS_TO_LONGS(I915_PDES), I915_PDES)
free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
}
 
for_each_set_bit(pdpe, new_page_dirs, pdpes)
free_pd(dev, pdp->page_directory[pdpe]);
 
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
mark_tlbs_dirty(ppgtt);
return ret;
}
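/*
 * Standalone sketch (not part of this file): the pte index/count
 * arithmetic used by the commit loop above, assuming the gen8 4KB-page
 * layout (12-bit page offset, 9-bit pte index, 512 ptes per table).
 * sketch_pte_count() is an illustrative helper, not the driver's: it
 * clamps a range to the boundary of the page table containing start.
 */
#include <stdint.h>

#define SKETCH_GEN8_PTES      512u
#define SKETCH_GEN8_PTE_SHIFT 12

static uint32_t sketch_pte_index(uint64_t addr)
{
        return (addr >> SKETCH_GEN8_PTE_SHIFT) & (SKETCH_GEN8_PTES - 1);
}

static uint32_t sketch_pte_count(uint64_t start, uint64_t length)
{
        uint32_t room = SKETCH_GEN8_PTES - sketch_pte_index(start);
        uint64_t ptes = length >> SKETCH_GEN8_PTE_SHIFT;

        return ptes < room ? (uint32_t)ptes : room;
}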
 
static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
const int pd)
static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
struct i915_pml4 *pml4,
uint64_t start,
uint64_t length)
{
dma_addr_t pd_addr;
int ret;
DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
struct i915_page_directory_pointer *pdp;
uint64_t temp, pml4e;
int ret = 0;
 
pd_addr = pci_map_page(ppgtt->base.dev->pdev,
&ppgtt->pd_pages[pd], 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
/* Do the pml4 allocations first, so we don't need to track the newly
* allocated tables below the pdp */
bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);
 
// ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
// if (ret)
// return ret;
/* The page directory and page table allocations are done in the shared
* 3- and 4-level code. Just allocate the pdps.
*/
ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
new_pdps);
if (ret)
return ret;
 
ppgtt->pd_dma_addr[pd] = pd_addr;
WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
"The allocation has spanned more than 512GB. "
"It is highly likely this is incorrect.");
 
gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
WARN_ON(!pdp);
 
ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
if (ret)
goto err_out;
 
gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
}
 
bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
GEN8_PML4ES_PER_PML4);
 
return 0;
 
err_out:
for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);
 
return ret;
}
 
static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
const int pd,
const int pt)
static int gen8_alloc_va_range(struct i915_address_space *vm,
uint64_t start, uint64_t length)
{
dma_addr_t pt_addr;
struct page *p;
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
 
if (USES_FULL_48BIT_PPGTT(vm->dev))
return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
else
return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
}
 
static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
uint64_t start, uint64_t length,
gen8_pte_t scratch_pte,
struct seq_file *m)
{
struct i915_page_directory *pd;
uint64_t temp;
uint32_t pdpe;
 
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
struct i915_page_table *pt;
uint64_t pd_len = length;
uint64_t pd_start = start;
uint32_t pde;
 
if (!test_bit(pdpe, pdp->used_pdpes))
continue;
 
seq_printf(m, "\tPDPE #%d\n", pdpe);
gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
uint32_t pte;
gen8_pte_t *pt_vaddr;
 
if (!test_bit(pde, pd->used_pdes))
continue;
 
pt_vaddr = kmap_px(pt);
for (pte = 0; pte < GEN8_PTES; pte += 4) {
uint64_t va =
(pdpe << GEN8_PDPE_SHIFT) |
(pde << GEN8_PDE_SHIFT) |
(pte << GEN8_PTE_SHIFT);
int i;
bool found = false;
 
for (i = 0; i < 4; i++)
if (pt_vaddr[pte + i] != scratch_pte)
found = true;
if (!found)
continue;
 
seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
for (i = 0; i < 4; i++) {
if (pt_vaddr[pte + i] != scratch_pte)
seq_printf(m, " %llx", pt_vaddr[pte + i]);
else
seq_puts(m, " SCRATCH ");
}
seq_puts(m, "\n");
}
/* don't use kunmap_px, it could trigger
* an unnecessary flush.
*/
kunmap_atomic(pt_vaddr);
}
}
}
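/*
 * Standalone sketch (not part of this file): the virtual-address
 * reconstruction gen8_dump_pdp() performs above, assuming the usual
 * gen8 field shifts (12/21/30/39 for pte/pde/pdpe/pml4e, 9-bit
 * indices each).
 */
#include <stdint.h>

static uint64_t sketch_gen8_va(uint64_t pml4e, uint64_t pdpe,
                               uint64_t pde, uint64_t pte)
{
        return (pml4e << 39) | (pdpe << 30) | (pde << 21) | (pte << 12);
}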
 
static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
struct i915_address_space *vm = &ppgtt->base;
uint64_t start = ppgtt->base.start;
uint64_t length = ppgtt->base.total;
gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, true);
 
if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
} else {
uint64_t templ4, pml4e;
struct i915_pml4 *pml4 = &ppgtt->pml4;
struct i915_page_directory_pointer *pdp;
 
gen8_for_each_pml4e(pdp, pml4, start, length, templ4, pml4e) {
if (!test_bit(pml4e, pml4->used_pml4es))
continue;
 
seq_printf(m, " PML4E #%llu\n", pml4e);
gen8_dump_pdp(pdp, start, length, scratch_pte, m);
}
}
}
 
static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
unsigned long *new_page_dirs, *new_page_tables;
uint32_t pdpes = I915_PDPES_PER_PDP(dev);
int ret;
 
p = ppgtt->gen8_pt_pages[pd][pt];
pt_addr = pci_map_page(ppgtt->base.dev->pdev,
p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
// ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
// if (ret)
// return ret;
/* We allocate a temporary bitmap for page tables for no gain
* but as this is for init only, let's keep things simple
*/
ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
if (ret)
return ret;
 
ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
/* Allocate for all pdps regardless of how the ppgtt
* was defined.
*/
ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
0, 1ULL << 32,
new_page_dirs);
if (!ret)
*ppgtt->pdp.used_pdpes = *new_page_dirs;
 
return 0;
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
 
return ret;
}
 
/**
/*
* GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
* PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB legacy 32b address
* space.
*
* FIXME: split allocation into smaller pieces. For now we only ever do this
* once, but with full PPGTT, the multiple contiguous allocations will be bad.
* TODO: Do something with the size parameter
*/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
int i, j, ret;
int ret;
 
if (size % (1<<30))
DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
 
/* 1. Do all our allocations for page directories and page tables. */
ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
ret = gen8_init_scratch(&ppgtt->base);
if (ret)
return ret;
 
/*
* 2. Create DMA mappings for the page directories and page tables.
*/
for (i = 0; i < max_pdp; i++) {
ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
ppgtt->base.start = 0;
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
ppgtt->base.allocate_va_range = gen8_alloc_va_range;
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
ppgtt->base.clear_range = gen8_ppgtt_clear_range;
ppgtt->base.unbind_vma = ppgtt_unbind_vma;
ppgtt->base.bind_vma = ppgtt_bind_vma;
ppgtt->debug_dump = gen8_dump_ppgtt;
 
if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
if (ret)
goto bail;
goto free_scratch;
 
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
 
ppgtt->base.total = 1ULL << 48;
ppgtt->switch_mm = gen8_48b_mm_switch;
} else {
ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
if (ret)
goto bail;
}
}
goto free_scratch;
 
/*
* 3. Map all the page directory entries to point to the page tables
* we've allocated.
*
* For now, the PPGTT helper functions all require that the PDEs are
* plugged in correctly. So we do that now/here. For aliasing PPGTT, we
* will never need to touch the PDEs again.
*/
for (i = 0; i < max_pdp; i++) {
gen8_ppgtt_pde_t *pd_vaddr;
pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
I915_CACHE_LLC);
ppgtt->base.total = 1ULL << 32;
ppgtt->switch_mm = gen8_legacy_mm_switch;
trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
0, 0,
GEN8_PML4E_SHIFT);
 
if (intel_vgpu_active(ppgtt->base.dev)) {
ret = gen8_preallocate_top_level_pdps(ppgtt);
if (ret)
goto free_scratch;
}
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
kunmap_atomic(pd_vaddr);
}
 
ppgtt->switch_mm = gen8_mm_switch;
ppgtt->base.clear_range = gen8_ppgtt_clear_range;
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
ppgtt->base.start = 0;
ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;
if (intel_vgpu_active(ppgtt->base.dev))
gen8_ppgtt_notify_vgt(ppgtt, true);
 
ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
 
DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
ppgtt->num_pd_entries,
(ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
return 0;
 
bail:
gen8_ppgtt_unmap_pages(ppgtt);
gen8_ppgtt_free(ppgtt);
free_scratch:
gen8_free_scratch(&ppgtt->base);
return ret;
}
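/*
 * Standalone sketch (not part of this file): the address-space totals
 * set by gen8_ppgtt_init() above. One page table covers 512 * 4KB =
 * 2MB, one page directory covers 512 PTs = 1GB, so the legacy 3-level
 * layout's 4 pdpes give 1ULL << 32 and the 4-level layout's 512 pml4es
 * of 512GB each give 1ULL << 48.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t pt_cover  = 512ULL * 4096;     /* 2MB per page table */
        uint64_t pd_cover  = 512 * pt_cover;    /* 1GB per page directory */
        uint64_t pdp_cover = 512 * pd_cover;    /* 512GB per pdp */

        assert(4 * pd_cover == 1ULL << 32);     /* legacy 32b space */
        assert(512 * pdp_cover == 1ULL << 48);  /* full 48b space */
        return 0;
}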
 
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
gen6_gtt_pte_t __iomem *pd_addr;
struct i915_address_space *vm = &ppgtt->base;
struct i915_page_table *unused;
gen6_pte_t scratch_pte;
uint32_t pd_entry;
uint32_t pte, pde, temp;
uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
 
scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, true, 0);
 
gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
u32 expected;
gen6_pte_t *pt_vaddr;
const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
pd_entry = readl(ppgtt->pd_addr + pde);
expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
 
if (pd_entry != expected)
seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
pde,
pd_entry,
expected);
seq_printf(m, "\tPDE: %x\n", pd_entry);
 
pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
 
for (pte = 0; pte < GEN6_PTES; pte+=4) {
unsigned long va =
(pde * PAGE_SIZE * GEN6_PTES) +
(pte * PAGE_SIZE);
int i;
bool found = false;
for (i = 0; i < 4; i++)
if (pt_vaddr[pte + i] != scratch_pte)
found = true;
if (!found)
continue;
 
WARN_ON(ppgtt->pd_offset & 0x3f);
pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
for (i = 0; i < 4; i++) {
if (pt_vaddr[pte + i] != scratch_pte)
seq_printf(m, " %08x", pt_vaddr[pte + i]);
else
seq_puts(m, " SCRATCH ");
}
seq_puts(m, "\n");
}
kunmap_px(ppgtt, pt_vaddr);
}
}
 
pt_addr = ppgtt->pt_dma_addr[i];
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
/* Write pde (index) from the page directory @pd to the page table @pt */
static void gen6_write_pde(struct i915_page_directory *pd,
const int pde, struct i915_page_table *pt)
{
/* Caller needs to make sure the write completes if necessary */
struct i915_hw_ppgtt *ppgtt =
container_of(pd, struct i915_hw_ppgtt, pd);
u32 pd_entry;
 
pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
pd_entry |= GEN6_PDE_VALID;
 
writel(pd_entry, pd_addr + i);
writel(pd_entry, ppgtt->pd_addr + pde);
}
readl(pd_addr);
 
/* Write all the page tables found in the ppgtt structure to their
* incrementing page directory entries. */
static void gen6_write_page_range(struct drm_i915_private *dev_priv,
struct i915_page_directory *pd,
uint32_t start, uint32_t length)
{
struct i915_page_table *pt;
uint32_t pde, temp;
 
gen6_for_each_pde(pt, pd, start, length, temp, pde)
gen6_write_pde(pd, pde, pt);
 
/* Make sure the write is complete before other code can use this page
* table. Also required for WC mapped PTEs */
readl(dev_priv->gtt.gsm);
}
 
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
BUG_ON(ppgtt->pd_offset & 0x3f);
BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
 
return (ppgtt->pd_offset / 64) << 16;
return (ppgtt->pd.base.ggtt_offset / 64) << 16;
}
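/*
 * Standalone sketch (not part of this file): the encoding computed by
 * get_pd_offset() above. The page directory's ggtt offset must be
 * 64-byte aligned (hence the & 0x3f check), and the register field is
 * that offset scaled down by 64 and shifted into the upper bits.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t sketch_pd_offset(uint32_t ggtt_offset)
{
        assert((ggtt_offset & 0x3f) == 0);      /* 64-byte aligned */
        return (ggtt_offset / 64) << 16;
}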
 
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring)
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
int ret;
 
/* NB: TLBs must be flushed and invalidated before a switch */
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
 
ret = intel_ring_begin(ring, 6);
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
 
680,17 → 1665,29
return 0;
}
 
static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
 
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
return 0;
}
 
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring)
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
int ret;
 
/* NB: TLBs must be flushed and invalidated before a switch */
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
 
ret = intel_ring_begin(ring, 6);
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
 
704,7 → 1701,7
 
/* XXX: RCS is the only one to auto invalidate the TLBs? */
if (ring->id != RCS) {
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
713,8 → 1710,9
}
 
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring)
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
734,8 → 1732,9
int j;
 
for_each_ring(ring, dev_priv, j) {
u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
}
}
 
791,26 → 1790,27
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr, scratch_pte;
gen6_pte_t *pt_vaddr, scratch_pte;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned act_pt = first_entry / GEN6_PTES;
unsigned first_pte = first_entry % GEN6_PTES;
unsigned last_pte, i;
 
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, true, 0);
 
while (num_entries) {
last_pte = first_pte + num_entries;
if (last_pte > I915_PPGTT_PT_ENTRIES)
last_pte = I915_PPGTT_PT_ENTRIES;
if (last_pte > GEN6_PTES)
last_pte = GEN6_PTES;
 
pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
 
for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
 
kunmap_atomic(pt_vaddr);
kunmap_px(ppgtt, pt_vaddr);
 
num_entries -= last_pte - first_pte;
first_pte = 0;
825,23 → 1825,23
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr;
gen6_pte_t *pt_vaddr;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES;
struct sg_page_iter sg_iter;
 
pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
if (pt_vaddr == NULL)
pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
 
pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true, flags);
 
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr);
if (++act_pte == GEN6_PTES) {
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
act_pt++;
act_pte = 0;
848,44 → 1848,146
}
}
if (pt_vaddr)
kunmap_atomic(pt_vaddr);
kunmap_px(ppgtt, pt_vaddr);
}
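/*
 * Standalone sketch (not part of this file): the page-walk split used
 * by gen6_ppgtt_insert_entries() above. A GTT page index divides into
 * a page-table number and an entry within it; with 4-byte gen6 ptes a
 * 4KB table holds 1024 entries, and 512 pdes * 1024 ptes * 4KB is the
 * 2GB gen6 address space.
 */
#include <stdint.h>

#define SKETCH_GEN6_PTES 1024u

static void sketch_gen6_split(uint64_t start,
                              unsigned int *act_pt, unsigned int *act_pte)
{
        uint64_t first_entry = start >> 12;     /* PAGE_SHIFT */

        *act_pt  = first_entry / SKETCH_GEN6_PTES;
        *act_pte = first_entry % SKETCH_GEN6_PTES;
}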
 
static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
static int gen6_alloc_va_range(struct i915_address_space *vm,
uint64_t start_in, uint64_t length_in)
{
int i;
DECLARE_BITMAP(new_page_tables, I915_PDES);
struct drm_device *dev = vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
struct i915_page_table *pt;
uint32_t start, length, start_save, length_save;
uint32_t pde, temp;
int ret;
 
if (ppgtt->pt_dma_addr) {
for (i = 0; i < ppgtt->num_pd_entries; i++)
pci_unmap_page(ppgtt->base.dev->pdev,
ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
if (WARN_ON(start_in + length_in > ppgtt->base.total))
return -ENODEV;
 
start = start_save = start_in;
length = length_save = length_in;
 
bitmap_zero(new_page_tables, I915_PDES);
 
/* The allocation is done in two stages so that we can bail out with
* a minimal amount of pain. The first stage finds new page tables that
* need allocation. The second stage marks used ptes within the page
* tables.
*/
gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
if (pt != vm->scratch_pt) {
WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
continue;
}
 
/* We've already allocated a page table */
WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
 
pt = alloc_pt(dev);
if (IS_ERR(pt)) {
ret = PTR_ERR(pt);
goto unwind_out;
}
 
static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
gen6_initialize_pt(vm, pt);
 
ppgtt->pd.page_table[pde] = pt;
__set_bit(pde, new_page_tables);
trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
}
 
start = start_save;
length = length_save;
 
gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
 
bitmap_zero(tmp_bitmap, GEN6_PTES);
bitmap_set(tmp_bitmap, gen6_pte_index(start),
gen6_pte_count(start, length));
 
if (__test_and_clear_bit(pde, new_page_tables))
gen6_write_pde(&ppgtt->pd, pde, pt);
 
trace_i915_page_table_entry_map(vm, pde, pt,
gen6_pte_index(start),
gen6_pte_count(start, length),
GEN6_PTES);
bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
GEN6_PTES);
}
 
WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));
 
/* Make sure the write is complete before other code can use this page
* table. Also required for WC mapped PTEs */
readl(dev_priv->gtt.gsm);
 
mark_tlbs_dirty(ppgtt);
return 0;
 
unwind_out:
for_each_set_bit(pde, new_page_tables, I915_PDES) {
struct i915_page_table *pt = ppgtt->pd.page_table[pde];
 
ppgtt->pd.page_table[pde] = vm->scratch_pt;
free_pt(vm->dev, pt);
}
 
mark_tlbs_dirty(ppgtt);
return ret;
}
 
static int gen6_init_scratch(struct i915_address_space *vm)
{
int i;
struct drm_device *dev = vm->dev;
 
kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++)
__free_page(ppgtt->pt_pages[i]);
kfree(ppgtt->pt_pages);
vm->scratch_page = alloc_scratch_page(dev);
if (IS_ERR(vm->scratch_page))
return PTR_ERR(vm->scratch_page);
 
vm->scratch_pt = alloc_pt(dev);
if (IS_ERR(vm->scratch_pt)) {
free_scratch_page(dev, vm->scratch_page);
return PTR_ERR(vm->scratch_pt);
}
 
gen6_initialize_pt(vm, vm->scratch_pt);
 
return 0;
}
 
static void gen6_free_scratch(struct i915_address_space *vm)
{
struct drm_device *dev = vm->dev;
 
free_pt(dev, vm->scratch_pt);
free_scratch_page(dev, vm->scratch_page);
}
 
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
struct i915_page_table *pt;
uint32_t pde;
 
drm_mm_remove_node(&ppgtt->node);
 
gen6_ppgtt_unmap_pages(ppgtt);
gen6_ppgtt_free(ppgtt);
gen6_for_all_pdes(pt, ppgtt, pde) {
if (pt != vm->scratch_pt)
free_pt(ppgtt->base.dev, pt);
}
 
gen6_free_scratch(vm);
}
 
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->base;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool retried = false;
896,6 → 1998,11
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
 
ret = gen6_init_scratch(vm);
if (ret)
return ret;
 
alloc:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
&ppgtt->node, GEN6_PD_SIZE,
909,87 → 2016,41
0, dev_priv->gtt.base.total,
0);
if (ret)
return ret;
goto err_out;
 
retried = true;
goto alloc;
}
 
if (ret)
goto err_out;
 
 
if (ppgtt->node.start < dev_priv->gtt.mappable_end)
DRM_DEBUG("Forced to use aperture for PDEs\n");
 
ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
return 0;
 
err_out:
gen6_free_scratch(vm);
return ret;
}
 
static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
int i;
 
ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
GFP_KERNEL);
 
if (!ppgtt->pt_pages)
return -ENOMEM;
 
for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
if (!ppgtt->pt_pages[i]) {
gen6_ppgtt_free(ppgtt);
return -ENOMEM;
}
}
 
return 0;
}
 
static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
int ret;
 
ret = gen6_ppgtt_allocate_page_directories(ppgtt);
if (ret)
return ret;
 
ret = gen6_ppgtt_allocate_page_tables(ppgtt);
if (ret) {
drm_mm_remove_node(&ppgtt->node);
return ret;
return gen6_ppgtt_allocate_page_directories(ppgtt);
}
 
ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
GFP_KERNEL);
if (!ppgtt->pt_dma_addr) {
drm_mm_remove_node(&ppgtt->node);
gen6_ppgtt_free(ppgtt);
return -ENOMEM;
}
 
return 0;
}
 
static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
uint64_t start, uint64_t length)
{
struct drm_device *dev = ppgtt->base.dev;
int i;
struct i915_page_table *unused;
uint32_t pde, temp;
 
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
 
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
PCI_DMA_BIDIRECTIONAL);
 
// if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
// gen6_ppgtt_unmap_pages(ppgtt);
// return -EIO;
// }
 
ppgtt->pt_dma_addr[i] = pt_addr;
gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
}
 
return 0;
}
 
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
1006,35 → 2067,39
} else
BUG();
 
if (intel_vgpu_active(dev))
ppgtt->switch_mm = vgpu_mm_switch;
 
ret = gen6_ppgtt_alloc(ppgtt);
if (ret)
return ret;
 
ret = gen6_ppgtt_setup_page_tables(ppgtt);
if (ret) {
gen6_ppgtt_free(ppgtt);
return ret;
}
 
ppgtt->base.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.unbind_vma = ppgtt_unbind_vma;
ppgtt->base.bind_vma = ppgtt_bind_vma;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.start = 0;
ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
// ppgtt->debug_dump = gen6_dump_ppgtt;
ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
ppgtt->debug_dump = gen6_dump_ppgtt;
 
ppgtt->pd_offset =
ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
ppgtt->pd.base.ggtt_offset =
ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
 
ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
 
DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
 
gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
 
DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
ppgtt->node.size >> 20,
ppgtt->node.start / PAGE_SIZE);
 
gen6_write_pdes(ppgtt);
DRM_DEBUG("Adding PPGTT at offset %x\n",
ppgtt->pd_offset << 10);
ppgtt->pd.base.ggtt_offset << 10);
 
return 0;
}
1041,18 → 2106,24
 
static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
ppgtt->base.dev = dev;
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
 
if (INTEL_INFO(dev)->gen < 8)
return gen6_ppgtt_init(ppgtt);
else if (IS_GEN8(dev) || IS_GEN9(dev))
return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
else
BUG();
return gen8_ppgtt_init(ppgtt);
}
 
static void i915_address_space_init(struct i915_address_space *vm,
struct drm_i915_private *dev_priv)
{
drm_mm_init(&vm->mm, vm->start, vm->total);
vm->dev = dev_priv->dev;
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
 
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = dev->dev_private;
1061,9 → 2132,7
ret = __hw_ppgtt_init(dev, ppgtt);
if (ret == 0) {
kref_init(&ppgtt->ref);
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
ppgtt->base.total);
i915_init_vm(dev_priv, &ppgtt->base);
i915_address_space_init(&ppgtt->base, dev_priv);
}
 
return ret;
1071,11 → 2140,6
 
int i915_ppgtt_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i, ret = 0;
 
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
* need to do anything here. */
1092,18 → 2156,25
else if (INTEL_INFO(dev)->gen >= 8)
gen8_ppgtt_enable(dev);
else
WARN_ON(1);
MISSING_CASE(INTEL_INFO(dev)->gen);
 
if (ppgtt) {
for_each_ring(ring, dev_priv, i) {
ret = ppgtt->switch_mm(ppgtt, ring);
if (ret != 0)
return ret;
return 0;
}
 
int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
{
struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
if (i915.enable_execlists)
return 0;
 
if (!ppgtt)
return 0;
 
return ppgtt->switch_mm(ppgtt, req);
}
 
return ret;
}
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
{
1145,32 → 2216,11
kfree(ppgtt);
}
 
static void
ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
/* Currently applicable only to VLV */
if (vma->obj->gt_ro)
flags |= PTE_READ_ONLY;
 
vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
cache_level, flags);
}
 
static void ppgtt_unbind_vma(struct i915_vma *vma)
{
vma->vm->clear_range(vma->vm,
vma->node.start,
vma->obj->base.size,
true);
}
 
extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
static inline bool needs_idle_maps(struct drm_device *dev)
static bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
/* Query intel_iommu to see if we need the workaround. Presumably that
1263,64 → 2313,8
i915_ggtt_flush(dev_priv);
}
 
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
 
i915_check_and_clear_faults(dev);
 
/* First fill our portion of the GTT with scratch pages */
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
dev_priv->gtt.base.start,
dev_priv->gtt.base.total,
true);
 
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
struct i915_vma *vma = i915_gem_obj_to_vma(obj,
&dev_priv->gtt.base);
if (!vma)
continue;
 
i915_gem_clflush_object(obj, obj->pin_display);
/* The bind_vma code tries to be smart about tracking mappings.
* Unfortunately above, we've just wiped out the mappings
* without telling our object about it. So we need to fake it.
*/
vma->bound &= ~GLOBAL_BIND;
vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
}
 
 
if (INTEL_INFO(dev)->gen >= 8) {
if (IS_CHERRYVIEW(dev))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
 
return;
}
 
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
/* TODO: Perhaps it shouldn't be gen6 specific */
if (i915_is_ggtt(vm)) {
if (dev_priv->mm.aliasing_ppgtt)
gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
continue;
}
 
gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
}
 
i915_ggtt_flush(dev_priv);
}
 
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
if (obj->has_dma_mapping)
return 0;
 
if (!dma_map_sg(&obj->base.dev->pdev->dev,
obj->pages->sgl, obj->pages->nents,
PCI_DMA_BIDIRECTIONAL))
1329,7 → 2323,7
return 0;
}
 
static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
#ifdef writeq
writeq(pte, addr);
1346,8 → 2340,8
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
gen8_gtt_pte_t __iomem *gtt_entries =
(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
gen8_pte_t __iomem *gtt_entries =
(gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0; /* shut up gcc */
1392,8 → 2386,8
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
gen6_gtt_pte_t __iomem *gtt_entries =
(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
gen6_pte_t __iomem *gtt_entries =
(gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0;
1431,8 → 2425,8
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
gen8_pte_t scratch_pte, __iomem *gtt_base =
(gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
 
1441,7 → 2435,7
first_entry, num_entries, max_entries))
num_entries = max_entries;
 
scratch_pte = gen8_pte_encode(vm->scratch.addr,
scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC,
use_scratch);
for (i = 0; i < num_entries; i++)
1457,8 → 2451,8
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
gen6_pte_t scratch_pte, __iomem *gtt_base =
(gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
 
1467,7 → 2461,8
first_entry, num_entries, max_entries))
num_entries = max_entries;
 
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, use_scratch, 0);
 
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
1474,18 → 2469,16
readl(gtt_base);
}
 
 
static void i915_ggtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
uint64_t start,
enum i915_cache_level cache_level, u32 unused)
{
const unsigned long entry = vma->node.start >> PAGE_SHIFT;
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
 
BUG_ON(!i915_is_ggtt(vma->vm));
intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
vma->bound = GLOBAL_BIND;
intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
 
}
 
static void i915_ggtt_clear_range(struct i915_address_space *vm,
1498,17 → 2491,37
intel_gtt_clear_range(first_entry, num_entries);
}
 
static void i915_ggtt_unbind_vma(struct i915_vma *vma)
static int ggtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
const unsigned int first = vma->node.start >> PAGE_SHIFT;
const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags = 0;
int ret;
 
BUG_ON(!i915_is_ggtt(vma->vm));
vma->bound = 0;
intel_gtt_clear_range(first, size);
ret = i915_get_ggtt_vma_pages(vma);
if (ret)
return ret;
 
/* Currently applicable only to VLV */
if (obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
 
vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
vma->node.start,
cache_level, pte_flags);
 
/*
* Without aliasing PPGTT there's no difference between
* GLOBAL/LOCAL_BIND; it's all the same ptes. Hence unconditionally
* upgrade to both bound if we bind either, to avoid double-binding.
*/
vma->bound |= GLOBAL_BIND | LOCAL_BIND;
 
return 0;
}
 
static void ggtt_bind_vma(struct i915_vma *vma,
static int aliasing_gtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
1515,42 → 2528,34
struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
struct sg_table *pages = obj->pages;
u32 pte_flags = 0;
int ret;
 
ret = i915_get_ggtt_vma_pages(vma);
if (ret)
return ret;
pages = vma->ggtt_view.pages;
 
/* Currently applicable only to VLV */
if (obj->gt_ro)
flags |= PTE_READ_ONLY;
pte_flags |= PTE_READ_ONLY;
 
/* If there is no aliasing PPGTT, or the caller needs a global mapping,
* or we have a global mapping already but the cacheability flags have
* changed, set the global PTEs.
*
* If there is an aliasing PPGTT it is anecdotally faster, so use that
* instead if none of the above hold true.
*
* NB: A global mapping should only be needed for special regions like
* "gtt mappable", SNB errata, or if specified via special execbuf
* flags. At all other times, the GPU will use the aliasing PPGTT.
*/
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
if (!(vma->bound & GLOBAL_BIND) ||
(cache_level != obj->cache_level)) {
vma->vm->insert_entries(vma->vm, obj->pages,
 
if (flags & GLOBAL_BIND) {
vma->vm->insert_entries(vma->vm, pages,
vma->node.start,
cache_level, flags);
vma->bound |= GLOBAL_BIND;
cache_level, pte_flags);
}
}
 
if (dev_priv->mm.aliasing_ppgtt &&
(!(vma->bound & LOCAL_BIND) ||
(cache_level != obj->cache_level))) {
if (flags & LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
vma->obj->pages,
appgtt->base.insert_entries(&appgtt->base, pages,
vma->node.start,
cache_level, flags);
vma->bound |= LOCAL_BIND;
cache_level, pte_flags);
}
 
return 0;
}
 
static void ggtt_unbind_vma(struct i915_vma *vma)
1558,22 → 2563,24
struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
const uint64_t size = min_t(uint64_t,
obj->base.size,
vma->node.size);
 
if (vma->bound & GLOBAL_BIND) {
vma->vm->clear_range(vma->vm,
vma->node.start,
obj->base.size,
size,
true);
vma->bound &= ~GLOBAL_BIND;
}
 
if (vma->bound & LOCAL_BIND) {
if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
 
appgtt->base.clear_range(&appgtt->base,
vma->node.start,
obj->base.size,
size,
true);
vma->bound &= ~LOCAL_BIND;
}
}
 
1585,9 → 2592,7
 
interruptible = do_idling(dev_priv);
 
if (!obj->has_dma_mapping)
dma_unmap_sg(&dev->pdev->dev,
obj->pages->sgl, obj->pages->nents,
dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
PCI_DMA_BIDIRECTIONAL);
 
undo_idling(dev_priv, interruptible);
1595,8 → 2600,8
 
static void i915_gtt_color_adjust(struct drm_mm_node *node,
unsigned long color,
unsigned long *start,
unsigned long *end)
u64 *start,
u64 *end)
{
if (node->color != color)
*start += 4096;
1611,9 → 2616,9
}
 
static int i915_gem_setup_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end)
u64 start,
u64 mappable_end,
u64 end)
{
/* Let GEM manage all of the aperture.
*
1633,16 → 2638,28
 
BUG_ON(mappable_end > end);
 
/* Subtract the guard page ... */
drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
ggtt_vm->start = start;
 
/* Subtract the guard page before address space initialization to
* shrink the range used by drm_mm */
ggtt_vm->total = end - start - PAGE_SIZE;
i915_address_space_init(ggtt_vm, dev_priv);
ggtt_vm->total += PAGE_SIZE;
 
if (intel_vgpu_active(dev)) {
ret = intel_vgt_balloon(dev);
if (ret)
return ret;
}
 
if (!HAS_LLC(dev))
dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;
 
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
 
DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
i915_gem_obj_ggtt_offset(obj), obj->base.size);
 
WARN_ON(i915_gem_obj_ggtt_bound(obj));
1652,11 → 2669,10
return ret;
}
vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
}
 
dev_priv->gtt.base.start = start;
dev_priv->gtt.base.total = end - start;
 
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
1676,10 → 2692,29
return -ENOMEM;
 
ret = __hw_ppgtt_init(dev, ppgtt);
if (ret != 0)
if (ret) {
ppgtt->base.cleanup(&ppgtt->base);
kfree(ppgtt);
return ret;
}
 
if (ppgtt->base.allocate_va_range)
ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
ppgtt->base.total);
if (ret) {
ppgtt->base.cleanup(&ppgtt->base);
kfree(ppgtt);
return ret;
}
 
ppgtt->base.clear_range(&ppgtt->base,
ppgtt->base.start,
ppgtt->base.total,
true);
 
dev_priv->mm.aliasing_ppgtt = ppgtt;
WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma);
dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma;
}
 
return 0;
1688,7 → 2723,7
void i915_gem_init_global_gtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long gtt_size, mappable_size;
u64 gtt_size, mappable_size;
 
gtt_size = dev_priv->gtt.base.total;
mappable_size = dev_priv->gtt.mappable_end;
1708,6 → 2743,9
}
 
if (drm_mm_initialized(&vm->mm)) {
if (intel_vgpu_active(dev))
intel_vgt_deballoon();
 
drm_mm_takedown(&vm->mm);
list_del(&vm->global_link);
}
1715,50 → 2753,14
vm->cleanup(vm);
}
 
static int setup_scratch_page(struct drm_device *dev)
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct page *page;
dma_addr_t dma_addr;
 
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
set_pages_uc(page, 1);
 
#ifdef CONFIG_INTEL_IOMMU
dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, dma_addr))
return -EINVAL;
#else
dma_addr = page_to_phys(page);
#endif
dev_priv->gtt.base.scratch.page = page;
dev_priv->gtt.base.scratch.addr = dma_addr;
 
return 0;
}
 
static void teardown_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct page *page = dev_priv->gtt.base.scratch.page;
 
set_pages_wb(page, 1);
pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
__free_page(page);
}
 
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
return snb_gmch_ctl << 20;
}
 
static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
1774,7 → 2776,7
return bdw_gmch_ctl << 20;
}
 
static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
gmch_ctrl &= SNB_GMCH_GGMS_MASK;
1785,7 → 2787,7
return 0;
}
 
static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
1792,7 → 2794,7
return snb_gmch_ctl << 25; /* 32 MB units */
}
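/*
 * Standalone sketch (not part of this file): the GMCH decode pattern
 * shared by the size helpers above. A control-register field is
 * shifted down, masked, then scaled by its unit: << 20 for 1MB units,
 * << 25 for 32MB units. Field positions here are illustrative.
 */
#include <stddef.h>
#include <stdint.h>

static size_t sketch_decode_field(uint16_t ctl, unsigned int shift,
                                  uint16_t mask, unsigned int unit_shift)
{
        return (size_t)((ctl >> shift) & mask) << unit_shift;
}
/* e.g. a stolen-size field value of 2 in 32MB units is 2 << 25 = 64MB. */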
 
static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
1833,13 → 2835,23
size_t gtt_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_page_scratch *scratch_page;
phys_addr_t gtt_phys_addr;
int ret;
 
/* For Modern GENs the PTEs and register space are split in the BAR */
gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
(pci_resource_len(dev->pdev, 0) / 2);
 
/*
* On BXT writes larger than 64 bit to the GTT pagetable range will be
* dropped. For WC mappings in general we have 64 byte burst writes
* when the WC buffer is flushed, so we can't use it, but have to
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
if (IS_BROXTON(dev))
dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
else
dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
if (!dev_priv->gtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
1846,14 → 2858,17
return -ENOMEM;
}
 
ret = setup_scratch_page(dev);
if (ret) {
scratch_page = alloc_scratch_page(dev);
if (IS_ERR(scratch_page)) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
iounmap(dev_priv->gtt.gsm);
return PTR_ERR(scratch_page);
}
 
return ret;
dev_priv->gtt.base.scratch_page = scratch_page;
 
return 0;
}
 
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
1890,8 → 2905,8
 
/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
* write would work. */
I915_WRITE(GEN8_PRIVATE_PAT, pat);
I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}
 
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
1925,18 → 2940,18
GEN8_PPAT(6, CHV_PPAT_SNOOP) |
GEN8_PPAT(7, CHV_PPAT_SNOOP);
 
I915_WRITE(GEN8_PRIVATE_PAT, pat);
I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}
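/*
 * Standalone sketch (not part of this file): the PPAT packing used
 * above, assuming GEN8_PPAT(i, x) places one 8-bit entry per index in
 * a 64-bit word, which is then written out as the two 32-bit halves
 * seen in the _LO/_HI register writes.
 */
#include <stdint.h>

#define SKETCH_GEN8_PPAT(i, x) ((uint64_t)(x) << ((i) * 8))

static void sketch_split_pat(uint64_t pat, uint32_t *lo, uint32_t *hi)
{
        *lo = (uint32_t)pat;
        *hi = (uint32_t)(pat >> 32);
}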
 
static int gen8_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
u64 *gtt_total,
size_t *stolen,
phys_addr_t *mappable_base,
unsigned long *mappable_end)
u64 *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int gtt_size;
u64 gtt_size;
u16 snb_gmch_ctl;
int ret;
 
1960,9 → 2975,9
gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
}
 
*gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
*gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
 
if (IS_CHERRYVIEW(dev))
if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
1971,15 → 2986,17
 
dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
 
return ret;
}
 
static int gen6_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
u64 *gtt_total,
size_t *stolen,
phys_addr_t *mappable_base,
unsigned long *mappable_end)
u64 *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int gtt_size;
1993,7 → 3010,7
* a coarse sanity check.
*/
if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
DRM_ERROR("Unknown GMADR size (%lx)\n",
DRM_ERROR("Unknown GMADR size (%llx)\n",
dev_priv->gtt.mappable_end);
return -ENXIO;
}
2005,12 → 3022,14
*stolen = gen6_get_stolen_size(snb_gmch_ctl);
 
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
*gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
 
ret = ggtt_probe_common(dev, gtt_size);
 
dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
 
return ret;
}
2021,14 → 3040,14
struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
 
iounmap(gtt->gsm);
teardown_scratch_page(vm->dev);
free_scratch_page(vm->dev, vm->scratch_page);
}
 
static int i915_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
u64 *gtt_total,
size_t *stolen,
phys_addr_t *mappable_base,
unsigned long *mappable_end)
u64 *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
2042,7 → 3061,10
intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
 
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
 
if (unlikely(dev_priv->gtt.do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
2082,17 → 3104,17
dev_priv->gtt.base.cleanup = gen6_gmch_remove;
}
 
gtt->base.dev = dev;
 
ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
&gtt->mappable_base, &gtt->mappable_end);
if (ret)
return ret;
 
gtt->base.dev = dev;
 
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %zdM\n",
DRM_INFO("Memory usable by graphics device = %lluM\n",
gtt->base.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped)
2110,10 → 3132,80
return 0;
}
 
static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
struct i915_vma *vma;
bool flush;
 
i915_check_and_clear_faults(dev);
 
/* First fill our portion of the GTT with scratch pages */
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
dev_priv->gtt.base.start,
dev_priv->gtt.base.total,
true);
 
/* Cache flush objects bound into GGTT and rebind them. */
vm = &dev_priv->gtt.base;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
flush = false;
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (vma->vm != vm)
continue;
 
WARN_ON(i915_vma_bind(vma, obj->cache_level,
PIN_UPDATE));
 
flush = true;
}
 
if (flush)
i915_gem_clflush_object(obj, obj->pin_display);
}
 
if (INTEL_INFO(dev)->gen >= 8) {
if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
 
return;
}
 
if (USES_PPGTT(dev)) {
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
/* TODO: Perhaps it shouldn't be gen6 specific */
 
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt,
base);
 
if (i915_is_ggtt(vm))
ppgtt = dev_priv->mm.aliasing_ppgtt;
 
gen6_write_page_range(dev_priv, &ppgtt->pd,
0, ppgtt->base.total);
}
}
 
i915_ggtt_flush(dev_priv);
}
 
static struct i915_vma *
__i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *ggtt_view)
{
struct i915_vma *vma;
 
if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
return ERR_PTR(-EINVAL);
 
// vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
 
2123,38 → 3215,12
vma->vm = vm;
vma->obj = obj;
 
switch (INTEL_INFO(vm->dev)->gen) {
case 9:
case 8:
case 7:
case 6:
if (i915_is_ggtt(vm)) {
vma->unbind_vma = ggtt_unbind_vma;
vma->bind_vma = ggtt_bind_vma;
} else {
vma->unbind_vma = ppgtt_unbind_vma;
vma->bind_vma = ppgtt_bind_vma;
}
break;
case 5:
case 4:
case 3:
case 2:
BUG_ON(!i915_is_ggtt(vm));
vma->unbind_vma = i915_ggtt_unbind_vma;
vma->bind_vma = i915_ggtt_bind_vma;
break;
default:
BUG();
}
if (i915_is_ggtt(vm))
vma->ggtt_view = *ggtt_view;
 
/* Keep GGTT vmas first to make debug easier */
if (i915_is_ggtt(vm))
list_add(&vma->vma_link, &obj->vma_list);
else {
list_add_tail(&vma->vma_link, &obj->vma_list);
if (!i915_is_ggtt(vm))
i915_ppgtt_get(i915_vm_to_ppgtt(vm));
}
 
return vma;
}
2167,183 → 3233,307
 
vma = i915_gem_obj_to_vma(obj, vm);
if (!vma)
vma = __i915_gem_vma_create(obj, vm);
vma = __i915_gem_vma_create(obj, vm,
i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
 
return vma;
}
 
struct scatterlist *sg_next(struct scatterlist *sg)
struct i915_vma *
i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
if (sg_is_last(sg))
return NULL;
struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
struct i915_vma *vma;
 
sg++;
if (unlikely(sg_is_chain(sg)))
sg = sg_chain_ptr(sg);
if (WARN_ON(!view))
return ERR_PTR(-EINVAL);
 
return sg;
}
vma = i915_gem_obj_to_ggtt_view(obj, view);
 
if (IS_ERR(vma))
return vma;
 
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
bool skip_first_chunk, sg_free_fn *free_fn)
{
struct scatterlist *sgl, *next;
if (!vma)
vma = __i915_gem_vma_create(obj, ggtt, view);
 
if (unlikely(!table->sgl))
return;
return vma;
 
sgl = table->sgl;
while (table->orig_nents) {
unsigned int alloc_size = table->orig_nents;
unsigned int sg_size;
}
 
/*
* If we have more than max_ents segments left,
* then assign 'next' to the sg table after the current one.
* sg_size is then one less than alloc size, since the last
* element is the chain pointer.
*/
if (alloc_size > max_ents) {
next = sg_chain_ptr(&sgl[max_ents - 1]);
alloc_size = max_ents;
sg_size = alloc_size - 1;
} else {
sg_size = alloc_size;
next = NULL;
static struct scatterlist *
rotate_pages(dma_addr_t *in, unsigned int offset,
unsigned int width, unsigned int height,
struct sg_table *st, struct scatterlist *sg)
{
unsigned int column, row;
unsigned int src_idx;
 
if (!sg) {
st->nents = 0;
sg = st->sgl;
}
 
table->orig_nents -= sg_size;
if (!skip_first_chunk) {
kfree(sgl);
skip_first_chunk = false;
for (column = 0; column < width; column++) {
src_idx = width * (height - 1) + column;
for (row = 0; row < height; row++) {
st->nents++;
/* We don't need the pages, but need to initialize
* the entries so the sg list can be happily traversed.
* All we need are the DMA addresses.
*/
sg_set_page(sg, NULL, PAGE_SIZE, 0);
sg_dma_address(sg) = in[offset + src_idx];
sg_dma_len(sg) = PAGE_SIZE;
sg = sg_next(sg);
src_idx -= width;
}
sgl = next;
}
 
table->sgl = NULL;
return sg;
}
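/*
 * Worked example (added for illustration, not part of the original source):
 * for a width=2, height=2 page grid, rotate_pages() walks each column from
 * the bottom row upwards, so the output sg entries reference the source
 * pages in the order in[2], in[0], in[3], in[1], i.e. the 90-degree rotated
 * layout the rotated GGTT view expects.
 */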
void sg_free_table(struct sg_table *table)
{
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, NULL);
}
 
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
static struct sg_table *
intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
struct drm_i915_gem_object *obj)
{
struct scatterlist *sg, *prv;
unsigned int left;
unsigned int max_ents = SG_MAX_SINGLE_ALLOC;
struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
unsigned int size_pages_uv;
struct sg_page_iter sg_iter;
unsigned long i;
dma_addr_t *page_addr_list;
struct sg_table *st;
unsigned int uv_start_page;
struct scatterlist *sg;
int ret = -ENOMEM;
 
#ifndef ARCH_HAS_SG_CHAIN
BUG_ON(nents > max_ents);
#endif
/* Allocate a temporary list of source pages for random access. */
page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
sizeof(dma_addr_t));
if (!page_addr_list)
return ERR_PTR(ret);
 
memset(table, 0, sizeof(*table));
/* Account for UV plane with NV12. */
if (rot_info->pixel_format == DRM_FORMAT_NV12)
size_pages_uv = rot_info->size_uv >> PAGE_SHIFT;
else
size_pages_uv = 0;
 
left = nents;
prv = NULL;
do {
unsigned int sg_size, alloc_size = left;
/* Allocate target SG list. */
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
goto err_st_alloc;
 
if (alloc_size > max_ents) {
alloc_size = max_ents;
sg_size = alloc_size - 1;
} else
sg_size = alloc_size;
ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL);
if (ret)
goto err_sg_alloc;
 
left -= sg_size;
/* Populate source page list from the object. */
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
i++;
}
 
sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask);
if (unlikely(!sg)) {
/*
* Adjust entry count to reflect that the last
* entry of the previous table won't be used for
* linkage. Without this, sg_kfree() may get
* confused.
*/
if (prv)
table->nents = ++table->orig_nents;
/* Rotate the pages. */
sg = rotate_pages(page_addr_list, 0,
rot_info->width_pages, rot_info->height_pages,
st, NULL);
 
goto err;
}
/* Append the UV plane if NV12. */
if (rot_info->pixel_format == DRM_FORMAT_NV12) {
uv_start_page = size_pages;
 
sg_init_table(sg, alloc_size);
table->nents = table->orig_nents += sg_size;
/* Check for tile-row un-alignment. */
if (offset_in_page(rot_info->uv_offset))
uv_start_page--;
 
/*
* If this is the first mapping, assign the sg table header.
* If this is not the first mapping, chain previous part.
*/
if (prv)
sg_chain(prv, max_ents, sg);
else
table->sgl = sg;
rot_info->uv_start_page = uv_start_page;
 
/*
* If no more entries after this one, mark the end
*/
if (!left)
sg_mark_end(&sg[sg_size - 1]);
rotate_pages(page_addr_list, uv_start_page,
rot_info->width_pages_uv,
rot_info->height_pages_uv,
st, sg);
}
 
prv = sg;
} while (left);
DRM_DEBUG_KMS(
"Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0)).\n",
obj->base.size, rot_info->pitch, rot_info->height,
rot_info->pixel_format, rot_info->width_pages,
rot_info->height_pages, size_pages + size_pages_uv,
size_pages);
 
return 0;
drm_free_large(page_addr_list);
 
err:
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, NULL);
return st;
 
return -ENOMEM;
err_sg_alloc:
kfree(st);
err_st_alloc:
drm_free_large(page_addr_list);
 
DRM_DEBUG_KMS(
"Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0))\n",
obj->base.size, ret, rot_info->pitch, rot_info->height,
rot_info->pixel_format, rot_info->width_pages,
rot_info->height_pages, size_pages + size_pages_uv,
size_pages);
return ERR_PTR(ret);
}
 
static struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
struct drm_i915_gem_object *obj)
{
struct sg_table *st;
struct scatterlist *sg;
struct sg_page_iter obj_sg_iter;
int ret = -ENOMEM;
 
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
goto err_st_alloc;
 
ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
if (ret)
goto err_sg_alloc;
 
sg = st->sgl;
st->nents = 0;
for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
view->params.partial.offset)
{
memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
{
unsigned int i;
for (i = 0; i < nents; i++)
sgl[i].sg_magic = SG_MAGIC;
if (st->nents >= view->params.partial.size)
break;
 
sg_set_page(sg, NULL, PAGE_SIZE, 0);
sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
sg_dma_len(sg) = PAGE_SIZE;
 
sg = sg_next(sg);
st->nents++;
}
#endif
sg_mark_end(&sgl[nents - 1]);
 
return st;
 
err_sg_alloc:
kfree(st);
err_st_alloc:
return ERR_PTR(ret);
}
 
 
void __sg_page_iter_start(struct sg_page_iter *piter,
struct scatterlist *sglist, unsigned int nents,
unsigned long pgoffset)
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
piter->__pg_advance = 0;
piter->__nents = nents;
int ret = 0;
 
piter->sg = sglist;
piter->sg_pgoffset = pgoffset;
if (vma->ggtt_view.pages)
return 0;
 
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
vma->ggtt_view.pages = vma->obj->pages;
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->ggtt_view.pages =
intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
vma->ggtt_view.pages =
intel_partial_pages(&vma->ggtt_view, vma->obj);
else
WARN_ONCE(1, "GGTT view %u not implemented!\n",
vma->ggtt_view.type);
 
if (!vma->ggtt_view.pages) {
DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
vma->ggtt_view.type);
ret = -EINVAL;
} else if (IS_ERR(vma->ggtt_view.pages)) {
ret = PTR_ERR(vma->ggtt_view.pages);
vma->ggtt_view.pages = NULL;
DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
vma->ggtt_view.type, ret);
}
 
static int sg_page_count(struct scatterlist *sg)
{
return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
return ret;
}
 
bool __sg_page_iter_next(struct sg_page_iter *piter)
/**
* i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
* @vma: VMA to map
* @cache_level: mapping cache level
* @flags: flags like global or local mapping
*
* DMA addresses are taken from the scatter-gather table of this object (or of
* this VMA in case of non-default GGTT views) and PTE entries set up.
* Note that DMA addresses are also the only part of the SG table we care about.
*/
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags)
{
if (!piter->__nents || !piter->sg)
return false;
int ret;
u32 bind_flags;
 
piter->sg_pgoffset += piter->__pg_advance;
piter->__pg_advance = 1;
if (WARN_ON(flags == 0))
return -EINVAL;
 
while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
piter->sg_pgoffset -= sg_page_count(piter->sg);
piter->sg = sg_next(piter->sg);
if (!--piter->__nents || !piter->sg)
return false;
bind_flags = 0;
if (flags & PIN_GLOBAL)
bind_flags |= GLOBAL_BIND;
if (flags & PIN_USER)
bind_flags |= LOCAL_BIND;
 
if (flags & PIN_UPDATE)
bind_flags |= vma->bound;
else
bind_flags &= ~vma->bound;
 
if (bind_flags == 0)
return 0;
 
if (vma->bound == 0 && vma->vm->allocate_va_range) {
trace_i915_va_alloc(vma->vm,
vma->node.start,
vma->node.size,
VM_TO_TRACE_NAME(vma->vm));
 
/* XXX: i915_vma_pin() will fix this +- hack */
vma->pin_count++;
ret = vma->vm->allocate_va_range(vma->vm,
vma->node.start,
vma->node.size);
vma->pin_count--;
if (ret)
return ret;
}
 
return true;
ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
if (ret)
return ret;
 
vma->bound |= bind_flags;
 
return 0;
}
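/*
 * Informal examples of the flag handling above (added for clarity, not part
 * of the original source): calling i915_vma_bind() with PIN_USER on a vma
 * already bound with GLOBAL_BIND leaves bind_flags == LOCAL_BIND, so only
 * the per-process mapping is added; requesting PIN_GLOBAL on the same vma
 * yields bind_flags == 0 and returns early without touching the PTEs;
 * PIN_UPDATE | PIN_GLOBAL instead keeps the already-bound bits, so the
 * global PTEs are rewritten, e.g. after a cache level change.
 */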
EXPORT_SYMBOL(__sg_page_iter_next);
 
 
/**
* i915_ggtt_view_size - Get the size of a GGTT view.
* @obj: Object the view is of.
* @view: The view in question.
*
* @return The size of the GGTT view in bytes.
*/
size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
if (view->type == I915_GGTT_VIEW_NORMAL) {
return obj->base.size;
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
return view->rotation_info.size;
} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
return view->params.partial.size << PAGE_SHIFT;
} else {
WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
return obj->base.size;
}
}
/drivers/video/drm/i915/i915_gem_gtt.h
36,13 → 36,15
 
struct drm_i915_file_private;
 
typedef uint32_t gen6_gtt_pte_t;
typedef uint64_t gen8_gtt_pte_t;
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
typedef uint32_t gen6_pte_t;
typedef uint64_t gen8_pte_t;
typedef uint64_t gen8_pde_t;
typedef uint64_t gen8_ppgtt_pdpe_t;
typedef uint64_t gen8_ppgtt_pml4e_t;
 
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
 
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
 
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
51,9 → 53,16
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID (1 << 0)
 
#define GEN6_PPGTT_PD_ENTRIES 512
#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
#define I915_PTES(pte_len) (PAGE_SIZE / (pte_len))
#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
#define I915_PDES 512
#define I915_PDE_MASK (I915_PDES - 1)
#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))
 
#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT 22
#define GEN6_PDE_VALID (1 << 0)
 
#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
81,17 → 90,28
* PDPE | PDE | PTE | offset
* The difference compared to a normal x86 3-level page table is that the PDPEs
* are programmed via register.
*
* GEN8 48b legacy style address is defined as a 4 level page table:
* 47:39 | 38:30 | 29:21 | 20:12 | 11:0
* PML4E | PDPE | PDE | PTE | offset
*/
#define GEN8_PML4ES_PER_PML4 512
#define GEN8_PML4E_SHIFT 39
#define GEN8_PML4E_MASK (GEN8_PML4ES_PER_PML4 - 1)
#define GEN8_PDPE_SHIFT 30
#define GEN8_PDPE_MASK 0x3
/* NB: GEN8_PDPE_MASK is too wide for 32b platforms (which have only 4 PDPEs),
* but the extra bits have no impact on 32b page tables */
#define GEN8_PDPE_MASK 0x1ff
#define GEN8_PDE_SHIFT 21
#define GEN8_PDE_MASK 0x1ff
#define GEN8_PTE_SHIFT 12
#define GEN8_PTE_MASK 0x1ff
#define GEN8_LEGACY_PDPS 4
#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
#define GEN8_LEGACY_PDPES 4
#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
 
#define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
 
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
109,7 → 129,47
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
 
enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
I915_GGTT_VIEW_ROTATED,
I915_GGTT_VIEW_PARTIAL,
};
 
struct intel_rotation_info {
unsigned int height;
unsigned int pitch;
unsigned int uv_offset;
uint32_t pixel_format;
uint64_t fb_modifier;
unsigned int width_pages, height_pages;
uint64_t size;
unsigned int width_pages_uv, height_pages_uv;
uint64_t size_uv;
unsigned int uv_start_page;
};
 
struct i915_ggtt_view {
enum i915_ggtt_view_type type;
 
union {
struct {
u64 offset;
unsigned int size;
} partial;
} params;
 
struct sg_table *pages;
 
union {
struct intel_rotation_info rotation_info;
};
};
 
extern const struct i915_ggtt_view i915_ggtt_view_normal;
extern const struct i915_ggtt_view i915_ggtt_view_rotated;
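/*
 * Illustrative sketch (an addition for clarity, not part of the original
 * header): filling in a partial view of an object. Both offset and size are
 * in pages, cf. i915_ggtt_view_size() which scales size by PAGE_SHIFT, and
 * two partial views compare equal only when these params match (see
 * i915_ggtt_view_equal() later in this header). The helper name is
 * hypothetical.
 */
static inline struct i915_ggtt_view
i915_ggtt_view_partial_example(u64 first_page, unsigned int num_pages)
{
	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_PARTIAL };

	view.params.partial.offset = first_page;	/* page offset into obj */
	view.params.partial.size = num_pages;	/* view length in pages */
	return view;
}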
 
enum i915_cache_level;
 
/**
* A VMA represents a GEM BO that is bound into an address space. Therefore, a
* VMA's presence cannot be guaranteed before binding, or after unbinding the
126,9 → 186,17
/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND (1<<0)
#define LOCAL_BIND (1<<1)
#define PTE_READ_ONLY (1<<2)
unsigned int bound : 4;
 
/**
* Support different GGTT views into the same object.
* This means there can be multiple VMA mappings per object and per VM.
* i915_ggtt_view_type is used to distinguish between those entries.
* The default of zero (I915_GGTT_VIEW_NORMAL) is also assumed in GEM
* functions which take no ggtt view parameter.
*/
struct i915_ggtt_view ggtt_view;
 
/** This object's place on the active/inactive lists */
struct list_head mm_list;
 
146,43 → 214,81
 
/**
* How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
* (via user_pin_count), execbuffer (objects are not allowed multiple
* times for the same batchbuffer), and the framebuffer code. When
* switching/pageflipping, the framebuffer code has at most two buffers
* pinned per crtc.
* users can each hold at most one reference: pwrite/pread, execbuffer
* (objects are not allowed multiple times for the same batchbuffer),
* and the framebuffer code. When switching/pageflipping, the
* framebuffer code has at most two buffers pinned per crtc.
*
* In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
* bits with absolutely no headroom. So use 4 bits. */
unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
};
 
/** Unmap an object from an address space. This usually consists of
* setting the valid PTE entries to a reserved scratch page. */
void (*unbind_vma)(struct i915_vma *vma);
/* Map an object into an address space with the given cache flags. */
void (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
struct i915_page_dma {
struct page *page;
union {
dma_addr_t daddr;
 
/* For gen6/gen7 only. This is the offset in the GGTT
* where the page directory entries for PPGTT begin
*/
uint32_t ggtt_offset;
};
};
 
#define px_base(px) (&(px)->base)
#define px_page(px) (px_base(px)->page)
#define px_dma(px) (px_base(px)->daddr)
 
struct i915_page_scratch {
struct i915_page_dma base;
};
 
struct i915_page_table {
struct i915_page_dma base;
 
unsigned long *used_ptes;
};
 
struct i915_page_directory {
struct i915_page_dma base;
 
unsigned long *used_pdes;
struct i915_page_table *page_table[I915_PDES]; /* PDEs */
};
 
struct i915_page_directory_pointer {
struct i915_page_dma base;
 
unsigned long *used_pdpes;
struct i915_page_directory **page_directory;
};
 
struct i915_pml4 {
struct i915_page_dma base;
 
DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};
 
struct i915_address_space {
struct drm_mm mm;
struct drm_device *dev;
struct list_head global_link;
unsigned long start; /* Start offset always 0 for dri2 */
size_t total; /* size addr space maps (ex. 2GB for ggtt) */
u64 start; /* Start offset always 0 for dri2 */
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
 
struct {
dma_addr_t addr;
struct page *page;
} scratch;
struct i915_page_scratch *scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
 
/**
* List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
* flushed, not necessarily primitives. last_read_req
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
193,7 → 299,7
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
* last_rendering_seqno is 0 while an object is in this list.
* last_read_req is NULL while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
202,9 → 308,14
struct list_head inactive_list;
 
/* FIXME: Need a more generic return type */
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
gen6_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 flags); /* Create a valid PTE */
/* flags for pte_encode */
#define PTE_READ_ONLY (1<<0)
int (*allocate_va_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length);
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
214,6 → 325,13
uint64_t start,
enum i915_cache_level cache_level, u32 flags);
void (*cleanup)(struct i915_address_space *vm);
/** Unmap an object from an address space. This usually consists of
* setting the valid PTE entries to a reserved scratch page. */
void (*unbind_vma)(struct i915_vma *vma);
/* Map an object into an address space with the given cache flags. */
int (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
};
 
/* The Graphics Translation Table is the way in which GEN hardware translates a
225,9 → 343,10
*/
struct i915_gtt {
struct i915_address_space base;
 
size_t stolen_size; /* Total size of stolen memory */
 
unsigned long mappable_end; /* End offset that we can CPU map */
size_t stolen_usable_size; /* Total size minus BIOS reserved */
u64 mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
 
239,9 → 358,9
int mtrr;
 
/* global gtt ops */
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
size_t *stolen, phys_addr_t *mappable_base,
unsigned long *mappable_end);
u64 *mappable_end);
};
 
struct i915_hw_ppgtt {
248,30 → 367,157
struct i915_address_space base;
struct kref ref;
struct drm_mm_node node;
unsigned num_pd_entries;
unsigned num_pd_pages; /* gen8+ */
unsigned long pd_dirty_rings;
union {
struct page **pt_pages;
struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */
struct i915_page_directory_pointer pdp; /* GEN8+ */
struct i915_page_directory pd; /* GEN6-7 */
};
struct page *pd_pages;
union {
uint32_t pd_offset;
dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
};
union {
dma_addr_t *pt_dma_addr;
dma_addr_t *gen8_pt_dma_addr[4];
};
 
struct drm_i915_file_private *file_priv;
 
gen6_pte_t __iomem *pd_addr;
 
int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring);
// void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
struct drm_i915_gem_request *req);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
 
/* gen6_for_each_pde iterates over every pde between start and start + length.
* If start and start + length are not perfectly divisible, the macro will round
* down and up as needed. The macro modifies pde, start, and length. Dev is
* only used to differentiate shift values. Temp is a scratch variable. On
* gen6/7, start = 0 and length = 2G effectively iterates over every PDE in the
* system.
*
* XXX: temp is not actually needed, but it saves doing the ALIGN operation.
*/
#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
for (iter = gen6_pde_index(start); \
length > 0 && iter < I915_PDES ? \
(pt = (pd)->page_table[iter]), 1 : 0; \
iter++, \
temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
temp = min_t(unsigned, temp, length), \
start += temp, length -= temp)
 
#define gen6_for_all_pdes(pt, ppgtt, iter) \
for (iter = 0; \
pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \
iter++)
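/*
 * Illustrative idiom (added for clarity, not part of the original header):
 * the gen6 allocation and clear paths walk a VA range roughly as
 *
 *	struct i915_page_table *pt;
 *	uint32_t pde, temp;
 *
 *	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
 *		// gen6_pte_index(start) and gen6_pte_count(start, length)
 *		// (defined below) bound the PTE window of this PDE; the
 *		// macro then advances start and shrinks length up to the
 *		// next PDE boundary.
 *	}
 */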
 
static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
const uint32_t mask = NUM_PTE(pde_shift) - 1;
 
return (address >> PAGE_SHIFT) & mask;
}
 
/* Helper to count the number of PTEs within the given length. This count
* does not cross a page table boundary, so the max value would be
* GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
*/
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
uint32_t pde_shift)
{
const uint64_t mask = ~((1 << pde_shift) - 1);
uint64_t end;
 
WARN_ON(length == 0);
WARN_ON(offset_in_page(addr|length));
 
end = addr + length;
 
if ((addr & mask) != (end & mask))
return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
 
return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
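/*
 * Worked examples (added for illustration): with GEN6_PDE_SHIFT == 22 a PDE
 * covers 4M and NUM_PTE(22) == 1024. For addr = 0x1000 and length = 0x3000
 * both ends fall in the same PDE, so the count is simply
 * i915_pte_index(0x4000) - i915_pte_index(0x1000) == 4 - 1 == 3 PTEs. For
 * addr = 0x3fe000 and length = 0x4000 the range crosses the 4M boundary, so
 * the count is clamped to 1024 - 1022 == 2 PTEs, and the caller is expected
 * to handle the remainder in the next page table.
 */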
 
static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
{
return (addr >> shift) & I915_PDE_MASK;
}
 
static inline uint32_t gen6_pte_index(uint32_t addr)
{
return i915_pte_index(addr, GEN6_PDE_SHIFT);
}
 
static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
{
return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}
 
static inline uint32_t gen6_pde_index(uint32_t addr)
{
return i915_pde_index(addr, GEN6_PDE_SHIFT);
}
 
/* Equivalent to the gen6 version: iterates over every pde
* between start and start + length. On gen8+ it simply iterates
* over every page directory entry in a page directory.
*/
#define gen8_for_each_pde(pt, pd, start, length, temp, iter) \
for (iter = gen8_pde_index(start); \
length > 0 && iter < I915_PDES ? \
(pt = (pd)->page_table[iter]), 1 : 0; \
iter++, \
temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start, \
temp = min(temp, length), \
start += temp, length -= temp)
 
#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
for (iter = gen8_pdpe_index(start); \
length > 0 && (iter < I915_PDPES_PER_PDP(dev)) ? \
(pd = (pdp)->page_directory[iter]), 1 : 0; \
iter++, \
temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \
temp = min(temp, length), \
start += temp, length -= temp)
 
#define gen8_for_each_pml4e(pdp, pml4, start, length, temp, iter) \
for (iter = gen8_pml4e_index(start); \
length > 0 && iter < GEN8_PML4ES_PER_PML4 ? \
(pdp = (pml4)->pdps[iter]), 1 : 0; \
iter++, \
temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT) - start, \
temp = min(temp, length), \
start += temp, length -= temp)
 
static inline uint32_t gen8_pte_index(uint64_t address)
{
return i915_pte_index(address, GEN8_PDE_SHIFT);
}
 
static inline uint32_t gen8_pde_index(uint64_t address)
{
return i915_pde_index(address, GEN8_PDE_SHIFT);
}
 
static inline uint32_t gen8_pdpe_index(uint64_t address)
{
return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}
 
static inline uint32_t gen8_pml4e_index(uint64_t address)
{
return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
}
 
static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
{
return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}
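/*
 * Worked example (added for illustration): composing an address from known
 * indices and decoding it with the helpers above. For
 *
 *	addr = (2ULL << GEN8_PML4E_SHIFT) | (3ULL << GEN8_PDPE_SHIFT) |
 *	       (4ULL << GEN8_PDE_SHIFT) | (5ULL << GEN8_PTE_SHIFT) | 0x6
 *
 * gen8_pml4e_index(addr) == 2, gen8_pdpe_index(addr) == 3,
 * gen8_pde_index(addr) == 4 and gen8_pte_index(addr) == 5, while the low
 * 12 bits (0x6) are the byte offset within the page, matching the
 * "PML4E | PDPE | PDE | PTE | offset" layout documented with the defines.
 */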
 
static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
{
return test_bit(n, ppgtt->pdp.used_pdpes) ?
px_dma(ppgtt->pdp.page_directory[n]) :
px_dma(ppgtt->base.scratch_pd);
}
 
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev);
279,6 → 525,7
 
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
struct drm_i915_file_private *fpriv);
300,4 → 547,22
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
 
static inline bool
i915_ggtt_view_equal(const struct i915_ggtt_view *a,
const struct i915_ggtt_view *b)
{
if (WARN_ON(!a || !b))
return false;
 
if (a->type != b->type)
return false;
if (a->type == I915_GGTT_VIEW_PARTIAL)
return !memcmp(&a->params, &b->params, sizeof(a->params));
return true;
}
 
size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
 
#endif
/drivers/video/drm/i915/i915_gem_render_state.c
73,6 → 73,24
return ret;
}
 
/*
* Macro to add commands to the auxiliary batch.
* This macro only checks for page overflow before inserting the commands;
* this is sufficient as the null state generator makes the final batch
* with two passes to build command and state separately. At this point
* the size of both is known and it compacts them by relocating the state
* right after the commands, taking care of alignment, so we should have
* sufficient space below them for adding new commands.
*/
#define OUT_BATCH(batch, i, val) \
do { \
if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) { \
ret = -ENOSPC; \
goto err_out; \
} \
(batch)[(i)++] = (val); \
} while (0)
 
static int render_state_setup(struct render_state *so)
{
const struct intel_renderstate_rodata *rodata = so->rodata;
96,8 → 114,10
s = lower_32_bits(r);
if (so->gen >= 8) {
if (i + 1 >= rodata->batch_items ||
rodata->batch[i + 1] != 0)
return -EINVAL;
rodata->batch[i + 1] != 0) {
ret = -EINVAL;
goto err_out;
}
 
d[i++] = s;
s = upper_32_bits(r);
108,6 → 128,21
 
d[i++] = s;
}
 
while (i % CACHELINE_DWORDS)
OUT_BATCH(d, i, MI_NOOP);
 
so->aux_batch_offset = i * sizeof(u32);
 
OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
 
/*
* Since we are sending length, we need to strictly conform to
* all requirements. For Gen2 this must be a multiple of 8.
*/
so->aux_batch_size = ALIGN(so->aux_batch_size, 8);
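/*
 * Worked example (added for illustration): if the main batch ends at
 * i == 37 dwords, the MI_NOOP padding above advances i to 48, the next
 * CACHELINE_DWORDS multiple (16 dwords with the usual 64-byte cacheline),
 * so aux_batch_offset == 192 bytes. With only MI_BATCH_BUFFER_END emitted
 * afterwards aux_batch_size is 4, aligned up to 8, and such an effectively
 * empty aux batch is then skipped by the "aux_batch_size > 8" check in
 * i915_gem_render_state_init().
 */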
 
kunmap(page);
 
ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
120,8 → 155,14
}
 
return 0;
 
err_out:
kunmap(page);
return ret;
}
 
#undef OUT_BATCH
 
void i915_gem_render_state_fini(struct render_state *so)
{
i915_gem_object_ggtt_unpin(so->obj);
152,12 → 193,12
return 0;
}
 
int i915_gem_render_state_init(struct intel_engine_cs *ring)
int i915_gem_render_state_init(struct drm_i915_gem_request *req)
{
struct render_state so;
int ret;
 
ret = i915_gem_render_state_prepare(ring, &so);
ret = i915_gem_render_state_prepare(req->ring, &so);
if (ret)
return ret;
 
164,17 → 205,24
if (so.rodata == NULL)
return 0;
 
ret = ring->dispatch_execbuffer(ring,
so.ggtt_offset,
ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
so.rodata->batch_items * 4,
I915_DISPATCH_SECURE);
if (ret)
goto out;
 
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
if (so.aux_batch_size > 8) {
ret = req->ring->dispatch_execbuffer(req,
(so.ggtt_offset +
so.aux_batch_offset),
so.aux_batch_size,
I915_DISPATCH_SECURE);
if (ret)
goto out;
}
 
ret = __i915_add_request(ring, NULL, so.obj, NULL);
/* __i915_add_request moves object to inactive if it fails */
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
 
out:
i915_gem_render_state_fini(&so);
return ret;
/drivers/video/drm/i915/i915_gem_render_state.h
37,9 → 37,11
struct drm_i915_gem_object *obj;
u64 ggtt_offset;
int gen;
u32 aux_batch_size;
u32 aux_batch_offset;
};
 
int i915_gem_render_state_init(struct intel_engine_cs *ring);
int i915_gem_render_state_init(struct drm_i915_gem_request *req);
void i915_gem_render_state_fini(struct render_state *so);
int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
struct render_state *so);
/drivers/video/drm/i915/i915_gem_stolen.c
30,6 → 30,9
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
#define KB(x) ((x) * 1024)
#define MB(x) (KB(x) * 1024)
 
/*
* The BIOS typically reserves some of the system's memory for the exclusive
* use of the integrated graphics. This memory is no longer available for
42,6 → 45,46
* for is a boon.
*/
 
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment, u64 start, u64 end)
{
int ret;
 
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
 
/* See the comment at the drm_mm_init() call for more about this check.
* WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
start = 4096;
 
mutex_lock(&dev_priv->mm.stolen_lock);
ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
alignment, start, end,
DRM_MM_SEARCH_DEFAULT);
mutex_unlock(&dev_priv->mm.stolen_lock);
 
return ret;
}
 
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment)
{
return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
alignment, 0,
dev_priv->gtt.stolen_usable_size);
}
 
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node)
{
mutex_lock(&dev_priv->mm.stolen_lock);
drm_mm_remove_node(node);
mutex_unlock(&dev_priv->mm.stolen_lock);
}
 
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
51,13 → 94,13
/* Almost universally we can find the Graphics Base of Stolen Memory
* at offset 0x5c in the igfx configuration space. On a few (desktop)
* machines this is also mirrored in the bridge device at different
* locations, or in the MCHBAR. On gen2, the layout is again slightly
* different with the Graphics Segment immediately following Top of
* Memory (or Top of Usable DRAM). Note it appears that TOUD is only
* reported by 865g, so we just use the top of memory as determined
* by the e820 probe.
* locations, or in the MCHBAR.
*
* XXX However gen2 requires an unavailable symbol.
* On 865 we just check the TOUD register.
*
* On 830/845/85x the stolen memory base isn't available in any
* register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
*
*/
base = 0;
if (INTEL_INFO(dev)->gen >= 3) {
152,151 → 195,139
return base;
}
 
static int find_compression_threshold(struct drm_device *dev,
struct drm_mm_node *node,
int size,
int fb_cpp)
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int compression_threshold = 1;
int ret;
 
/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.
*
* The enable_fbc code will attempt to use one of our 2 compression
* thresholds, therefore, in that case, we only have 1 resort.
*/
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
 
/* Try to over-allocate to reduce reallocations and fragmentation. */
ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
if (ret == 0)
return compression_threshold;
 
again:
/* HW's ability to limit the CFB is 1:4 */
if (compression_threshold > 4 ||
(fb_cpp == 2 && compression_threshold == 2))
return 0;
 
ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
size >>= 1, 4096,
DRM_MM_SEARCH_DEFAULT);
if (ret && INTEL_INFO(dev)->gen <= 4) {
return 0;
} else if (ret) {
compression_threshold <<= 1;
goto again;
} else {
return compression_threshold;
drm_mm_takedown(&dev_priv->mm.stolen);
}
}
 
static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
unsigned long *base, unsigned long *size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *uninitialized_var(compressed_llb);
int ret;
uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
unsigned long stolen_top = dev_priv->mm.stolen_base +
dev_priv->gtt.stolen_size;
 
ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
size, fb_cpp);
if (!ret)
goto err_llb;
else if (ret > 1) {
DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
 
}
WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
 
dev_priv->fbc.threshold = ret;
 
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
goto err_fb;
 
ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
4096, 4096, DRM_MM_SEARCH_DEFAULT);
if (ret)
goto err_fb;
 
dev_priv->fbc.compressed_llb = compressed_llb;
 
I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start);
/* On these platforms, the register doesn't have a size field, so the
* size is the distance between the base and the top of the stolen
* memory. We also have the genuine case where base is zero and there's
* nothing reserved. */
if (*base == 0)
*size = 0;
else
*size = stolen_top - *base;
}
 
dev_priv->fbc.size = size / dev_priv->fbc.threshold;
static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
unsigned long *base, unsigned long *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
 
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
 
return 0;
 
err_fb:
kfree(compressed_llb);
drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
err_llb:
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
case GEN6_STOLEN_RESERVED_1M:
*size = 1024 * 1024;
break;
case GEN6_STOLEN_RESERVED_512K:
*size = 512 * 1024;
break;
case GEN6_STOLEN_RESERVED_256K:
*size = 256 * 1024;
break;
case GEN6_STOLEN_RESERVED_128K:
*size = 128 * 1024;
break;
default:
*size = 1024 * 1024;
MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
}
}
 
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
unsigned long *base, unsigned long *size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
 
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
 
if (size < dev_priv->fbc.size)
return 0;
 
/* Release any current block */
i915_gem_stolen_cleanup_compression(dev);
 
return i915_setup_compression(dev, size, fb_cpp);
switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
case GEN7_STOLEN_RESERVED_1M:
*size = 1024 * 1024;
break;
case GEN7_STOLEN_RESERVED_256K:
*size = 256 * 1024;
break;
default:
*size = 1024 * 1024;
MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
}
}
 
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
unsigned long *base, unsigned long *size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
 
if (dev_priv->fbc.size == 0)
return;
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
 
drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
 
if (dev_priv->fbc.compressed_llb) {
drm_mm_remove_node(dev_priv->fbc.compressed_llb);
kfree(dev_priv->fbc.compressed_llb);
switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
case GEN8_STOLEN_RESERVED_1M:
*size = 1024 * 1024;
break;
case GEN8_STOLEN_RESERVED_2M:
*size = 2 * 1024 * 1024;
break;
case GEN8_STOLEN_RESERVED_4M:
*size = 4 * 1024 * 1024;
break;
case GEN8_STOLEN_RESERVED_8M:
*size = 8 * 1024 * 1024;
break;
default:
*size = 8 * 1024 * 1024;
MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
}
 
dev_priv->fbc.size = 0;
}
 
void i915_gem_cleanup_stolen(struct drm_device *dev)
static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
unsigned long *base, unsigned long *size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
unsigned long stolen_top;
 
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
 
i915_gem_stolen_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
 
/* On these platforms, the register doesn't have a size field, so the
* size is the distance between the base and the top of the stolen
* memory. We also have the genuine case where base is zero and there's
* nothing reserved. */
if (*base == 0)
*size = 0;
else
*size = stolen_top - *base;
}
 
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
int bios_reserved = 0;
unsigned long reserved_total, reserved_base = 0, reserved_size;
unsigned long stolen_top;
 
mutex_init(&dev_priv->mm.stolen_lock);
 
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
311,27 → 342,79
if (dev_priv->mm.stolen_base == 0)
return 0;
 
DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
 
if (INTEL_INFO(dev)->gen >= 8) {
tmp = I915_READ(GEN7_BIOS_RESERVED);
tmp >>= GEN8_BIOS_RESERVED_SHIFT;
tmp &= GEN8_BIOS_RESERVED_MASK;
bios_reserved = (1024*1024) << tmp;
} else if (IS_GEN7(dev)) {
tmp = I915_READ(GEN7_BIOS_RESERVED);
bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
256*1024 : 1024*1024;
switch (INTEL_INFO(dev_priv)->gen) {
case 2:
case 3:
break;
case 4:
if (IS_G4X(dev))
g4x_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
break;
case 5:
/* Assume the gen6 maximum for the older platforms. */
reserved_size = 1024 * 1024;
reserved_base = stolen_top - reserved_size;
break;
case 6:
gen6_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
break;
case 7:
gen7_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
break;
default:
if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
bdw_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
else
gen8_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
break;
}
 
if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
/* It is possible for the reserved base to be zero, but the register
* field for size doesn't have a zero option. */
if (reserved_base == 0) {
reserved_size = 0;
reserved_base = stolen_top;
}
 
if (reserved_base < dev_priv->mm.stolen_base ||
reserved_base + reserved_size > stolen_top) {
DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
reserved_base, reserved_base + reserved_size,
dev_priv->mm.stolen_base, stolen_top);
return 0;
}
 
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
bios_reserved);
/* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
 
DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
dev_priv->gtt.stolen_size >> 10,
(dev_priv->gtt.stolen_size - reserved_total) >> 10);
 
dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
reserved_total;
 
/*
* Basic memrange allocator for stolen space.
*
* TODO: Notice that some platforms require us to not use the first page
* of the stolen memory but their BIOSes may still put the framebuffer
* on the first page. So we don't reserve this page for now because of
* that. Our current solution is to just prevent new nodes from being
* inserted on the first page - see the check we have at
* i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
* problem later.
*/
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
 
return 0;
}
 
387,8 → 470,10
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 
if (obj->stolen) {
drm_mm_remove_node(obj->stolen);
i915_gem_stolen_remove_node(dev_priv, obj->stolen);
kfree(obj->stolen);
obj->stolen = NULL;
}
417,7 → 502,6
if (obj->pages == NULL)
goto cleanup;
 
obj->has_dma_mapping = true;
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
 
450,8 → 534,7
if (!stolen)
return NULL;
 
ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
4096, DRM_MM_SEARCH_DEFAULT);
ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
if (ret) {
kfree(stolen);
return NULL;
461,7 → 544,7
if (obj)
return obj;
 
drm_mm_remove_node(stolen);
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
return NULL;
}
486,10 → 569,8
stolen_offset, gtt_offset, size);
 
/* KISS and expect everything to be page-aligned */
BUG_ON(stolen_offset & 4095);
BUG_ON(size & 4095);
 
if (WARN_ON(size == 0))
if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
WARN_ON(stolen_offset & 4095))
return NULL;
 
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
498,7 → 579,9
 
stolen->start = stolen_offset;
stolen->size = size;
mutex_lock(&dev_priv->mm.stolen_lock);
ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
mutex_unlock(&dev_priv->mm.stolen_lock);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen space\n");
kfree(stolen);
508,7 → 591,7
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj == NULL) {
DRM_DEBUG_KMS("failed to allocate stolen object\n");
drm_mm_remove_node(stolen);
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
return NULL;
}
520,7 → 603,7
vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_out;
goto err;
}
 
/* To simplify the initialisation sequence between KMS and GTT,
534,23 → 617,20
ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
goto err_vma;
goto err;
}
}
 
vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
}
 
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
i915_gem_object_pin_pages(obj);
 
return obj;
 
err_vma:
i915_gem_vma_destroy(vma);
err_out:
drm_mm_remove_node(stolen);
kfree(stolen);
err:
drm_gem_object_unreference(&obj->base);
return NULL;
}
/drivers/video/drm/i915/i915_gem_tiling.c
31,202 → 31,33
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
/** @file i915_gem_tiling.c
/**
* DOC: buffer object tiling
*
* Support for managing tiling state of buffer objects.
* i915_gem_set_tiling() and i915_gem_get_tiling() are the userspace interface
* to declare fence register requirements.
*
* The idea behind tiling is to increase cache hit rates by rearranging
* pixel data so that a group of pixel accesses are in the same cacheline.
* Performance improvement from doing this on the back/depth buffer is on
* the order of 30%.
* In principle GEM doesn't care at all about the internal data layout of an
* object, and hence it also doesn't care about tiling or swizzling. There are
* two exceptions:
*
* Intel architectures make this somewhat more complicated, though, by
* adjustments made to addressing of data when the memory is in interleaved
* mode (matched pairs of DIMMS) to improve memory bandwidth.
* For interleaved memory, the CPU sends every sequential 64 bytes
* to an alternate memory channel so it can get the bandwidth from both.
* - For X and Y tiling the hardware provides detilers for CPU access, so-called
* fences. Since there is only a limited number of them the kernel must manage
* these, and therefore userspace must tell the kernel the object tiling if it
* wants to use fences for detiling.
* - Gen3 and gen4 platforms have a swizzling pattern for tiled objects which
* depends upon the physical page frame number. When swapping such objects the
* page frame number might change and the kernel must be able to fix this up,
* and hence needs to know the tiling. Note that on a subset of platforms with
* asymmetric memory channel population the swizzling pattern changes in an
* unknown way, and for those the kernel simply forbids swapping completely.
*
* The GPU also rearranges its accesses for increased bandwidth to interleaved
* memory, and it matches what the CPU does for non-tiled. However, when tiled
* it does it a little differently, since one walks addresses not just in the
* X direction but also Y. So, along with alternating channels when bit
* 6 of the address flips, it also alternates when other bits flip -- Bits 9
* (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
* are common to both the 915 and 965-class hardware.
*
* The CPU also sometimes XORs in higher bits as well, to improve
* bandwidth doing strided access like we do so frequently in graphics. This
* is called "Channel XOR Randomization" in the MCH documentation. The result
* is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
* decode.
*
* All of this bit 6 XORing has an effect on our memory management,
* as we need to make sure that the 3d driver can correctly address object
* contents.
*
* If we don't have interleaved memory, all tiling is safe and no swizzling is
* required.
*
* When bit 17 is XORed in, we simply refuse to tile at all. Bit
* 17 is not just a page offset, so as we page an object out and back in,
* individual pages in it will have different bit 17 addresses, resulting in
* each 64 bytes being swapped with its neighbor!
*
* Otherwise, if interleaved, we have to tell the 3d driver what the address
* swizzling it needs to do is, since it's writing with the CPU to the pages
* (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
* pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
* required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
* to match what the GPU expects.
* Since neither of these applies to the new tiling layouts on modern platforms,
* like W, Ys and Yf tiling, GEM only allows object tiling to be set to X or Y
* tiled. Anything else can be handled in userspace entirely without the
* kernel's involvement.
*/
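/*
 * Illustrative userspace sketch (an addition, not part of this file):
 * declaring X tiling for an object through the ioctl interface described
 * above. It assumes a valid GEM handle, libdrm's drmIoctl() and the uapi
 * definitions from <drm/i915_drm.h>; the helper name is hypothetical.
 */
#if 0
#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int set_x_tiling(int fd, uint32_t handle, uint32_t stride)
{
	struct drm_i915_gem_set_tiling arg = {
		.handle = handle,
		.tiling_mode = I915_TILING_X,
		.stride = stride,	/* must pass the kernel's pitch checks */
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg))
		return -errno;

	/* On success the kernel reports, in arg.swizzle_mode, the bit 6
	 * swizzling userspace must apply for CPU access. */
	return 0;
}
#endif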
 
/**
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
/*
* On BDW+, swizzling is not used. We leave the CPU memory
* controller in charge of optimizing memory accesses without
* the extra address manipulation GPU side.
*
* VLV and CHV don't have GPU swizzling.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (INTEL_INFO(dev)->gen >= 6) {
if (dev_priv->preserve_bios_swizzle) {
if (I915_READ(DISP_ARB_CTL) &
DISP_TILE_SURFACE_SWIZZLING) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
} else {
uint32_t dimm_c0, dimm_c1;
dimm_c0 = I915_READ(MAD_DIMM_C0);
dimm_c1 = I915_READ(MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
/* Enable swizzling when the channels are populated
* with identically sized dimms. We don't need to check
* the 3rd channel because no cpu with gpu attached
* ships in that configuration. Also, swizzling only
* makes sense for 2 channels anyway. */
if (dimm_c0 == dimm_c1) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
} else if (IS_GEN5(dev)) {
/* On Ironlake, whatever the DRAM config, the GPU always does
* the same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if (IS_GEN2(dev)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
uint32_t dcc;
 
/* On 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
* the GPU's interleave is bit 9 and 10 for X tiled, and bit
* 9 for Y tiled. The CPU's interleave is independent, and
* can be based on either bit 11 (haven't seen this yet) or
* bit 17 (common).
*/
dcc = I915_READ(DCC);
switch (dcc & DCC_ADDRESSING_MODE_MASK) {
case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
break;
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
if (dcc & DCC_CHANNEL_XOR_DISABLE) {
/* This is the base swizzling by the GPU for
* tiled buffers.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
/* Bit 11 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
} else {
/* Bit 17 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
swizzle_y = I915_BIT_6_SWIZZLE_9_17;
}
break;
}
 
/* check for L-shaped memory aka modified enhanced addressing */
if (IS_GEN4(dev)) {
uint32_t ddc2 = I915_READ(DCC2);
 
if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
}
 
if (dcc == 0xffffffff) {
DRM_ERROR("Couldn't read from MCHBAR. "
"Disabling tiling.\n");
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
} else {
/* The 965, G33, and newer, have a very flexible memory
* configuration. It will enable dual-channel mode
* (interleaving) on as much memory as it can, and the GPU
* will additionally sometimes enable different bit 6
* swizzling for tiled objects from the CPU.
*
* Here's what I found on the G965:
* slot fill memory size swizzling
* 0A 0B 1A 1B 1-ch 2-ch
* 512 0 0 0 512 0 O
* 512 0 512 0 16 1008 X
* 512 0 0 512 16 1008 X
* 0 512 0 512 16 1008 X
* 1024 1024 1024 0 2048 1024 O
*
* We could probably detect this based on either the DRB
* matching, which was the case for the swizzling required in
* the table above, or from the 1-ch value being less than
* the minimum size of a rank.
*/
if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
}
 
dev_priv->mm.bit_6_swizzle_x = swizzle_x;
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
 
/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
313,8 → 144,18
}
 
/**
* i915_gem_set_tiling - IOCTL handler to set tiling mode
* @dev: DRM device
* @data: data pointer for the ioctl
* @file: DRM file for the ioctl call
*
* Sets the tiling mode of an object, returning the required swizzling of
* bit 6 of addresses in the object.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
335,9 → 176,10
return -EINVAL;
}
 
if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
mutex_lock(&dev->struct_mutex);
if (obj->pin_display || obj->framebuffer_references) {
ret = -EBUSY;
goto err;
}
 
if (args->tiling_mode == I915_TILING_NONE) {
369,7 → 211,6
}
}
 
mutex_lock(&dev->struct_mutex);
if (args->tiling_mode != obj->tiling_mode ||
args->stride != obj->stride) {
/* We need to rebind the object if its current allocation
399,7 → 240,7
}
 
obj->fence_dirty =
obj->last_fenced_seqno ||
obj->last_fenced_req ||
obj->fence_reg != I915_FENCE_REG_NONE;
 
obj->tiling_mode = args->tiling_mode;
424,6 → 265,7
obj->bit_17 = NULL;
}
 
err:
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
 
431,7 → 273,17
}
 
/**
* i915_gem_get_tiling - IOCTL handler to get tiling mode
* @dev: DRM device
* @data: data pointer for the ioctl
* @file: DRM file for the ioctl call
*
* Returns the current tiling mode and required bit 6 swizzling for the object.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
463,6 → 315,9
}
 
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
else
args->phys_swizzle_mode = args->swizzle_mode;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
474,77 → 329,3
 
return 0;
}
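 
/*
 * Illustrative userspace sketch (not part of this diff) of how the two
 * ioctl handlers above are reached through the uapi; assumes the standard
 * i915_drm.h definitions, with error handling trimmed.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>
 
static int set_x_tiling(int drm_fd, uint32_t handle, uint32_t stride)
{
	struct drm_i915_gem_set_tiling arg = {
		.handle = handle,
		.tiling_mode = I915_TILING_X,
		.stride = stride,
	};
 
	/* on success the kernel writes back the swizzle mode it chose */
	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg);
}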
 
#if 0
/**
* Swap every 64 bytes of this page around, to account for it having a new
* bit 17 of its physical address and therefore being interpreted differently
* by the GPU.
*/
static void
i915_gem_swizzle_page(struct page *page)
{
char temp[64];
char *vaddr;
int i;
 
vaddr = kmap(page);
 
for (i = 0; i < PAGE_SIZE; i += 128) {
memcpy(temp, &vaddr[i], 64);
memcpy(&vaddr[i], &vaddr[i + 64], 64);
memcpy(&vaddr[i + 64], temp, 64);
}
 
kunmap(page);
}
 
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct sg_page_iter sg_iter;
int i;
 
if (obj->bit_17 == NULL)
return;
 
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
struct page *page = sg_page_iter_page(&sg_iter);
char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(page);
set_page_dirty(page);
}
i++;
}
}
 
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct sg_page_iter sg_iter;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
 
if (obj->bit_17 == NULL) {
obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
sizeof(long), GFP_KERNEL);
if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
return;
}
}
 
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
i++;
}
}
#endif
/drivers/video/drm/i915/i915_gpu_error.c
30,10 → 30,6
#include "i915_drv.h"
 
#if 0
static const char *yesno(int v)
{
return v ? "yes" : "no";
}
 
static const char *ring_str(int ring)
{
192,15 → 188,21
struct drm_i915_error_buffer *err,
int count)
{
int i;
 
err_printf(m, " %s [%d]:\n", name, count);
 
while (count--) {
err_printf(m, " %08x %8u %02x %02x %x %x",
err->gtt_offset,
err_printf(m, " %08x_%08x %8u %02x %02x [ ",
upper_32_bits(err->gtt_offset),
lower_32_bits(err->gtt_offset),
err->size,
err->read_domains,
err->write_domain,
err->rseqno, err->wseqno);
err->write_domain);
for (i = 0; i < I915_NUM_RINGS; i++)
err_printf(m, "%02x ", err->rseqno[i]);
 
err_printf(m, "] %02x", err->wseqno);
err_puts(m, pin_flag(err->pinned));
err_puts(m, tiling_flag(err->tiling));
err_puts(m, dirty_flag(err->dirty));
242,11 → 244,16
 
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
struct drm_i915_error_ring *ring)
struct drm_i915_error_state *error,
int ring_idx)
{
struct drm_i915_error_ring *ring = &error->ring[ring_idx];
 
if (!ring->valid)
return;
 
err_printf(m, "%s command stream:\n", ring_str(ring_idx));
err_printf(m, " START: 0x%08x\n", ring->start);
err_printf(m, " HEAD: 0x%08x\n", ring->head);
err_printf(m, " TAIL: 0x%08x\n", ring->tail);
err_printf(m, " CTL: 0x%08x\n", ring->ctl);
329,6 → 336,7
struct drm_device *dev = error_priv->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error = error_priv->error;
struct drm_i915_error_object *obj;
int i, j, offset, elt;
int max_hangcheck_score;
 
358,8 → 366,15
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
if (INTEL_INFO(dev)->gen >= 8) {
for (i = 0; i < 4; i++)
err_printf(m, "GTIER gt %d: 0x%08x\n", i,
error->gtier[i]);
} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
375,6 → 390,11
 
if (INTEL_INFO(dev)->gen >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
 
if (INTEL_INFO(dev)->gen >= 8)
err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
error->fault_data1, error->fault_data0);
 
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
 
381,24 → 401,22
if (INTEL_INFO(dev)->gen == 7)
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
err_printf(m, "%s command stream:\n", ring_str(i));
i915_ring_error_state(m, dev, &error->ring[i]);
}
for (i = 0; i < ARRAY_SIZE(error->ring); i++)
i915_ring_error_state(m, dev, error, i);
 
if (error->active_bo)
for (i = 0; i < error->vm_count; i++) {
err_printf(m, "vm[%d]\n", i);
 
print_error_buffers(m, "Active",
error->active_bo[0],
error->active_bo_count[0]);
error->active_bo[i],
error->active_bo_count[i]);
 
if (error->pinned_bo)
print_error_buffers(m, "Pinned",
error->pinned_bo[0],
error->pinned_bo_count[0]);
error->pinned_bo[i],
error->pinned_bo_count[i]);
}
 
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
struct drm_i915_error_object *obj;
 
obj = error->ring[i].batchbuffer;
if (obj) {
err_puts(m, dev_priv->ring[i].name);
406,8 → 424,9
err_printf(m, " (submitted by %s [%d])",
error->ring[i].comm,
error->ring[i].pid);
err_printf(m, " --- gtt_offset = 0x%08x\n",
obj->gtt_offset);
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
 
414,7 → 433,8
obj = error->ring[i].wa_batchbuffer;
if (obj) {
err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
dev_priv->ring[i].name, obj->gtt_offset);
dev_priv->ring[i].name,
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
 
433,22 → 453,28
if ((obj = error->ring[i].ringbuffer)) {
err_printf(m, "%s --- ringbuffer = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
 
if ((obj = error->ring[i].hws_page)) {
err_printf(m, "%s --- HW Status = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
u64 hws_offset = obj->gtt_offset;
u32 *hws_page = &obj->pages[0][0];
 
if (i915.enable_execlists) {
hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
hws_page = &obj->pages[LRC_PPHWSP_PN][0];
}
err_printf(m, "%s --- HW Status = 0x%08llx\n",
dev_priv->ring[i].name, hws_offset);
offset = 0;
for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
err_printf(m, "[%04x] %08x %08x %08x %08x\n",
offset,
obj->pages[0][elt],
obj->pages[0][elt+1],
obj->pages[0][elt+2],
obj->pages[0][elt+3]);
hws_page[elt],
hws_page[elt+1],
hws_page[elt+2],
hws_page[elt+3]);
offset += 16;
}
}
456,11 → 482,24
if ((obj = error->ring[i].ctx)) {
err_printf(m, "%s --- HW Context = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
}
 
if ((obj = error->semaphore_obj)) {
err_printf(m, "Semaphore page = 0x%08x\n",
lower_32_bits(obj->gtt_offset));
for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
err_printf(m, "[%04x] %08x %08x %08x %08x\n",
elt * 4,
obj->pages[0][elt],
obj->pages[0][elt+1],
obj->pages[0][elt+2],
obj->pages[0][elt+3]);
}
}
 
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
 
475,9 → 514,11
}
 
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
struct drm_i915_private *i915,
size_t count, loff_t pos)
{
memset(ebuf, 0, sizeof(*ebuf));
ebuf->i915 = i915;
 
/* We need to have enough room to store any i915_error_state printf
* so that we can move it to start position.
525,6 → 566,7
 
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
i915_error_object_free(error->ring[i].batchbuffer);
i915_error_object_free(error->ring[i].wa_batchbuffer);
i915_error_object_free(error->ring[i].ringbuffer);
i915_error_object_free(error->ring[i].hws_page);
i915_error_object_free(error->ring[i].ctx);
531,7 → 573,15
kfree(error->ring[i].requests);
}
 
i915_error_object_free(error->semaphore_obj);
 
for (i = 0; i < error->vm_count; i++)
kfree(error->active_bo[i]);
 
kfree(error->active_bo);
kfree(error->active_bo_count);
kfree(error->pinned_bo);
kfree(error->pinned_bo_count);
kfree(error->overlay);
kfree(error->display);
kfree(error);
538,24 → 588,56
}
 
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
i915_error_object_create(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src,
struct i915_address_space *vm,
const int num_pages)
struct i915_address_space *vm)
{
struct drm_i915_error_object *dst;
int i;
u32 reloc_offset;
struct i915_vma *vma = NULL;
int num_pages;
bool use_ggtt;
int i = 0;
u64 reloc_offset;
 
if (src == NULL || src->pages == NULL)
return NULL;
 
num_pages = src->base.size >> PAGE_SHIFT;
 
dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;
 
reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm);
for (i = 0; i < num_pages; i++) {
if (i915_gem_obj_bound(src, vm))
dst->gtt_offset = i915_gem_obj_offset(src, vm);
else
dst->gtt_offset = -1;
 
reloc_offset = dst->gtt_offset;
if (i915_is_ggtt(vm))
vma = i915_gem_obj_to_ggtt(src);
use_ggtt = (src->cache_level == I915_CACHE_NONE &&
vma && (vma->bound & GLOBAL_BIND) &&
reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
 
/* Cannot access stolen address directly, try to use the aperture */
if (src->stolen) {
use_ggtt = true;
 
if (!(vma && vma->bound & GLOBAL_BIND))
goto unwind;
 
reloc_offset = i915_gem_obj_ggtt_offset(src);
if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
goto unwind;
}
 
/* Cannot access snooped pages through the aperture */
if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
goto unwind;
 
dst->page_count = num_pages;
while (num_pages--) {
unsigned long flags;
void *d;
 
564,10 → 646,7
goto unwind;
 
local_irq_save(flags);
if (src->cache_level == I915_CACHE_NONE &&
reloc_offset < dev_priv->gtt.mappable_end &&
src->has_global_gtt_mapping &&
i915_is_ggtt(vm)) {
if (use_ggtt) {
void __iomem *s;
 
/* Simply ignore tiling or any overlapping fence.
579,14 → 658,6
reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
} else if (src->stolen) {
unsigned long offset;
 
offset = dev_priv->mm.stolen_base;
offset += src->stolen->start;
offset += i << PAGE_SHIFT;
 
memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
} else {
struct page *page;
void *s;
603,11 → 674,9
}
local_irq_restore(flags);
 
dst->pages[i] = d;
 
dst->pages[i++] = d;
reloc_offset += PAGE_SIZE;
}
dst->page_count = num_pages;
 
return dst;
 
617,22 → 686,21
kfree(dst);
return NULL;
}
#define i915_error_object_create(dev_priv, src, vm) \
i915_error_object_create_sized((dev_priv), (src), (vm), \
(src)->base.size>>PAGE_SHIFT)
 
#define i915_error_ggtt_object_create(dev_priv, src) \
i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \
(src)->base.size>>PAGE_SHIFT)
i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
 
static void capture_bo(struct drm_i915_error_buffer *err,
struct drm_i915_gem_object *obj)
struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
int i;
 
err->size = obj->base.size;
err->name = obj->base.name;
err->rseqno = obj->last_read_seqno;
err->wseqno = obj->last_write_seqno;
err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
for (i = 0; i < I915_NUM_RINGS; i++)
err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
err->fence_reg = obj->fence_reg;
639,13 → 707,12
err->pinned = 0;
if (i915_gem_obj_is_pinned(obj))
err->pinned = 1;
if (obj->user_pin_count > 0)
err->pinned = -1;
err->tiling = obj->tiling_mode;
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
err->ring = obj->ring ? obj->ring->id : -1;
err->ring = obj->last_write_req ?
i915_gem_request_get_ring(obj->last_write_req)->id : -1;
err->cache_level = obj->cache_level;
}
 
656,7 → 723,7
int i = 0;
 
list_for_each_entry(vma, head, mm_list) {
capture_bo(err++, vma->obj);
capture_bo(err++, vma);
if (++i == count)
break;
}
665,21 → 732,25
}
 
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
int count, struct list_head *head)
int count, struct list_head *head,
struct i915_address_space *vm)
{
struct drm_i915_gem_object *obj;
int i = 0;
struct drm_i915_error_buffer * const first = err;
struct drm_i915_error_buffer * const last = err + count;
 
list_for_each_entry(obj, head, global_list) {
if (!i915_gem_obj_is_pinned(obj))
continue;
struct i915_vma *vma;
 
capture_bo(err++, obj);
if (++i == count)
if (err == last)
break;
 
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (vma->vm == vm && vma->pin_count > 0)
capture_bo(err++, vma);
}
 
return i;
return err - first;
}
 
/* Generate a semi-unique error code. The code is not meant to have meaning. The
721,34 → 792,71
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 8:
case 7:
case 6:
if (IS_GEN3(dev) || IS_GEN2(dev)) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
case 2:
for (i = 0; i < 8; i++)
error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
error->fence[i] = I915_READ(FENCE_REG(i));
} else if (IS_GEN5(dev) || IS_GEN4(dev)) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
} else if (INTEL_INFO(dev)->gen >= 6) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
}
}
 
default:
BUG();
 
static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
struct intel_engine_cs *ring,
struct drm_i915_error_ring *ering)
{
struct intel_engine_cs *to;
int i;
 
if (!i915_semaphore_is_enabled(dev_priv->dev))
return;
 
if (!error->semaphore_obj)
error->semaphore_obj =
i915_error_ggtt_object_create(dev_priv,
dev_priv->semaphore_obj);
 
for_each_ring(to, dev_priv, i) {
int idx;
u16 signal_offset;
u32 *tmp;
 
if (ring == to)
continue;
 
signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
/ 4;
tmp = error->semaphore_obj->pages[0];
idx = intel_ring_sync_index(ring, to);
 
ering->semaphore_mboxes[idx] = tmp[signal_offset];
ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
}
}
 
static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
struct intel_engine_cs *ring,
struct drm_i915_error_ring *ering)
{
ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
 
if (HAS_VEBOX(dev_priv->dev)) {
ering->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(ring->mmio_base));
ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
}
}
 
static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_error_state *error,
struct intel_engine_cs *ring,
struct drm_i915_error_ring *ering)
{
757,20 → 865,12
if (INTEL_INFO(dev)->gen >= 6) {
ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
ering->semaphore_mboxes[0]
= I915_READ(RING_SYNC_0(ring->mmio_base));
ering->semaphore_mboxes[1]
= I915_READ(RING_SYNC_1(ring->mmio_base));
ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
if (INTEL_INFO(dev)->gen >= 8)
gen8_record_semaphore_state(dev_priv, error, ring, ering);
else
gen6_record_semaphore_state(dev_priv, ring, ering);
}
 
if (HAS_VEBOX(dev)) {
ering->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(ring->mmio_base));
ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
}
 
if (INTEL_INFO(dev)->gen >= 4) {
ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
787,7 → 887,7
ering->faddr = I915_READ(DMA_FADD_I8XX);
ering->ipeir = I915_READ(IPEIR);
ering->ipehr = I915_READ(IPEHR);
ering->instdone = I915_READ(INSTDONE);
ering->instdone = I915_READ(GEN2_INSTDONE);
}
 
ering->waiting = waitqueue_active(&ring->irq_queue);
794,6 → 894,7
ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
ering->seqno = ring->get_seqno(ring, false);
ering->acthd = intel_ring_get_active_head(ring);
ering->start = I915_READ_START(ring);
ering->head = I915_READ_HEAD(ring);
ering->tail = I915_READ_TAIL(ring);
ering->ctl = I915_READ_CTL(ring);
827,9 → 928,6
ering->hws = I915_READ(mmio);
}
 
ering->cpu_ring_head = ring->buffer->head;
ering->cpu_ring_tail = ring->buffer->tail;
 
ering->hangcheck_score = ring->hangcheck.score;
ering->hangcheck_action = ring->hangcheck.action;
 
838,8 → 936,13
 
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
 
switch (INTEL_INFO(dev)->gen) {
case 8:
if (IS_GEN6(dev))
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(ring));
else if (IS_GEN7(dev))
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(ring));
else if (INTEL_INFO(dev)->gen >= 8)
for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] =
I915_READ(GEN8_RING_PDP_UDW(ring, i));
847,18 → 950,8
ering->vm_info.pdp[i] |=
I915_READ(GEN8_RING_PDP_LDW(ring, i));
}
break;
case 7:
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(ring));
break;
case 6:
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(ring));
break;
}
}
}
 
 
static void i915_gem_record_active_context(struct intel_engine_cs *ring,
873,6 → 966,9
return;
 
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (!i915_gem_obj_ggtt_bound(obj))
continue;
 
if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
break;
889,6 → 985,7
 
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_ringbuffer *rbuf;
 
error->ring[i].pid = -1;
 
897,10 → 994,16
 
error->ring[i].valid = true;
 
i915_record_ring_state(dev, ring, &error->ring[i]);
i915_record_ring_state(dev, error, ring, &error->ring[i]);
 
request = i915_gem_find_active_request(ring);
if (request) {
struct i915_address_space *vm;
 
vm = request->ctx && request->ctx->ppgtt ?
&request->ctx->ppgtt->base :
&dev_priv->gtt.base;
 
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
* by userspace.
908,22 → 1011,18
error->ring[i].batchbuffer =
i915_error_object_create(dev_priv,
request->batch_obj,
request->ctx ?
request->ctx->vm :
&dev_priv->gtt.base);
vm);
 
if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
ring->scratch.obj)
if (HAS_BROKEN_CS_TLB(dev_priv->dev))
error->ring[i].wa_batchbuffer =
i915_error_ggtt_object_create(dev_priv,
ring->scratch.obj);
 
if (request->file_priv) {
if (request->pid) {
struct task_struct *task;
 
rcu_read_lock();
task = pid_task(request->file_priv->file->pid,
PIDTYPE_PID);
task = pid_task(request->pid, PIDTYPE_PID);
if (task) {
strcpy(error->ring[i].comm, task->comm);
error->ring[i].pid = task->pid;
932,10 → 1031,25
}
}
 
if (i915.enable_execlists) {
/* TODO: This is only a small fix to keep basic error
* capture working, but we need to add more information
* for it to be useful (e.g. dump the context being
* executed).
*/
if (request)
rbuf = request->ctx->engine[ring->id].ringbuf;
else
rbuf = ring->default_context->engine[ring->id].ringbuf;
} else
rbuf = ring->buffer;
 
error->ring[i].cpu_ring_head = rbuf->head;
error->ring[i].cpu_ring_tail = rbuf->tail;
 
error->ring[i].ringbuffer =
i915_error_ggtt_object_create(dev_priv, ring->buffer->obj);
i915_error_ggtt_object_create(dev_priv, rbuf->obj);
 
if (ring->status_page.obj)
error->ring[i].hws_page =
i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
 
961,7 → 1075,7
erq = &error->ring[i].requests[count++];
erq->seqno = request->seqno;
erq->jiffies = request->emitted_jiffies;
erq->tail = request->tail;
erq->tail = request->postfix;
}
}
}
983,9 → 1097,12
list_for_each_entry(vma, &vm->active_list, mm_list)
i++;
error->active_bo_count[ndx] = i;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (i915_gem_obj_is_pinned(obj))
 
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (vma->vm == vm && vma->pin_count > 0)
i++;
}
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
 
if (i) {
1004,7 → 1121,7
error->pinned_bo_count[ndx] =
capture_pinned_bo(pinned_bo,
error->pinned_bo_count[ndx],
&dev_priv->mm.bound_list);
&dev_priv->mm.bound_list, vm);
error->active_bo[ndx] = active_bo;
error->pinned_bo[ndx] = pinned_bo;
}
1025,9 → 1142,26
error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
GFP_ATOMIC);
 
if (error->active_bo == NULL ||
error->pinned_bo == NULL ||
error->active_bo_count == NULL ||
error->pinned_bo_count == NULL) {
kfree(error->active_bo);
kfree(error->active_bo_count);
kfree(error->pinned_bo);
kfree(error->pinned_bo_count);
 
error->active_bo = NULL;
error->active_bo_count = NULL;
error->pinned_bo = NULL;
error->pinned_bo_count = NULL;
} else {
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
i915_gem_capture_vm(dev_priv, error, vm, i++);
 
error->vm_count = cnt;
}
}
 
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
1034,6 → 1168,7
struct drm_i915_error_state *error)
{
struct drm_device *dev = dev_priv->dev;
int i;
 
/* General organization
* 1. Registers specific to a single generation
1045,7 → 1180,8
 
/* 1: Registers specific to a single generation */
if (IS_VALLEYVIEW(dev)) {
error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
error->gtier[0] = I915_READ(GTIER);
error->ier = I915_READ(VLV_IER);
error->forcewake = I915_READ(FORCEWAKE_VLV);
}
 
1052,6 → 1188,11
if (IS_GEN7(dev))
error->err_int = I915_READ(GEN7_ERR_INT);
 
if (INTEL_INFO(dev)->gen >= 8) {
error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
}
 
if (IS_GEN6(dev)) {
error->forcewake = I915_READ(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL);
1078,16 → 1219,18
if (HAS_HW_CONTEXTS(dev))
error->ccid = I915_READ(CCID);
 
if (HAS_PCH_SPLIT(dev))
error->ier = I915_READ(DEIER) | I915_READ(GTIER);
else {
if (IS_GEN2(dev))
if (INTEL_INFO(dev)->gen >= 8) {
error->ier = I915_READ(GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
error->gtier[i] = I915_READ(GEN8_GT_IER(i));
} else if (HAS_PCH_SPLIT(dev)) {
error->ier = I915_READ(DEIER);
error->gtier[0] = I915_READ(GTIER);
} else if (IS_GEN2(dev)) {
error->ier = I915_READ16(IER);
else
} else if (!IS_VALLEYVIEW(dev)) {
error->ier = I915_READ(IER);
}
 
/* 4: Everything else */
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
 
1106,7 → 1249,8
ecode = i915_error_generate_code(dev_priv, error, &ring_id);
 
len = scnprintf(error->error_msg, sizeof(error->error_msg),
"GPU HANG: ecode %d:0x%08x", ring_id, ecode);
"GPU HANG: ecode %d:%d:0x%08x",
INTEL_INFO(dev)->gen, ring_id, ecode);
 
if (ring_id != -1 && error->ring[ring_id].pid != -1)
len += scnprintf(error->error_msg + len,
1124,6 → 1268,10
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
error->iommu = intel_iommu_gfx_mapped;
#endif
error->reset_count = i915_reset_count(&dev_priv->gpu_error);
error->suspend_count = dev_priv->suspend_count;
}
1194,13 → 1342,12
struct i915_error_state_file_priv *error_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
spin_lock_irq(&dev_priv->gpu_error.lock);
error_priv->error = dev_priv->gpu_error.first_error;
if (error_priv->error)
kref_get(&error_priv->error->ref);
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
spin_unlock_irq(&dev_priv->gpu_error.lock);
 
}
 
1214,28 → 1361,27
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
spin_lock_irq(&dev_priv->gpu_error.lock);
error = dev_priv->gpu_error.first_error;
dev_priv->gpu_error.first_error = NULL;
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
spin_unlock_irq(&dev_priv->gpu_error.lock);
 
if (error)
kref_put(&error->ref, i915_error_state_free);
}
#endif
 
const char *i915_cache_level_str(int type)
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
switch (type) {
case I915_CACHE_NONE: return " uncached";
case I915_CACHE_LLC: return " snooped or LLC";
case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
case I915_CACHE_L3_LLC: return " L3+LLC";
case I915_CACHE_WT: return " WT";
default: return "";
}
}
#endif
 
/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
1243,26 → 1389,15
struct drm_i915_private *dev_priv = dev->dev_private;
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
 
switch (INTEL_INFO(dev)->gen) {
case 2:
case 3:
instdone[0] = I915_READ(INSTDONE);
break;
case 4:
case 5:
case 6:
instdone[0] = I915_READ(INSTDONE_I965);
instdone[1] = I915_READ(INSTDONE1);
break;
default:
WARN_ONCE(1, "Unsupported platform\n");
case 7:
case 8:
case 9:
instdone[0] = I915_READ(GEN7_INSTDONE_1);
if (IS_GEN2(dev) || IS_GEN3(dev))
instdone[0] = I915_READ(GEN2_INSTDONE);
else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
instdone[1] = I915_READ(GEN4_INSTDONE1);
} else if (INTEL_INFO(dev)->gen >= 7) {
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
break;
}
}
/drivers/video/drm/i915/i915_guc_reg.h
0,0 → 1,109
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef _I915_GUC_REG_H_
#define _I915_GUC_REG_H_
 
/* Definitions of GuC H/W registers, bits, etc */
 
#define GUC_STATUS 0xc000
#define GS_BOOTROM_SHIFT 1
#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT)
#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT)
#define GS_UKERNEL_SHIFT 8
#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_DPC_ERROR (0x60 << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT)
#define GS_MIA_SHIFT 16
#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT)
 
#define SOFT_SCRATCH(n) (0xc180 + ((n) * 4))
 
#define UOS_RSA_SCRATCH(i) (0xc200 + (i) * 4)
#define DMA_ADDR_0_LOW 0xc300
#define DMA_ADDR_0_HIGH 0xc304
#define DMA_ADDR_1_LOW 0xc308
#define DMA_ADDR_1_HIGH 0xc30c
#define DMA_ADDRESS_SPACE_WOPCM (7 << 16)
#define DMA_ADDRESS_SPACE_GTT (8 << 16)
#define DMA_COPY_SIZE 0xc310
#define DMA_CTRL 0xc314
#define UOS_MOVE (1<<4)
#define START_DMA (1<<0)
#define DMA_GUC_WOPCM_OFFSET 0xc340
#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
#define GUC_MAX_IDLE_COUNT 0xC3E4
 
#define GUC_WOPCM_SIZE 0xc050
#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
 
/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE)
 
#define GEN8_GT_PM_CONFIG 0x138140
#define GEN9LP_GT_PM_CONFIG 0x138140
#define GEN9_GT_PM_CONFIG 0x13816c
#define GT_DOORBELL_ENABLE (1<<0)
 
#define GEN8_GTCR 0x4274
#define GEN8_GTCR_INVALIDATE (1<<0)
 
#define GUC_ARAT_C6DIS 0xA178
 
#define GUC_SHIM_CONTROL 0xc064
#define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0)
#define GUC_ENABLE_READ_CACHE_LOGIC (1<<1)
#define GUC_ENABLE_MIA_CACHING (1<<2)
#define GUC_GEN10_MSGCH_ENABLE (1<<4)
#define GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA (1<<9)
#define GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA (1<<10)
#define GUC_ENABLE_MIA_CLOCK_GATING (1<<15)
#define GUC_GEN10_SHIM_WC_ENABLE (1<<21)
 
#define GUC_SHIM_CONTROL_VALUE (GUC_DISABLE_SRAM_INIT_TO_ZEROES | \
GUC_ENABLE_READ_CACHE_LOGIC | \
GUC_ENABLE_MIA_CACHING | \
GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | \
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \
GUC_ENABLE_MIA_CLOCK_GATING)
 
#define HOST2GUC_INTERRUPT 0xc4c8
#define HOST2GUC_TRIGGER (1<<0)
 
#define DRBMISC1 0x1984
#define DOORBELL_ENABLE (1<<0)
 
#define GEN8_DRBREGL(x) (0x1000 + (x) * 8)
#define GEN8_DRB_VALID (1<<0)
#define GEN8_DRBREGU(x) (GEN8_DRBREGL(x) + 4)
 
#define DE_GUCRMR 0x44054
 
#define GUC_BCS_RCS_IER 0xC550
#define GUC_VCS2_VCS1_IER 0xC554
#define GUC_WD_VECS_IER 0xC558
#define GUC_PM_P24C_IER 0xC55C
 
#endif
/drivers/video/drm/i915/i915_guc_submission.c
0,0 → 1,976
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include <linux/circ_buf.h>
#include "intel_drv.h"
#include "i915_drv.h"
#include "intel_guc.h"
 
/**
* DOC: GuC Client
*
* i915_guc_client:
* We use the term client to avoid confusion with contexts. An i915_guc_client
* is equivalent to the GuC object guc_context_desc. This context descriptor
* is allocated from a pool of 1024 entries. The kernel driver allocates a
* doorbell and a workqueue for it, plus the process descriptor
* (guc_process_desc), which is mapped into client space so that the client
* can write a work item and then ring the doorbell.
*
* To simplify the implementation, we allocate one gem object that contains all
* pages for doorbell, process descriptor and workqueue.
*
* The Scratch registers:
* There are 16 MMIO-based registers starting at 0xC180. The kernel driver
* writes a value to the action register (SOFT_SCRATCH_0) along with any data.
* It then triggers an interrupt on the GuC via another register write
* (0xC4C8). Firmware writes a success/fail code back to the action register
* after processing the request. The kernel driver polls waiting for this
* update and then proceeds.
* See host2guc_action()
*
* Doorbells:
* Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
* mapped into process space.
*
* Work Items:
* There are several types of work items that the host may place into a
* workqueue, each with its own requirements and limitations. Currently only
* WQ_TYPE_INORDER, which represents an in-order queue, is needed to support
* legacy submission via the GuC. The kernel driver packs the ring tail
* pointer and an ELSP context descriptor dword into each work item.
* See guc_add_workqueue_item()
*
*/
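 
/*
 * Condensed view of the submission flow described above (illustrative
 * only; the real steps live in guc_add_workqueue_item() and
 * guc_ring_doorbell() below):
 *
 *   1. reserve space in the client's workqueue
 *   2. write a guc_wq_item (type, target engine, ring tail)
 *   3. publish the new tail via the process descriptor
 *   4. cmpxchg the doorbell cacheline, raising an interrupt in the GuC
 */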
 
/*
* Read GuC command/status register (SOFT_SCRATCH_0)
* Return true if it contains a response rather than a command
*/
static inline bool host2guc_action_response(struct drm_i915_private *dev_priv,
u32 *status)
{
u32 val = I915_READ(SOFT_SCRATCH(0));
*status = val;
return GUC2HOST_IS_RESPONSE(val);
}
 
static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
u32 status;
int i;
int ret;
 
if (WARN_ON(len < 1 || len > 15))
return -EINVAL;
 
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
spin_lock(&dev_priv->guc.host2guc_lock);
 
dev_priv->guc.action_count += 1;
dev_priv->guc.action_cmd = data[0];
 
for (i = 0; i < len; i++)
I915_WRITE(SOFT_SCRATCH(i), data[i]);
 
POSTING_READ(SOFT_SCRATCH(i - 1));
 
I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);
 
/* No HOST2GUC command should take longer than 10ms */
ret = wait_for_atomic(host2guc_action_response(dev_priv, &status), 10);
if (status != GUC2HOST_STATUS_SUCCESS) {
/*
* Either the GuC explicitly returned an error (which
* we convert to -EIO here) or no response at all was
* received within the timeout limit (-ETIMEDOUT)
*/
if (ret != -ETIMEDOUT)
ret = -EIO;
 
DRM_ERROR("GUC: host2guc action 0x%X failed. ret=%d "
"status=0x%08X response=0x%08X\n",
data[0], ret, status,
I915_READ(SOFT_SCRATCH(15)));
 
dev_priv->guc.action_fail += 1;
dev_priv->guc.action_err = ret;
}
dev_priv->guc.action_status = status;
 
spin_unlock(&dev_priv->guc.host2guc_lock);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
return ret;
}
 
/*
* Tell the GuC to allocate or deallocate a specific doorbell
*/
 
static int host2guc_allocate_doorbell(struct intel_guc *guc,
struct i915_guc_client *client)
{
u32 data[2];
 
data[0] = HOST2GUC_ACTION_ALLOCATE_DOORBELL;
data[1] = client->ctx_index;
 
return host2guc_action(guc, data, 2);
}
 
static int host2guc_release_doorbell(struct intel_guc *guc,
struct i915_guc_client *client)
{
u32 data[2];
 
data[0] = HOST2GUC_ACTION_DEALLOCATE_DOORBELL;
data[1] = client->ctx_index;
 
return host2guc_action(guc, data, 2);
}
 
static int host2guc_sample_forcewake(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct drm_device *dev = dev_priv->dev;
u32 data[2];
 
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */
if (!intel_enable_rc6(dev_priv->dev) ||
(IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
(IS_SKL_GT3(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)) ||
(IS_SKL_GT4(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
data[1] = 0;
else
/* bit 0 and 1 are for Render and Media domain separately */
data[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
 
return host2guc_action(guc, data, ARRAY_SIZE(data));
}
 
/*
* Initialise, update, or clear doorbell data shared with the GuC
*
* These functions modify shared data and so need access to the mapped
* client object which contains the page being used for the doorbell
*/
 
static void guc_init_doorbell(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct guc_doorbell_info *doorbell;
void *base;
 
base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
doorbell = base + client->doorbell_offset;
 
doorbell->db_status = 1;
doorbell->cookie = 0;
 
kunmap_atomic(base);
}
 
static int guc_ring_doorbell(struct i915_guc_client *gc)
{
struct guc_process_desc *desc;
union guc_doorbell_qw db_cmp, db_exc, db_ret;
union guc_doorbell_qw *db;
void *base;
int attempt = 2, ret = -EAGAIN;
 
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
desc = base + gc->proc_desc_offset;
 
/* Update the tail so it is visible to GuC */
desc->tail = gc->wq_tail;
 
/* current cookie */
db_cmp.db_status = GUC_DOORBELL_ENABLED;
db_cmp.cookie = gc->cookie;
 
/* cookie to be updated */
db_exc.db_status = GUC_DOORBELL_ENABLED;
db_exc.cookie = gc->cookie + 1;
if (db_exc.cookie == 0)
db_exc.cookie = 1;
 
/* pointer to the current doorbell cacheline */
db = base + gc->doorbell_offset;
 
while (attempt--) {
/* let's ring the doorbell */
db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
db_cmp.value_qw, db_exc.value_qw);
 
/* if the exchange was successfully executed */
if (db_ret.value_qw == db_cmp.value_qw) {
/* db was successfully rung */
gc->cookie = db_exc.cookie;
ret = 0;
break;
}
 
/* XXX: doorbell was lost; we need to acquire it again */
if (db_ret.db_status == GUC_DOORBELL_DISABLED)
break;
 
DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
db_cmp.cookie, db_ret.cookie);
 
/* update the cookie to newly read cookie from GuC */
db_cmp.cookie = db_ret.cookie;
db_exc.cookie = db_ret.cookie + 1;
if (db_exc.cookie == 0)
db_exc.cookie = 1;
}
 
kunmap_atomic(base);
return ret;
}
 
static void guc_disable_doorbell(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct guc_doorbell_info *doorbell;
void *base;
int drbreg = GEN8_DRBREGL(client->doorbell_id);
int value;
 
base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
doorbell = base + client->doorbell_offset;
 
doorbell->db_status = 0;
 
kunmap_atomic(base);
 
I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID);
 
value = I915_READ(drbreg);
WARN_ON((value & GEN8_DRB_VALID) != 0);
 
I915_WRITE(GEN8_DRBREGU(client->doorbell_id), 0);
I915_WRITE(drbreg, 0);
 
/* XXX: wait for any interrupts */
/* XXX: wait for workqueue to drain */
}
 
/*
* Select, assign and release doorbell cachelines
*
* These functions track which doorbell cachelines are in use.
* The data they manipulate is protected by the host2guc lock.
*/
 
static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
{
const uint32_t cacheline_size = cache_line_size();
uint32_t offset;
 
spin_lock(&guc->host2guc_lock);
 
/* Doorbell uses a single cache line within a page */
offset = offset_in_page(guc->db_cacheline);
 
/* Moving to next cache line to reduce contention */
guc->db_cacheline += cacheline_size;
 
spin_unlock(&guc->host2guc_lock);
 
DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
offset, guc->db_cacheline, cacheline_size);
 
return offset;
}
 
static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
{
/*
* The bitmap is split into two halves; the first half is used for
* normal priority contexts, the second half for high-priority ones.
* Note that logically higher priorities are numerically less than
* normal ones, so the test below means "is it high-priority?"
*/
const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
const uint16_t half = GUC_MAX_DOORBELLS / 2;
const uint16_t start = hi_pri ? half : 0;
const uint16_t end = start + half;
uint16_t id;
 
spin_lock(&guc->host2guc_lock);
id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
if (id == end)
id = GUC_INVALID_DOORBELL_ID;
else
bitmap_set(guc->doorbell_bitmap, id, 1);
spin_unlock(&guc->host2guc_lock);
 
DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
hi_pri ? "high" : "normal", id);
 
return id;
}
 
static void release_doorbell(struct intel_guc *guc, uint16_t id)
{
spin_lock(&guc->host2guc_lock);
bitmap_clear(guc->doorbell_bitmap, id, 1);
spin_unlock(&guc->host2guc_lock);
}
 
/*
* Initialise the process descriptor shared with the GuC firmware.
*/
static void guc_init_proc_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct guc_process_desc *desc;
void *base;
 
base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
desc = base + client->proc_desc_offset;
 
memset(desc, 0, sizeof(*desc));
 
/*
* XXX: pDoorbell and WQVBaseAddress are pointers in process address
* space for ring3 clients (set them as in mmap_ioctl) or kernel
* space for kernel clients (map on demand instead? May make debug
* easier to have it mapped).
*/
desc->wq_base_addr = 0;
desc->db_base_addr = 0;
 
desc->context_id = client->ctx_index;
desc->wq_size_bytes = client->wq_size;
desc->wq_status = WQ_STATUS_ACTIVE;
desc->priority = client->priority;
 
kunmap_atomic(base);
}
 
/*
* Initialise/clear the context descriptor shared with the GuC firmware.
*
* This descriptor tells the GuC where (in GGTT space) to find the important
* data structures relating to this client (doorbell, process descriptor,
* write queue, etc).
*/
 
static void guc_init_ctx_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct intel_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
int i;
 
memset(&desc, 0, sizeof(desc));
 
desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
desc.context_id = client->ctx_index;
desc.priority = client->priority;
desc.db_id = client->doorbell_id;
 
for (i = 0; i < I915_NUM_RINGS; i++) {
struct guc_execlist_context *lrc = &desc.lrc[i];
struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
struct intel_engine_cs *ring;
struct drm_i915_gem_object *obj;
uint64_t ctx_desc;
 
/* TODO: We have a design issue to be solved here. We only know
* which engine the user will use once we receive the first
* batch, but the GuC expects the lrc and ring to be pinned
* before we get here. That is not an issue for the default
* context, currently the only owner of a GuC client, but any
* future owner must make sure the lrc is pinned beforehand.
*/
obj = ctx->engine[i].state;
if (!obj)
break; /* XXX: continue? */
 
ring = ringbuf->ring;
ctx_desc = intel_lr_context_descriptor(ctx, ring);
lrc->context_desc = (u32)ctx_desc;
 
/* The state page is after PPHWSP */
lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
(ring->id << GUC_ELC_ENGINE_OFFSET);
 
obj = ringbuf->obj;
 
lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
 
desc.engines_used |= (1 << ring->id);
}
 
WARN_ON(desc.engines_used == 0);
 
/*
* The CPU address is only needed at certain points, so kmap_atomic on
* demand instead of storing it in the ctx descriptor.
* XXX: May make debug easier to have it mapped
*/
desc.db_trigger_cpu = 0;
desc.db_trigger_uk = client->doorbell_offset +
i915_gem_obj_ggtt_offset(client->client_obj);
desc.db_trigger_phy = client->doorbell_offset +
sg_dma_address(client->client_obj->pages->sgl);
 
desc.process_desc = client->proc_desc_offset +
i915_gem_obj_ggtt_offset(client->client_obj);
 
desc.wq_addr = client->wq_offset +
i915_gem_obj_ggtt_offset(client->client_obj);
 
desc.wq_size = client->wq_size;
 
/*
* XXX: Take LRCs from an existing intel_context if this is not an
* IsKMDCreatedContext client
*/
desc.desc_private = (uintptr_t)client;
 
/* Pool context is pinned already */
sg = guc->ctx_pool_obj->pages;
sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
sizeof(desc) * client->ctx_index);
}
 
static void guc_fini_ctx_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct guc_context_desc desc;
struct sg_table *sg;
 
memset(&desc, 0, sizeof(desc));
 
sg = guc->ctx_pool_obj->pages;
sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
sizeof(desc) * client->ctx_index);
}
 
/* Wait for space for a new workqueue item and return its offset */
static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
{
struct guc_process_desc *desc;
void *base;
u32 size = sizeof(struct guc_wq_item);
int ret = 0, timeout_counter = 200;
 
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
desc = base + gc->proc_desc_offset;
 
while (timeout_counter-- > 0) {
ret = wait_for_atomic(CIRC_SPACE(gc->wq_tail, desc->head,
gc->wq_size) >= size, 1);
 
if (!ret) {
*offset = gc->wq_tail;
 
/* advance the tail for next workqueue item */
gc->wq_tail += size;
gc->wq_tail &= gc->wq_size - 1;
 
/* this will break the loop */
timeout_counter = 0;
}
}
 
kunmap_atomic(base);
 
return ret;
}
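 
/*
 * For reference (illustrative, per the linux/circ_buf.h semantics used
 * above): with the power-of-two buffer size, the free-space test reduces to
 *
 *   space = (desc->head - gc->wq_tail - 1) & (gc->wq_size - 1)
 *
 * where gc->wq_tail is the driver's producer offset and desc->head is the
 * GuC's consumer offset. One slot is always left empty so that a full
 * queue is distinguishable from an empty one.
 */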
 
static int guc_add_workqueue_item(struct i915_guc_client *gc,
struct drm_i915_gem_request *rq)
{
enum intel_ring_id ring_id = rq->ring->id;
struct guc_wq_item *wqi;
void *base;
u32 tail, wq_len, wq_off = 0;
int ret;
 
ret = guc_get_workqueue_space(gc, &wq_off);
if (ret)
return ret;
 
/* For now a workqueue item is 4 DWs and the workqueue buffer is 2 pages,
* so the wqi structure can neither straddle a page boundary nor wrap
* around to the beginning. This simplifies the implementation below.
*
* XXX: if that ever changes, we will need to build the data in a
* temporary wqi and copy it into the workqueue buffer DW by DW.
*/
WARN_ON(sizeof(struct guc_wq_item) != 16);
WARN_ON(wq_off & 3);
 
/* wq starts from the page after doorbell / process_desc */
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj,
(wq_off + GUC_DB_SIZE) >> PAGE_SHIFT));
wq_off &= PAGE_SIZE - 1;
wqi = (struct guc_wq_item *)((char *)base + wq_off);
 
/* len does not include the header */
wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
wqi->header = WQ_TYPE_INORDER |
(wq_len << WQ_LEN_SHIFT) |
(ring_id << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;
 
/* The GuC wants only the low-order word of the context descriptor */
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring);
 
/* The GuC firmware wants the tail index in QWords, not bytes */
tail = rq->ringbuf->tail >> 3;
wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
wqi->fence_id = 0; /* XXX: which fence should go here? */
 
kunmap_atomic(base);
 
return 0;
}
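 
/*
 * Layout of the work item written above, for reference (field positions
 * come from the WQ_*_SHIFT definitions in the firmware interface header;
 * exact values omitted here):
 *
 *   header       = WQ_TYPE_INORDER | (len << WQ_LEN_SHIFT) |
 *                  (ring_id << WQ_TARGET_SHIFT) | WQ_NO_WCFLUSH_WAIT
 *   context_desc = low 32 bits of the ELSP context descriptor
 *   ring_tail    = ring tail in QWords, at WQ_RING_TAIL_SHIFT
 *   fence_id     = currently always 0
 */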
 
#define CTX_RING_BUFFER_START 0x08
 
/* Update the ringbuffer pointer in a saved context image */
static void lr_context_update(struct drm_i915_gem_request *rq)
{
enum intel_ring_id ring_id = rq->ring->id;
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state;
struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
struct page *page;
uint32_t *reg_state;
 
BUG_ON(!ctx_obj);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
 
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page);
 
reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
 
kunmap_atomic(reg_state);
}
 
/**
* i915_guc_submit() - Submit commands through GuC
* @client: the guc client through which the commands will go
* @rq: the request carrying the commands and the target engine
*
* Return: 0 on success, otherwise an error code
*/
int i915_guc_submit(struct i915_guc_client *client,
struct drm_i915_gem_request *rq)
{
struct intel_guc *guc = client->guc;
enum intel_ring_id ring_id = rq->ring->id;
unsigned long flags;
int q_ret, b_ret;
 
/* Need this because of the deferred pin ctx and ring */
/* Shall we move this right after ring is pinned? */
lr_context_update(rq);
 
spin_lock_irqsave(&client->wq_lock, flags);
 
q_ret = guc_add_workqueue_item(client, rq);
if (q_ret == 0)
b_ret = guc_ring_doorbell(client);
 
client->submissions[ring_id] += 1;
if (q_ret) {
client->q_fail += 1;
client->retcode = q_ret;
} else if (b_ret) {
client->b_fail += 1;
client->retcode = q_ret = b_ret;
} else {
client->retcode = 0;
}
spin_unlock_irqrestore(&client->wq_lock, flags);
 
spin_lock(&guc->host2guc_lock);
guc->submissions[ring_id] += 1;
guc->last_seqno[ring_id] = rq->seqno;
spin_unlock(&guc->host2guc_lock);
 
return q_ret;
}
 
/*
* Everything below here is concerned with setup & teardown, and is
* therefore not part of the somewhat time-critical batch-submission
* path of i915_guc_submit() above.
*/
 
/**
* gem_allocate_guc_obj() - Allocate gem object for GuC usage
* @dev: drm device
* @size: size of object
*
* This is a wrapper to create a gem obj. In order to use it inside GuC, the
* object needs to be pinned for its whole lifetime, and it must be pinned to
* GTT space outside [0, GUC_WOPCM_TOP) because that range is reserved inside
* the GuC.
*
* Return: A drm_i915_gem_object if successful, otherwise NULL.
*/
static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
 
obj = i915_gem_alloc_object(dev, size);
if (!obj)
return NULL;
 
if (i915_gem_object_get_pages(obj)) {
drm_gem_object_unreference(&obj->base);
return NULL;
}
 
if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
drm_gem_object_unreference(&obj->base);
return NULL;
}
 
/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 
return obj;
}
 
/**
* gem_release_guc_obj() - Release gem object allocated for GuC usage
* @obj: gem obj to be released
*/
static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
{
if (!obj)
return;
 
if (i915_gem_obj_is_pinned(obj))
i915_gem_object_ggtt_unpin(obj);
 
drm_gem_object_unreference(&obj->base);
}
 
static void guc_client_free(struct drm_device *dev,
struct i915_guc_client *client)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
 
if (!client)
return;
 
if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
/*
* First disable the doorbell, then tell the GuC we've
* finished with it, finally deallocate it in our bitmap
*/
guc_disable_doorbell(guc, client);
host2guc_release_doorbell(guc, client);
release_doorbell(guc, client->doorbell_id);
}
 
/*
* XXX: wait for any outstanding submissions before freeing memory.
* Be sure to drop any locks
*/
 
gem_release_guc_obj(client->client_obj);
 
if (client->ctx_index != GUC_INVALID_CTX_ID) {
guc_fini_ctx_desc(guc, client);
ida_simple_remove(&guc->ctx_ids, client->ctx_index);
}
 
kfree(client);
}
 
/**
* guc_client_alloc() - Allocate an i915_guc_client
* @dev: drm device
* @priority: one of the four priority levels: _CRITICAL, _HIGH, _NORMAL and
* _LOW. The kernel client that replaces ExecList submission is created with
* NORMAL priority; a client used by the scheduler can be HIGH, while a
* preemption context can use CRITICAL.
* @ctx: the context that owns the client (we use the default render context)
*
* Return: An i915_guc_client object on success, NULL otherwise.
*/
static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
uint32_t priority,
struct intel_context *ctx)
{
struct i915_guc_client *client;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
struct drm_i915_gem_object *obj;
 
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return NULL;
 
client->doorbell_id = GUC_INVALID_DOORBELL_ID;
client->priority = priority;
client->owner = ctx;
client->guc = guc;
 
client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) {
client->ctx_index = GUC_INVALID_CTX_ID;
goto err;
}
 
/* The first page is doorbell/proc_desc. The two following pages are the wq. */
obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE);
if (!obj)
goto err;
 
client->client_obj = obj;
client->wq_offset = GUC_DB_SIZE;
client->wq_size = GUC_WQ_SIZE;
spin_lock_init(&client->wq_lock);
 
client->doorbell_offset = select_doorbell_cacheline(guc);
 
/*
* Since the doorbell only requires a single cacheline, we can save
* space by putting the application process descriptor in the same
* page. Use the half of the page that doesn't include the doorbell.
*/
if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
client->proc_desc_offset = 0;
else
client->proc_desc_offset = (GUC_DB_SIZE / 2);
 
client->doorbell_id = assign_doorbell(guc, client->priority);
if (client->doorbell_id == GUC_INVALID_DOORBELL_ID)
/* XXX: evict a doorbell instead */
goto err;
 
guc_init_proc_desc(guc, client);
guc_init_ctx_desc(guc, client);
guc_init_doorbell(guc, client);
 
/* XXX: Any cache flushes needed? General domain mgmt calls? */
 
if (host2guc_allocate_doorbell(guc, client))
goto err;
 
DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u db_id %u\n",
priority, client, client->ctx_index, client->doorbell_id);
 
return client;
 
err:
DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);
 
guc_client_free(dev, client);
return NULL;
}
 
static void guc_create_log(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct drm_i915_gem_object *obj;
unsigned long offset;
uint32_t size, flags;
 
if (i915.guc_log_level < GUC_LOG_VERBOSITY_MIN)
return;
 
if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
 
/* The first page is used to save log buffer state. Allocate one
* extra page for each of the other sections in case of overlap */
size = (1 + GUC_LOG_DPC_PAGES + 1 +
GUC_LOG_ISR_PAGES + 1 +
GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
 
obj = guc->log_obj;
if (!obj) {
obj = gem_allocate_guc_obj(dev_priv->dev, size);
if (!obj) {
/* logging will be off */
i915.guc_log_level = -1;
return;
}
 
guc->log_obj = obj;
}
 
/* each allocated unit is a page */
flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
(GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
 
offset = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; /* in pages */
guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
}
 
/*
* Set up the memory resources to be shared with the GuC. At this point,
* we require just one object that can be mapped through the GGTT.
*/
int i915_guc_submission_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const size_t ctxsize = sizeof(struct guc_context_desc);
const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
const size_t gemsize = round_up(poolsize, PAGE_SIZE);
struct intel_guc *guc = &dev_priv->guc;
 
if (!i915.enable_guc_submission)
return 0; /* not enabled */
 
if (guc->ctx_pool_obj)
return 0; /* already allocated */
 
guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize);
if (!guc->ctx_pool_obj)
return -ENOMEM;
 
spin_lock_init(&dev_priv->guc.host2guc_lock);
 
ida_init(&guc->ctx_ids);
 
guc_create_log(guc);
 
return 0;
}
 
int i915_guc_submission_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
struct intel_context *ctx = dev_priv->ring[RCS].default_context;
struct i915_guc_client *client;
 
/* client for execbuf submission */
client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx);
if (!client) {
DRM_ERROR("Failed to create execbuf guc_client\n");
return -ENOMEM;
}
 
guc->execbuf_client = client;
 
host2guc_sample_forcewake(guc, client);
 
return 0;
}
 
void i915_guc_submission_disable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
 
guc_client_free(dev, guc->execbuf_client);
guc->execbuf_client = NULL;
}
 
void i915_guc_submission_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
 
gem_release_guc_obj(dev_priv->guc.log_obj);
guc->log_obj = NULL;
 
if (guc->ctx_pool_obj)
ida_destroy(&guc->ctx_ids);
gem_release_guc_obj(guc->ctx_pool_obj);
guc->ctx_pool_obj = NULL;
}
 
/**
* intel_guc_suspend() - notify GuC entering suspend state
* @dev: drm device
*/
int intel_guc_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
struct intel_context *ctx;
u32 data[3];
 
if (!i915.enable_guc_submission)
return 0;
 
ctx = dev_priv->ring[RCS].default_context;
 
data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
/* any value greater than GUC_POWER_D0 */
data[1] = GUC_POWER_D1;
/* first page is shared data with GuC */
data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
 
return host2guc_action(guc, data, ARRAY_SIZE(data));
}
 
 
/**
* intel_guc_resume() - notify GuC resuming from suspend state
* @dev: drm device
*/
int intel_guc_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
struct intel_context *ctx;
u32 data[3];
 
if (!i915.enable_guc_submission)
return 0;
 
ctx = dev_priv->ring[RCS].default_context;
 
data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
data[1] = GUC_POWER_D0;
/* first page is shared data with GuC */
data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
 
return host2guc_action(guc, data, ARRAY_SIZE(data));
}
/drivers/video/drm/i915/i915_irq.c
43,7 → 43,19
* and related files, but that will be described in separate chapters.
*/
 
static const u32 hpd_ibx[] = {
static const u32 hpd_ilk[HPD_NUM_PINS] = {
[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};
 
static const u32 hpd_ivb[HPD_NUM_PINS] = {
[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};
 
static const u32 hpd_bdw[HPD_NUM_PINS] = {
[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};
 
static const u32 hpd_ibx[HPD_NUM_PINS] = {
[HPD_CRT] = SDE_CRT_HOTPLUG,
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
51,7 → 63,7
[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};
 
static const u32 hpd_cpt[] = {
static const u32 hpd_cpt[HPD_NUM_PINS] = {
[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
59,7 → 71,15
[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};
 
static const u32 hpd_mask_i915[] = {
static const u32 hpd_spt[HPD_NUM_PINS] = {
[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};
 
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
[HPD_CRT] = CRT_HOTPLUG_INT_EN,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
68,7 → 88,7
[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};
 
static const u32 hpd_status_g4x[] = {
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
77,7 → 97,7
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
 
static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
static const u32 hpd_status_i915[HPD_NUM_PINS] = {
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
86,6 → 106,13
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
 
/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};
 
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
110,20 → 137,23
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
u32 val = I915_READ(reg); \
if (val) { \
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
(reg), val); \
I915_WRITE((reg), 0xffffffff); \
POSTING_READ(reg); \
I915_WRITE((reg), 0xffffffff); \
POSTING_READ(reg); \
} \
} while (0)
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = I915_READ(reg);
 
if (val == 0)
return;
 
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
reg, val);
I915_WRITE(reg, 0xffffffff);
POSTING_READ(reg);
I915_WRITE(reg, 0xffffffff);
POSTING_READ(reg);
}
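
/*
 * The doubled write/POSTING_READ pair above matches the "IIR can
 * theoretically queue up two events" note earlier: clearing IIR once may
 * merely expose the second latched event, so the register is cleared
 * twice, with a posting read after each write to flush it out.
 */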
 
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
POSTING_READ(GEN8_##type##_IMR(which)); \
130,7 → 160,7
} while (0)
 
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
gen5_assert_iir_is_zero(dev_priv, type##IIR); \
I915_WRITE(type##IER, (ier_val)); \
I915_WRITE(type##IMR, (imr_val)); \
POSTING_READ(type##IMR); \
139,36 → 169,85
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
 
/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
uint32_t mask,
uint32_t bits)
{
uint32_t val;
 
assert_spin_locked(&dev_priv->irq_lock);
WARN_ON(bits & ~mask);
 
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
val = I915_READ(PORT_HOTPLUG_EN);
val &= ~mask;
val |= bits;
I915_WRITE(PORT_HOTPLUG_EN, val);
}
 
if ((dev_priv->irq_mask & mask) != 0) {
dev_priv->irq_mask &= ~mask;
I915_WRITE(DEIMR, dev_priv->irq_mask);
POSTING_READ(DEIMR);
/**
* i915_hotplug_interrupt_update - update hotplug interrupt enable
* @dev_priv: driver private
* @mask: bits to update
* @bits: bits to enable
* NOTE: the HPD enable bits are modified both inside and outside
* of an interrupt context. To prevent concurrent read-modify-write
* cycles from interfering, these bits are protected by a spinlock. Since
* this function is usually not called from a context where the lock is
* already held, it acquires the lock itself. A non-locking
* version is also available.
*/
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t mask,
uint32_t bits)
{
spin_lock_irq(&dev_priv->irq_lock);
i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
spin_unlock_irq(&dev_priv->irq_lock);
}
}
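
/*
 * Usage sketch (hypothetical caller, not part of this driver): enable
 * one HPD detect bit without disturbing the others. @mask selects the
 * bits this caller owns, @bits gives their new values.
 */
static void example_enable_portb_hpd(struct drm_i915_private *dev_priv)
{
	/* unmask PORTB hotplug detection */
	i915_hotplug_interrupt_update(dev_priv,
				      PORTB_HOTPLUG_INT_EN,
				      PORTB_HOTPLUG_INT_EN);

	/* later: same mask, zero bits, to mask it again */
	i915_hotplug_interrupt_update(dev_priv, PORTB_HOTPLUG_INT_EN, 0);
}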
 
void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
/**
* ilk_update_display_irq - update DEIMR
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
{
uint32_t new_val;
 
assert_spin_locked(&dev_priv->irq_lock);
 
WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
 
if ((dev_priv->irq_mask & mask) != mask) {
dev_priv->irq_mask |= mask;
new_val = dev_priv->irq_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
 
if (new_val != dev_priv->irq_mask) {
dev_priv->irq_mask = new_val;
I915_WRITE(DEIMR, dev_priv->irq_mask);
POSTING_READ(DEIMR);
}
}
 
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
ilk_update_display_irq(dev_priv, mask, mask);
}
 
void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
ilk_update_display_irq(dev_priv, mask, 0);
}
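
/*
 * Worked example of the ilk_update_display_irq() mask math (illustrative
 * bit values): with interrupt_mask = 0x0c and enabled_irq_mask = 0x04,
 *
 *	new_val = (irq_mask & ~0x0c) | (~0x04 & 0x0c)
 *	        = (irq_mask & ~0x0c) | 0x08
 *
 * so bit 0x04 ends up unmasked (cleared in DEIMR, hence enabled) and bit
 * 0x08 masked, while all bits outside interrupt_mask are untouched.
 */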
 
/**
* ilk_update_gt_irq - update GTIMR
* @dev_priv: driver private
181,6 → 260,8
{
assert_spin_locked(&dev_priv->irq_lock);
 
WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
 
227,6 → 308,8
{
uint32_t new_val;
 
WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
assert_spin_locked(&dev_priv->irq_lock);
 
new_val = dev_priv->pm_irq_mask;
271,6 → 354,7
I915_WRITE(reg, dev_priv->pm_rps_events);
I915_WRITE(reg, dev_priv->pm_rps_events);
POSTING_READ(reg);
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
}
 
290,6 → 374,23
spin_unlock_irq(&dev_priv->irq_lock);
}
 
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
/*
* SNB and IVB can hang, and VLV and CHV may hard hang, on a looping
* batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
*
* TODO: verify if this can be reproduced on VLV,CHV.
*/
if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
 
if (INTEL_INFO(dev_priv)->gen >= 8)
mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
 
return mask;
}
 
void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
302,21 → 403,49
 
spin_lock_irq(&dev_priv->irq_lock);
 
I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
 
__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
~dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
 
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
 
spin_unlock_irq(&dev_priv->irq_lock);
}
 
/**
* bdw_update_port_irq - update DE port interrupt
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
{
uint32_t new_val;
uint32_t old_val;
 
assert_spin_locked(&dev_priv->irq_lock);
 
WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
 
old_val = I915_READ(GEN8_DE_PORT_IMR);
 
new_val = old_val;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
 
if (new_val != old_val) {
I915_WRITE(GEN8_DE_PORT_IMR, new_val);
POSTING_READ(GEN8_DE_PORT_IMR);
}
}
 
/**
* ibx_display_interrupt_update - update SDEIMR
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
330,6 → 459,8
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
 
WARN_ON(enabled_irq_mask & ~interrupt_mask);
 
assert_spin_locked(&dev_priv->irq_lock);
 
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
450,6 → 581,7
 
/**
* i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
* @dev: drm device
*/
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
468,31 → 600,6
spin_unlock_irq(&dev_priv->irq_lock);
}
 
/**
* i915_pipe_enabled - check if a pipe is enabled
* @dev: DRM device
* @pipe: pipe to check
*
* Reading certain registers when the pipe is disabled can hang the chip.
* Use this routine to make sure the PLL is running and the pipe is active
* before reading such registers if unsure.
*/
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Locking is horribly broken here, but whatever. */
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
return intel_crtc->active;
} else {
return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}
}
 
/*
* This timing diagram depicts the video signal in and
* around the vertical blanking period.
543,7 → 650,7
* of horizontal active on the first line of vertical active
*/
 
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
/* Gen2 doesn't have a hardware frame counter */
return 0;
552,24 → 659,15
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
 
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
"pipe %c\n", pipe_name(pipe));
return 0;
}
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
const struct drm_display_mode *mode =
&intel_crtc->config.adjusted_mode;
const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
 
htotal = mode->crtc_htotal;
hsync_start = mode->crtc_hsync_start;
576,17 → 674,7
vbl_start = mode->crtc_vblank_start;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vbl_start = DIV_ROUND_UP(vbl_start, 2);
} else {
enum transcoder cpu_transcoder = (enum transcoder) pipe;
 
htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
if ((I915_READ(PIPECONF(cpu_transcoder)) &
PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
vbl_start = DIV_ROUND_UP(vbl_start, 2);
}
 
/* Convert to pixel count */
vbl_start *= htotal;
 
619,20 → 707,13
return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
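
/*
 * Note on the return above: high1 holds frame-counter bits 8..23 and low
 * bits 0..7; the "+ (pixel >= vbl_start)" term bumps the value by one
 * once the scan position has entered vblank, so the cooked counter
 * increments at vblank start instead of at the hardware frame boundary.
 */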
 
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = PIPE_FRMCOUNT_GM45(pipe);
 
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
"pipe %c\n", pipe_name(pipe));
return 0;
return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
 
return I915_READ(reg);
}
 
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
 
640,7 → 721,7
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
const struct drm_display_mode *mode = &crtc->base.hwmode;
enum pipe pipe = crtc->pipe;
int position, vtotal;
 
654,6 → 735,32
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
/*
* On HSW, the DSL reg (0x70000) appears to return 0 if we
* read it just before the start of vblank. So try it again
* so we don't accidentally end up spanning a vblank frame
* increment, causing the pipe_update_end() code to squawk at us.
*
* The nature of this problem means we can't simply check the ISR
* bit and return the vblank start value; nor can we use the scanline
* debug register in the transcoder as it appears to have the same
* problem. We may need to extend this to include other platforms,
* but so far testing only shows the problem on HSW.
*/
if (HAS_DDI(dev) && !position) {
int i, temp;
 
for (i = 0; i < 100; i++) {
udelay(1);
temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
DSL_LINEMASK_GEN3;
if (temp != position) {
position = temp;
break;
}
}
}
 
/*
* See update_scanline_offset() for the details on the
* scanline_offset adjustment.
*/
660,14 → 767,14
return (position + crtc->scanline_offset) % vtotal;
}
 
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
unsigned int flags, int *vpos, int *hpos,
void *stime, void *etime)
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
bool in_vbl = true;
674,7 → 781,7
int ret = 0;
unsigned long irqflags;
 
if (!intel_crtc->active) {
if (WARN_ON(!mode->crtc_clock)) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
"pipe %c\n", pipe_name(pipe));
return 0;
703,6 → 810,9
 
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
 
/* Get optional system timestamp before query. */
if (stime)
*stime = ktime_get();
 
if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
/* No obvious pixelcount register. Only query vertical
745,6 → 855,9
position = (position + htotal - hsync_start) % vtotal;
}
 
/* Get optional system timestamp after query. */
if (etime)
*etime = ktime_get();
 
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
 
791,7 → 904,7
return position;
}
 
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
int *max_error,
struct timeval *vblank_time,
unsigned flags)
798,8 → 911,8
{
struct drm_crtc *crtc;
 
if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
DRM_ERROR("Invalid crtc %d\n", pipe);
if (pipe >= INTEL_INFO(dev)->num_pipes) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
 
806,12 → 919,12
/* Get drm_crtc to timestamp: */
crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc == NULL) {
DRM_ERROR("Invalid crtc %d\n", pipe);
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
 
if (!crtc->enabled) {
DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
if (!crtc->hwmode.crtc_clock) {
DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
return -EBUSY;
}
 
818,99 → 931,9
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
vblank_time, flags,
crtc,
&to_intel_crtc(crtc)->config.adjusted_mode);
&crtc->hwmode);
}
 
static bool intel_hpd_irq_event(struct drm_device *dev,
struct drm_connector *connector)
{
enum drm_connector_status old_status;
 
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
old_status = connector->status;
 
connector->status = connector->funcs->detect(connector, false);
if (old_status == connector->status)
return false;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.id,
connector->name,
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->status));
 
return true;
}
 
/*
* Handle hotplug events outside the interrupt handler proper.
*/
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
 
static void i915_hotplug_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_connector *intel_connector;
struct intel_encoder *intel_encoder;
struct drm_connector *connector;
bool hpd_disabled = false;
bool changed = false;
u32 hpd_event_bits;
 
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
spin_lock_irq(&dev_priv->irq_lock);
 
hpd_event_bits = dev_priv->hpd_event_bits;
dev_priv->hpd_event_bits = 0;
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
if (!intel_connector->encoder)
continue;
intel_encoder = intel_connector->encoder;
if (intel_encoder->hpd_pin > HPD_NONE &&
dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
connector->polled == DRM_CONNECTOR_POLL_HPD) {
DRM_INFO("HPD interrupt storm detected on connector %s: "
"switching from hotplug detection to polling\n",
connector->name);
dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
connector->polled = DRM_CONNECTOR_POLL_CONNECT
| DRM_CONNECTOR_POLL_DISCONNECT;
hpd_disabled = true;
}
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
connector->name, intel_encoder->hpd_pin);
}
}
/* if there were no outputs to poll, poll was disabled,
* therefore make sure it's enabled when disabling HPD on
* some connectors */
 
spin_unlock_irq(&dev_priv->irq_lock);
 
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
if (!intel_connector->encoder)
continue;
intel_encoder = intel_connector->encoder;
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
if (intel_encoder->hot_plug)
intel_encoder->hot_plug(intel_encoder);
if (intel_hpd_irq_event(dev, connector))
changed = true;
}
}
mutex_unlock(&mode_config->mutex);
 
}
 
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
950,148 → 973,108
return;
}
 
static void notify_ring(struct drm_device *dev,
struct intel_engine_cs *ring)
static void notify_ring(struct intel_engine_cs *ring)
{
if (!intel_ring_initialized(ring))
return;
 
trace_i915_gem_request_complete(ring);
trace_i915_gem_request_notify(ring);
 
wake_up_all(&ring->irq_queue);
}
 
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
struct intel_rps_ei *rps_ei)
static void vlv_c0_read(struct drm_i915_private *dev_priv,
struct intel_rps_ei *ei)
{
u32 cz_ts, cz_freq_khz;
u32 render_count, media_count;
u32 elapsed_render, elapsed_media, elapsed_time;
u32 residency = 0;
 
cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
 
render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
 
if (rps_ei->cz_clock == 0) {
rps_ei->cz_clock = cz_ts;
rps_ei->render_c0 = render_count;
rps_ei->media_c0 = media_count;
 
return dev_priv->rps.cur_freq;
ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
 
elapsed_time = cz_ts - rps_ei->cz_clock;
rps_ei->cz_clock = cz_ts;
static bool vlv_c0_above(struct drm_i915_private *dev_priv,
const struct intel_rps_ei *old,
const struct intel_rps_ei *now,
int threshold)
{
u64 time, c0;
unsigned int mul = 100;
 
elapsed_render = render_count - rps_ei->render_c0;
rps_ei->render_c0 = render_count;
if (old->cz_clock == 0)
return false;
 
elapsed_media = media_count - rps_ei->media_c0;
rps_ei->media_c0 = media_count;
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
mul <<= 8;
 
/* Convert all the counters into common unit of milli sec */
elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
elapsed_render /= cz_freq_khz;
elapsed_media /= cz_freq_khz;
time = now->cz_clock - old->cz_clock;
time *= threshold * dev_priv->czclk_freq;
 
/*
* Calculate overall C0 residency percentage
* only if elapsed time is non zero
/* Workload can be split between render + media, e.g. SwapBuffers
* being blitted in X after being rendered in mesa. To account for
* this we need to combine both engines into our activity counter.
*/
if (elapsed_time) {
residency =
((max(elapsed_render, elapsed_media) * 100)
/ elapsed_time);
c0 = now->render_c0 - old->render_c0;
c0 += now->media_c0 - old->media_c0;
c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
 
return c0 >= time;
}
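
/*
 * Reading of the comparison above (illustrative, with mul = 100):
 *
 *	c0_ticks * 100 * VLV_CZ_CLOCK_TO_MILLI_SEC >=
 *		elapsed_ticks * threshold * czclk_freq
 *
 * i.e. roughly "C0 residency over the EI window >= threshold percent",
 * kept in 64-bit integer math so no division is needed in the hot path.
 */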
 
return residency;
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}
 
/**
* vlv_calc_delay_from_C0_counters - increase/decrease freq based on GPU
* busyness calculated from the C0 counters of the render & media power wells
* @dev_priv: DRM device private
*
*/
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
u32 residency_C0_up = 0, residency_C0_down = 0;
int new_delay, adj;
struct intel_rps_ei now;
u32 events = 0;
 
dev_priv->rps.ei_interrupt_count++;
if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
return 0;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
vlv_c0_read(dev_priv, &now);
if (now.cz_clock == 0)
return 0;
 
if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
if (!vlv_c0_above(dev_priv,
&dev_priv->rps.down_ei, &now,
dev_priv->rps.down_threshold))
events |= GEN6_PM_RP_DOWN_THRESHOLD;
dev_priv->rps.down_ei = now;
}
 
if (dev_priv->rps.up_ei.cz_clock == 0) {
vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
return dev_priv->rps.cur_freq;
if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
if (vlv_c0_above(dev_priv,
&dev_priv->rps.up_ei, &now,
dev_priv->rps.up_threshold))
events |= GEN6_PM_RP_UP_THRESHOLD;
dev_priv->rps.up_ei = now;
}
 
 
/*
* To down-throttle, C0 residency should be less than the down threshold
* for continuous EI intervals. So calculate the down EI counters
* once in VLV_INT_COUNT_FOR_DOWN_EI
*/
if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
 
dev_priv->rps.ei_interrupt_count = 0;
 
residency_C0_down = vlv_c0_residency(dev_priv,
&dev_priv->rps.down_ei);
} else {
residency_C0_up = vlv_c0_residency(dev_priv,
&dev_priv->rps.up_ei);
return events;
}
 
new_delay = dev_priv->rps.cur_freq;
static bool any_waiters(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
int i;
 
adj = dev_priv->rps.last_adj;
/* C0 residency is greater than UP threshold. Increase Frequency */
if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
if (adj > 0)
adj *= 2;
else
adj = 1;
for_each_ring(ring, dev_priv, i)
if (ring->irq_refcount)
return true;
 
if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
new_delay = dev_priv->rps.cur_freq + adj;
 
/*
* For better performance, jump directly
* to RPe if we're below it.
*/
if (new_delay < dev_priv->rps.efficient_freq)
new_delay = dev_priv->rps.efficient_freq;
 
} else if (!dev_priv->rps.ei_interrupt_count &&
(residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
if (adj < 0)
adj *= 2;
else
adj = -1;
/*
* This means, C0 residency is less than down threshold over
* a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
*/
if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
new_delay = dev_priv->rps.cur_freq + adj;
return false;
}
 
return new_delay;
}
 
static void gen6_pm_rps_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, rps.work);
bool client_boost;
int new_delay, adj, min, max;
u32 pm_iir;
int new_delay, adj;
 
spin_lock_irq(&dev_priv->irq_lock);
/* Speed up work cancelation during disabling rps interrupts. */
1103,32 → 1086,43
dev_priv->rps.pm_iir = 0;
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
client_boost = dev_priv->rps.client_boost;
dev_priv->rps.client_boost = false;
spin_unlock_irq(&dev_priv->irq_lock);
 
/* Make sure we didn't queue anything we're not going to process. */
WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
 
if ((pm_iir & dev_priv->pm_rps_events) == 0)
if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
return;
 
mutex_lock(&dev_priv->rps.hw_lock);
 
pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
 
adj = dev_priv->rps.last_adj;
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
new_delay = dev_priv->rps.cur_freq;
min = dev_priv->rps.min_freq_softlimit;
max = dev_priv->rps.max_freq_softlimit;
 
if (client_boost) {
new_delay = dev_priv->rps.max_freq_softlimit;
adj = 0;
} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (adj > 0)
adj *= 2;
else {
/* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
}
new_delay = dev_priv->rps.cur_freq + adj;
 
else /* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
/*
* For better performance, jump directly
* to RPe if we're below it.
*/
if (new_delay < dev_priv->rps.efficient_freq)
if (new_delay < dev_priv->rps.efficient_freq - adj) {
new_delay = dev_priv->rps.efficient_freq;
adj = 0;
}
} else if (any_waiters(dev_priv)) {
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
new_delay = dev_priv->rps.efficient_freq;
1135,34 → 1129,25
else
new_delay = dev_priv->rps.min_freq_softlimit;
adj = 0;
} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0)
adj *= 2;
else {
/* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
}
new_delay = dev_priv->rps.cur_freq + adj;
else /* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
} else { /* unknown event */
new_delay = dev_priv->rps.cur_freq;
adj = 0;
}
 
dev_priv->rps.last_adj = adj;
 
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
*/
new_delay = clamp_t(int, new_delay,
dev_priv->rps.min_freq_softlimit,
dev_priv->rps.max_freq_softlimit);
new_delay += adj;
new_delay = clamp_t(int, new_delay, min, max);
 
dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
intel_set_rps(dev_priv->dev, new_delay);
 
if (IS_VALLEYVIEW(dev_priv->dev))
valleyview_set_rps(dev_priv->dev, new_delay);
else
gen6_set_rps(dev_priv->dev, new_delay);
 
mutex_unlock(&dev_priv->rps.hw_lock);
}
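
/*
 * Example of the adaptive step above (illustrative): consecutive
 * UP-threshold interrupts double a positive adj (1 -> 2 -> 4 ...), so a
 * sustained load ramps quickly; the first DOWN event then resets it to
 * -1 (-2 on CHV, which needs even encodings). last_adj stores the
 * realized delta after clamping, so hitting a softlimit also damps the
 * next step.
 */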
 
1261,9 → 1246,9
{
if (gt_iir &
(GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
notify_ring(dev, &dev_priv->ring[RCS]);
notify_ring(&dev_priv->ring[RCS]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
notify_ring(&dev_priv->ring[VCS]);
}
 
static void snb_gt_irq_handler(struct drm_device *dev,
1273,11 → 1258,11
 
if (gt_iir &
(GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
notify_ring(dev, &dev_priv->ring[RCS]);
notify_ring(&dev_priv->ring[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
notify_ring(&dev_priv->ring[VCS]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[BCS]);
notify_ring(&dev_priv->ring[BCS]);
 
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
1288,65 → 1273,67
ivybridge_parity_error_irq_handler(dev, gt_iir);
}
 
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 master_ctl)
{
struct intel_engine_cs *ring;
u32 rcs, bcs, vcs;
uint32_t tmp = 0;
irqreturn_t ret = IRQ_NONE;
 
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
tmp = I915_READ(GEN8_GT_IIR(0));
u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
if (tmp) {
I915_WRITE(GEN8_GT_IIR(0), tmp);
I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
ret = IRQ_HANDLED;
 
rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
ring = &dev_priv->ring[RCS];
if (rcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
intel_lrc_irq_handler(&dev_priv->ring[RCS]);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
notify_ring(&dev_priv->ring[RCS]);
 
bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
ring = &dev_priv->ring[BCS];
if (bcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
intel_lrc_irq_handler(&dev_priv->ring[BCS]);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
notify_ring(&dev_priv->ring[BCS]);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
 
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
tmp = I915_READ(GEN8_GT_IIR(1));
u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
if (tmp) {
I915_WRITE(GEN8_GT_IIR(1), tmp);
I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
ret = IRQ_HANDLED;
 
vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
ring = &dev_priv->ring[VCS];
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
intel_lrc_irq_handler(&dev_priv->ring[VCS]);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
notify_ring(&dev_priv->ring[VCS]);
 
vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
ring = &dev_priv->ring[VCS2];
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
notify_ring(&dev_priv->ring[VCS2]);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
 
if (master_ctl & GEN8_GT_VECS_IRQ) {
u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
if (tmp) {
I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
ret = IRQ_HANDLED;
 
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
intel_lrc_irq_handler(&dev_priv->ring[VECS]);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
notify_ring(&dev_priv->ring[VECS]);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
 
if (master_ctl & GEN8_GT_PM_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(2));
u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
if (tmp & dev_priv->pm_rps_events) {
I915_WRITE(GEN8_GT_IIR(2),
I915_WRITE_FW(GEN8_GT_IIR(2),
tmp & dev_priv->pm_rps_events);
ret = IRQ_HANDLED;
gen6_rps_irq_handler(dev_priv, tmp);
1354,182 → 1341,118
DRM_ERROR("The master control interrupt lied (PM)!\n");
}
 
if (master_ctl & GEN8_GT_VECS_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(3));
if (tmp) {
I915_WRITE(GEN8_GT_IIR(3), tmp);
ret = IRQ_HANDLED;
 
vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
ring = &dev_priv->ring[VECS];
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
 
return ret;
}
 
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
 
static int pch_port_to_hotplug_shift(enum port port)
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
switch (port) {
case PORT_A:
case PORT_E:
default:
return -1;
return val & PORTA_HOTPLUG_LONG_DETECT;
case PORT_B:
return 0;
return val & PORTB_HOTPLUG_LONG_DETECT;
case PORT_C:
return 8;
case PORT_D:
return 16;
return val & PORTC_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
 
static int i915_port_to_hotplug_shift(enum port port)
static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
switch (port) {
case PORT_A:
case PORT_E:
return val & PORTE_HOTPLUG_LONG_DETECT;
default:
return -1;
return false;
}
}
 
static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
switch (port) {
case PORT_A:
return val & PORTA_HOTPLUG_LONG_DETECT;
case PORT_B:
return 17;
return val & PORTB_HOTPLUG_LONG_DETECT;
case PORT_C:
return 19;
return val & PORTC_HOTPLUG_LONG_DETECT;
case PORT_D:
return 21;
return val & PORTD_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
 
static inline enum port get_port_from_pin(enum hpd_pin pin)
static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
switch (pin) {
case HPD_PORT_B:
return PORT_B;
case HPD_PORT_C:
return PORT_C;
case HPD_PORT_D:
return PORT_D;
switch (port) {
case PORT_A:
return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
default:
return PORT_A; /* no hpd */
return false;
}
}
 
static inline void intel_hpd_irq_handler(struct drm_device *dev,
u32 hotplug_trigger,
u32 dig_hotplug_reg,
const u32 *hpd)
static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
enum port port;
bool storm_detected = false;
bool queue_dig = false, queue_hp = false;
u32 dig_shift;
u32 dig_port_mask = 0;
 
if (!hotplug_trigger)
return;
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
hotplug_trigger, dig_hotplug_reg);
 
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
if (!(hpd[i] & hotplug_trigger))
continue;
 
port = get_port_from_pin(i);
if (port && dev_priv->hpd_irq_port[port]) {
bool long_hpd;
 
if (HAS_PCH_SPLIT(dev)) {
dig_shift = pch_port_to_hotplug_shift(port);
long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
} else {
dig_shift = i915_port_to_hotplug_shift(port);
long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
switch (port) {
case PORT_B:
return val & PORTB_HOTPLUG_LONG_DETECT;
case PORT_C:
return val & PORTC_HOTPLUG_LONG_DETECT;
case PORT_D:
return val & PORTD_HOTPLUG_LONG_DETECT;
default:
return false;
}
}
 
DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
port_name(port),
long_hpd ? "long" : "short");
/* for long HPD pulses we want to have the digital queue happen,
but we still want HPD storm detection to function. */
if (long_hpd) {
dev_priv->long_hpd_port_mask |= (1 << port);
dig_port_mask |= hpd[i];
} else {
/* for short HPD just trigger the digital queue */
dev_priv->short_hpd_port_mask |= (1 << port);
hotplug_trigger &= ~hpd[i];
static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
switch (port) {
case PORT_B:
return val & PORTB_HOTPLUG_INT_LONG_PULSE;
case PORT_C:
return val & PORTC_HOTPLUG_INT_LONG_PULSE;
case PORT_D:
return val & PORTD_HOTPLUG_INT_LONG_PULSE;
default:
return false;
}
queue_dig = true;
}
}
 
for (i = 1; i < HPD_NUM_PINS; i++) {
if (hpd[i] & hotplug_trigger &&
dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
/*
* On GMCH platforms the interrupt mask bits only
* prevent irq generation, not the setting of the
* hotplug bits itself. So only WARN about unexpected
* interrupts on saner platforms.
* Get a bit mask of pins that have triggered, and which ones may be long.
* This can be called multiple times with the same masks to accumulate
* hotplug detection results from several registers.
*
* Note that the caller is expected to zero out the masks initially.
*/
WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
"Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
hotplug_trigger, i, hpd[i]);
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
u32 hotplug_trigger, u32 dig_hotplug_reg,
const u32 hpd[HPD_NUM_PINS],
bool long_pulse_detect(enum port port, u32 val))
{
enum port port;
int i;
 
for_each_hpd_pin(i) {
if ((hpd[i] & hotplug_trigger) == 0)
continue;
}
 
if (!(hpd[i] & hotplug_trigger) ||
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
*pin_mask |= BIT(i);
 
// if (!intel_hpd_pin_to_port(i, &port))
continue;
 
if (!(dig_port_mask & hpd[i])) {
dev_priv->hpd_event_bits |= (1 << i);
queue_hp = true;
if (long_pulse_detect(port, dig_hotplug_reg))
*long_mask |= BIT(i);
}
 
if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
dev_priv->hpd_stats[i].hpd_last_jiffies
+ msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
dev_priv->hpd_stats[i].hpd_cnt = 0;
DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
dev_priv->hpd_event_bits &= ~(1 << i);
DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
storm_detected = true;
} else {
dev_priv->hpd_stats[i].hpd_cnt++;
DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
dev_priv->hpd_stats[i].hpd_cnt);
}
}
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
hotplug_trigger, dig_hotplug_reg, *pin_mask);
 
if (storm_detected)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock(&dev_priv->irq_lock);
 
/*
* Our hotplug handler can grab modeset locks (by calling down into the
* fb helpers). Hence it must not be run on our own dev_priv->wq work
* queue, since otherwise the flush_work in the pageflip code will
* deadlock.
*/
if (queue_hp)
schedule_work(&dev_priv->hotplug_work);
}
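
/*
 * Accumulation sketch for intel_get_hpd_pins() (see spt_irq_handler()
 * below for the in-tree use): since pin_mask/long_mask are only ORed
 * into, one caller can fold in results from several trigger registers:
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(&pin_mask, &long_mask, trig1, dig_reg1,
 *			   hpd_spt, spt_port_hotplug_long_detect);
 *	intel_get_hpd_pins(&pin_mask, &long_mask, trig2, dig_reg2,
 *			   hpd_spt, spt_port_hotplug2_long_detect);
 */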
 
static void gmbus_irq_handler(struct drm_device *dev)
1647,11 → 1570,6
* the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
/* TODO: RPS on GEN9+ is not supported yet. */
if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
"GEN9+: unexpected RPS IRQ\n"))
return;
 
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1667,7 → 1585,7
 
if (HAS_VEBOX(dev_priv->dev)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
notify_ring(&dev_priv->ring[VECS]);
 
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1676,8 → 1594,8
 
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
if (!drm_handle_vblank(dev, pipe))
return false;
// if (!drm_handle_vblank(dev, pipe))
// return false;
 
return true;
}
1735,7 → 1653,14
spin_unlock(&dev_priv->irq_lock);
 
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
intel_pipe_handle_vblank(dev, pipe))
/*intel_check_page_flip(dev, pipe)*/;
 
if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
// intel_prepare_page_flip(dev, pipe);
// intel_finish_page_flip(dev, pipe);
}
 
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
1752,8 → 1677,11
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
u32 pin_mask = 0, long_mask = 0;
 
if (hotplug_status) {
if (!hotplug_status)
return;
 
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
/*
* Make sure hotplug status is cleared before we clear IIR, or else we
1761,21 → 1689,30
*/
POSTING_READ(PORT_HOTPLUG_STAT);
 
if (IS_G4X(dev)) {
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
 
intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
} else {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
if (hotplug_trigger) {
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
hotplug_trigger, hpd_status_g4x,
i9xx_port_hotplug_long_detect);
 
intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
// intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
 
if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
dp_aux_irq_handler(dev);
} else {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
 
if (hotplug_trigger) {
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
hotplug_trigger, hpd_status_i915,
i9xx_port_hotplug_long_detect);
// intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
}
}
 
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
1784,6 → 1721,9
u32 iir, gt_iir, pm_iir;
irqreturn_t ret = IRQ_NONE;
 
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
 
while (true) {
/* Find, clear, then process each source of interrupt */
 
1828,6 → 1768,9
u32 master_ctl, iir;
irqreturn_t ret = IRQ_NONE;
 
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
 
for (;;) {
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
iir = I915_READ(VLV_IIR);
1848,7 → 1791,7
I915_WRITE(VLV_IIR, iir);
}
 
gen8_gt_irq_handler(dev, dev_priv, master_ctl);
gen8_gt_irq_handler(dev_priv, master_ctl);
 
/* Call regardless, as some status bits might not be
* signalled in iir */
1861,18 → 1804,31
return ret;
}
 
static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
const u32 hpd[HPD_NUM_PINS])
{
struct drm_i915_private *dev_priv = to_i915(dev);
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
 
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
dig_hotplug_reg, hpd,
pch_port_hotplug_long_detect);
 
// intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
 
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
u32 dig_hotplug_reg;
 
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
if (hotplug_trigger)
ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
 
intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
 
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
1963,13 → 1919,10
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
u32 dig_hotplug_reg;
 
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
if (hotplug_trigger)
ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
 
intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
 
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
SDE_AUDIO_POWER_SHIFT_CPT);
1999,11 → 1952,64
cpt_serr_int_handler(dev);
}
 
static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
~SDE_PORTE_HOTPLUG_SPT;
u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
u32 pin_mask = 0, long_mask = 0;
 
if (hotplug_trigger) {
u32 dig_hotplug_reg;
 
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
 
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
dig_hotplug_reg, hpd_spt,
spt_port_hotplug_long_detect);
}
 
if (hotplug2_trigger) {
u32 dig_hotplug_reg;
 
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
 
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
dig_hotplug_reg, hpd_spt,
spt_port_hotplug2_long_detect);
}
 
if (pch_iir & SDE_GMBUS_CPT)
gmbus_irq_handler(dev);
}
 
static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
const u32 hpd[HPD_NUM_PINS])
{
struct drm_i915_private *dev_priv = to_i915(dev);
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
 
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
dig_hotplug_reg, hpd,
ilk_port_hotplug_long_detect);
 
}
 
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
 
if (hotplug_trigger)
ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
 
if (de_iir & DE_AUX_CHANNEL_A)
dp_aux_irq_handler(dev);
 
2014,6 → 2020,9
DRM_ERROR("Poison interrupt\n");
 
for_each_pipe(dev_priv, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe) &&
intel_pipe_handle_vblank(dev, pipe))
/*intel_check_page_flip(dev, pipe)*/;
 
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2049,7 → 2058,11
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
 
if (hotplug_trigger)
ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
 
if (de_iir & DE_ERR_INT_IVB)
ivb_err_int_handler(dev);
 
2060,6 → 2073,9
intel_opregion_asle_intr(dev);
 
for_each_pipe(dev_priv, pipe) {
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
intel_pipe_handle_vblank(dev, pipe))
/*intel_check_page_flip(dev, pipe)*/;
 
/* plane/pipes map 1:1 on ilk+ */
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2094,6 → 2110,9
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
 
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
 
/* We get interrupts on unclaimed registers, so check for this before we
* do any I915_{READ,WRITE}. */
intel_uncore_check_errors(dev);
2155,6 → 2174,21
return ret;
}
 
static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
const u32 hpd[HPD_NUM_PINS])
{
struct drm_i915_private *dev_priv = to_i915(dev);
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
 
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
dig_hotplug_reg, hpd,
bxt_port_hotplug_long_detect);
 
}
 
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
2165,21 → 2199,23
enum pipe pipe;
u32 aux_mask = GEN8_AUX_CHANNEL_A;
 
if (IS_GEN9(dev))
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
 
if (INTEL_INFO(dev_priv)->gen >= 9)
aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
 
master_ctl = I915_READ(GEN8_MASTER_IRQ);
master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
if (!master_ctl)
return IRQ_NONE;
 
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
 
/* Find, clear, then process each source of interrupt */
 
ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
ret = gen8_gt_irq_handler(dev_priv, master_ctl);
 
if (master_ctl & GEN8_DE_MISC_IRQ) {
tmp = I915_READ(GEN8_DE_MISC_IIR);
2198,12 → 2234,36
if (master_ctl & GEN8_DE_PORT_IRQ) {
tmp = I915_READ(GEN8_DE_PORT_IIR);
if (tmp) {
bool found = false;
u32 hotplug_trigger = 0;
 
if (IS_BROXTON(dev_priv))
hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
else if (IS_BROADWELL(dev_priv))
hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
 
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
ret = IRQ_HANDLED;
 
if (tmp & aux_mask)
if (tmp & aux_mask) {
dp_aux_irq_handler(dev);
found = true;
}
 
if (hotplug_trigger) {
if (IS_BROXTON(dev))
bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
else
ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
found = true;
}
 
if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
gmbus_irq_handler(dev);
found = true;
}
 
if (!found)
DRM_ERROR("Unexpected DE Port interrupt\n");
}
else
2222,7 → 2282,7
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
 
 
if (IS_GEN9(dev))
if (INTEL_INFO(dev_priv)->gen >= 9)
flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
else
flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2236,7 → 2296,7
pipe);
 
 
if (IS_GEN9(dev))
if (INTEL_INFO(dev_priv)->gen >= 9)
fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
else
fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2249,7 → 2309,8
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
}
 
if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
master_ctl & GEN8_DE_PCH_IRQ) {
/*
* FIXME(BDW): Assume for now that the new interrupt handling
* scheme also closed the SDE interrupt handling race we've seen
2259,6 → 2320,10
if (pch_iir) {
I915_WRITE(SDEIIR, pch_iir);
ret = IRQ_HANDLED;
 
if (HAS_PCH_SPT(dev_priv))
spt_irq_handler(dev, pch_iir);
else
cpt_irq_handler(dev, pch_iir);
} else
DRM_ERROR("The master control interrupt lied (SDE)!\n");
2265,8 → 2330,8
 
}
 
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ_FW(GEN8_MASTER_IRQ);
 
return ret;
}
2298,19 → 2363,16
}
 
/**
* i915_error_work_func - do process context error handling work
* @work: work struct
* i915_reset_and_wakeup - do process context error handling work
* @dev: drm device
*
* Fire an error uevent so userspace can see that a hang or error
* was detected.
*/
static void i915_error_work_func(struct work_struct *work)
static void i915_reset_and_wakeup(struct drm_device *dev)
{
struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
work);
struct drm_i915_private *dev_priv =
container_of(error, struct drm_i915_private, gpu_error);
struct drm_device *dev = dev_priv->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_gpu_error *error = &dev_priv->gpu_error;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2328,6 → 2390,7
*/
if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
intel_runtime_pm_get(dev_priv);
 
/*
* All state reset _must_ be completed before we update the
2337,8 → 2400,10
*/
// ret = i915_reset(dev);
 
// intel_display_handle_reset(dev);
// intel_finish_reset(dev);
 
intel_runtime_pm_put(dev_priv);
 
if (ret == 0) {
/*
* After all the gem state is reset, increment the reset
2350,6 → 2415,7
* updates before
* the counter increment.
*/
smp_mb__before_atomic();
atomic_inc(&dev_priv->gpu_error.reset_counter);
 
} else {
2457,10 → 2523,10
}
 
/**
* i915_handle_error - handle an error interrupt
* i915_handle_error - handle a gpu error
* @dev: drm device
*
* Do some basic checking of regsiter state at error interrupt time and
* Do some basic checking of register state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
2481,13 → 2547,13
i915_report_and_clear_eir(dev);
 
if (wedged) {
atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
atomic_or(I915_RESET_IN_PROGRESS_FLAG,
&dev_priv->gpu_error.reset_counter);
 
/*
* Wakeup waiting processes so that the reset work function
* i915_error_work_func doesn't deadlock trying to grab various
* locks. By bumping the reset counter first, the woken
* Wakeup waiting processes so that the reset function
* i915_reset_and_wakeup doesn't deadlock trying to grab
* various locks. By bumping the reset counter first, the woken
* processes will see a reset in progress and back off,
* releasing their locks and then wait for the reset completion.
* We must do this for _all_ gpu waiters that might hold locks
2500,26 → 2566,17
i915_error_wake_up(dev_priv, false);
}
 
/*
* Our reset work can grab modeset locks (since it needs to reset the
* state of outstanding pageflips). Hence it must not be run on our own
* dev_priv->wq work queue, since otherwise the flush_work in the pageflip
* code will deadlock.
*/
schedule_work(&dev_priv->gpu_error.work);
i915_reset_and_wakeup(dev);
}
 
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
static int i915_enable_vblank(struct drm_device *dev, int pipe)
static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
2532,7 → 2589,7
return 0;
}
 
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
2539,9 → 2596,6
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK(pipe);
 
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2549,14 → 2603,11
return 0;
}
 
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
2565,14 → 2616,11
return 0;
}
 
static int gen8_enable_vblank(struct drm_device *dev, int pipe)
static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2584,7 → 2632,7
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
static void i915_disable_vblank(struct drm_device *dev, int pipe)
static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
2596,7 → 2644,7
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
2608,7 → 2656,7
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
2619,14 → 2667,11
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
static void gen8_disable_vblank(struct drm_device *dev, int pipe)
static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
if (!i915_pipe_enabled(dev, pipe))
return;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2634,18 → 2679,11
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
static u32
ring_last_seqno(struct intel_engine_cs *ring)
{
return list_entry(ring->request_list.prev,
struct drm_i915_gem_request, list)->seqno;
}
 
static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
return (list_empty(&ring->request_list) ||
i915_seqno_passed(seqno, ring_last_seqno(ring)));
i915_seqno_passed(seqno, ring->last_submitted_seqno));
}
 
static bool
2701,6 → 2739,26
u64 offset = 0;
int i, backwards;
 
/*
* This function does not support execlist mode - any attempt to
* proceed further into this function will result in a kernel panic
* when dereferencing ring->buffer, which is not set up in execlist
* mode.
*
* The correct way of doing it would be to derive the currently
* executing ring buffer from the current context, which is derived
* from the currently running request. Unfortunately, to get the
* current request we would have to grab the struct_mutex before doing
* anything else, which would be ill-advised since some other thread
* might have grabbed it already and managed to hang itself, causing
* the hang checker to deadlock.
*
* Therefore, this function does not support execlist mode in its
* current form. Just return NULL and move on.
*/
if (ring->buffer == NULL)
return NULL;
 
ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
return NULL;
2831,7 → 2889,7
return HANGCHECK_HUNG;
}
 
/**
/*
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. We keep track of per-ring seqno progress and
* if there is no progress, the hangcheck score for that ring is increased.
2839,10 → 2897,12
* we kick the ring. If we see no progress on three subsequent calls
* we assume the chip is wedged and try to fix it by resetting the chip.
*/
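The scheme the comment describes amounts to a small per-ring escalation policy: progress clears the score, a stall first earns the ring a kick, and repeated stalls are treated as a wedged chip. A minimal standalone sketch of that policy follows; the threshold of three and all names here are illustrative only, not the driver's real constants.

#include <stdio.h>

enum hang_action { RING_ACTIVE, RING_KICK, CHIP_WEDGED };

struct hang_state {
	unsigned int last_seqno;
	int score;
};

static enum hang_action hangcheck_tick(struct hang_state *hs,
				       unsigned int seqno)
{
	if (seqno != hs->last_seqno) {
		hs->last_seqno = seqno;
		hs->score = 0;		/* progress: forgive past stalls */
		return RING_ACTIVE;
	}
	if (++hs->score < 3)
		return RING_KICK;	/* stuck: try kicking the ring */
	return CHIP_WEDGED;		/* repeated stalls: reset the chip */
}

int main(void)
{
	struct hang_state hs = { 0, 0 };
	const unsigned int seqnos[] = { 1, 2, 2, 2, 2 };
	int i;

	for (i = 0; i < 5; i++)
		printf("tick %d -> action %d\n", i,
		       hangcheck_tick(&hs, seqnos[i]));
	return 0;
}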
static void i915_hangcheck_elapsed(unsigned long data)
static void i915_hangcheck_elapsed(struct work_struct *work)
{
struct drm_device *dev = (struct drm_device *)data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
struct drm_device *dev = dev_priv->dev;
struct intel_engine_cs *ring;
int i;
int busy_count = 0, rings_hung = 0;
2868,12 → 2928,20
if (ring_idle(ring, seqno)) {
ring->hangcheck.action = HANGCHECK_IDLE;
 
// if (waitqueue_active(&ring->irq_queue)) {
if (waitqueue_active(&ring->irq_queue)) {
/* Issue a wake-up to catch stuck h/w. */
// DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
// ring->name);
// wake_up_all(&ring->irq_queue);
// } else
if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
ring->name);
else
DRM_INFO("Fake missed irq on %s\n",
ring->name);
wake_up_all(&ring->irq_queue);
}
/* Safeguard against driver failure */
ring->hangcheck.score += BUSY;
} else
busy = false;
} else {
/* We always increment the hangcheck score
3004,7 → 3072,7
{
enum pipe pipe;
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
for_each_pipe(dev_priv, pipe)
3057,17 → 3125,27
GEN5_IRQ_RESET(GEN8_DE_MISC_);
GEN5_IRQ_RESET(GEN8_PCU_);
 
if (HAS_PCH_SPLIT(dev))
ibx_irq_reset(dev);
}
 
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask)
{
uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
 
spin_lock_irq(&dev_priv->irq_lock);
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
if (pipe_mask & 1 << PIPE_A)
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
dev_priv->de_irq_mask[PIPE_A],
~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
if (pipe_mask & 1 << PIPE_B)
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
dev_priv->de_irq_mask[PIPE_B],
~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
if (pipe_mask & 1 << PIPE_C)
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
dev_priv->de_irq_mask[PIPE_C],
~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
spin_unlock_irq(&dev_priv->irq_lock);
}
3088,22 → 3166,31
vlv_display_irq_reset(dev_priv);
}
 
static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
const u32 hpd[HPD_NUM_PINS])
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
u32 enabled_irqs = 0;
 
for_each_intel_encoder(dev, encoder)
if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd[encoder->hpd_pin];
 
return enabled_irqs;
}
 
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
u32 hotplug_irqs, hotplug, enabled_irqs = 0;
u32 hotplug_irqs, hotplug, enabled_irqs;
 
if (HAS_PCH_IBX(dev)) {
hotplug_irqs = SDE_HOTPLUG_MASK;
for_each_intel_encoder(dev, intel_encoder)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
} else {
hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
for_each_intel_encoder(dev, intel_encoder)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
}
 
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3110,9 → 3197,8
 
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec)
*
* This register is the same on all known PCH chips.
* duration to 2ms (which is the minimum in the Display Port spec).
* The pulse duration bits are reserved on LPT+.
*/
hotplug = I915_READ(PCH_PORT_HOTPLUG);
hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3119,9 → 3205,87
hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
/*
* When CPU and PCH are on the same package, port A
* HPD must be enabled in both north and south.
*/
if (HAS_PCH_LPT_LP(dev))
hotplug |= PORTA_HOTPLUG_ENABLE;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
 
static void spt_hpd_irq_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_irqs, hotplug, enabled_irqs;
 
hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
 
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 
/* Enable digital hotplug on the PCH */
hotplug = I915_READ(PCH_PORT_HOTPLUG);
hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
 
hotplug = I915_READ(PCH_PORT_HOTPLUG2);
hotplug |= PORTE_HOTPLUG_ENABLE;
I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}
 
static void ilk_hpd_irq_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_irqs, hotplug, enabled_irqs;
 
if (INTEL_INFO(dev)->gen >= 8) {
hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
 
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
} else if (INTEL_INFO(dev)->gen >= 7) {
hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
 
ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
} else {
hotplug_irqs = DE_DP_A_HOTPLUG;
enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
 
ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
}
 
/*
* Enable digital hotplug on the CPU, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec).
* The pulse duration bits are reserved on HSW+.
*/
hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
 
ibx_hpd_irq_setup(dev);
}
 
static void bxt_hpd_irq_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_irqs, hotplug, enabled_irqs;
 
enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
 
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
 
hotplug = I915_READ(PCH_PORT_HOTPLUG);
hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
PORTA_HOTPLUG_ENABLE;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
 
static void ibx_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
3135,7 → 3299,7
else
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
 
GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
gen5_assert_iir_is_zero(dev_priv, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
}
 
3187,7 → 3351,8
DE_PLANEB_FLIP_DONE_IVB |
DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
DE_DP_A_HOTPLUG_IVB);
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3194,8 → 3359,9
DE_AUX_CHANNEL_A |
DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
DE_POISON);
extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
DE_DP_A_HOTPLUG);
}
 
dev_priv->irq_mask = ~display_mask;
3322,7 → 3488,7
{
dev_priv->irq_mask = ~0;
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
POSTING_READ(PORT_HOTPLUG_EN);
 
I915_WRITE(VLV_IIR, 0xffffffff);
3391,21 → 3557,31
{
uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
uint32_t de_pipe_enables;
int pipe;
u32 aux_en = GEN8_AUX_CHANNEL_A;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
u32 de_port_enables;
enum pipe pipe;
 
if (IS_GEN9(dev_priv)) {
if (INTEL_INFO(dev_priv)->gen >= 9) {
de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
} else
if (IS_BROXTON(dev_priv))
de_port_masked |= BXT_DE_PORT_GMBUS;
} else {
de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}
 
de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
GEN8_PIPE_FIFO_UNDERRUN;
 
de_port_enables = de_port_masked;
if (IS_BROXTON(dev_priv))
de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
 
dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3417,7 → 3593,7
dev_priv->de_irq_mask[pipe],
de_pipe_enables);
 
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
}
 
static int gen8_irq_postinstall(struct drm_device *dev)
3424,11 → 3600,13
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (HAS_PCH_SPLIT(dev))
ibx_irq_pre_postinstall(dev);
 
gen8_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
 
if (HAS_PCH_SPLIT(dev))
ibx_irq_postinstall(dev);
 
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3543,14 → 3721,12
~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
I915_WRITE16(IMR, dev_priv->irq_mask);
 
I915_WRITE16(IER,
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
I915_USER_INTERRUPT);
POSTING_READ16(IER);
 
3573,14 → 3749,12
struct drm_i915_private *dev_priv = dev->dev_private;
u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
 
// if (!drm_handle_vblank(dev, pipe))
if (!intel_pipe_handle_vblank(dev, pipe))
return false;
 
if ((iir & flip_pending) == 0)
goto check_page_flip;
 
// intel_prepare_page_flip(dev, pipe);
 
/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3590,7 → 3764,8
if (I915_READ16(ISR) & flip_pending)
goto check_page_flip;
 
intel_finish_page_flip(dev, pipe);
// intel_prepare_page_flip(dev, plane);
// intel_finish_page_flip(dev, pipe);
return true;
 
check_page_flip:
3609,6 → 3784,9
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
 
iir = I915_READ16(IIR);
if (iir == 0)
return IRQ_NONE;
3639,7 → 3817,7
new_iir = I915_READ16(IIR); /* Flush posted writes */
 
if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
notify_ring(&dev_priv->ring[RCS]);
 
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
3687,7 → 3865,7
int pipe;
 
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
 
3712,18 → 3890,16
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
 
enable_mask =
I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
 
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
POSTING_READ(PORT_HOTPLUG_EN);
 
/* Enable in IER... */
3757,14 → 3933,11
struct drm_i915_private *dev_priv = dev->dev_private;
u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
 
// if (!drm_handle_vblank(dev, pipe))
return false;
 
if ((iir & flip_pending) == 0)
goto check_page_flip;
 
// intel_prepare_page_flip(dev, plane);
 
/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3774,11 → 3947,9
if (I915_READ(ISR) & flip_pending)
goto check_page_flip;
 
intel_finish_page_flip(dev, pipe);
return true;
 
check_page_flip:
// intel_check_page_flip(dev, pipe);
return false;
}
 
3792,6 → 3963,9
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
int pipe, ret = IRQ_NONE;
 
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
 
iir = I915_READ(IIR);
do {
bool irq_received = (iir & ~flip_mask) != 0;
3830,7 → 4004,7
new_iir = I915_READ(IIR); /* Flush posted writes */
 
if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
notify_ring(&dev_priv->ring[RCS]);
 
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
3883,7 → 4057,7
int pipe;
 
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
 
3904,7 → 4078,7
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
I915_WRITE(HWSTAM, 0xeffe);
3965,7 → 4139,7
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
POSTING_READ(PORT_HOTPLUG_EN);
 
i915_enable_asle_pipestat(dev);
3976,19 → 4150,13
static void i915_hpd_irq_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
u32 hotplug_en;
 
assert_spin_locked(&dev_priv->irq_lock);
 
if (I915_HAS_HOTPLUG(dev)) {
hotplug_en = I915_READ(PORT_HOTPLUG_EN);
hotplug_en &= ~HOTPLUG_INT_EN_MASK;
/* Note HDMI and DP share hotplug bits */
/* enable bits are the same for all generations */
for_each_intel_encoder(dev, intel_encoder)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
/* Programming the CRT detection parameters tends
to generate a spurious hotplug event about three
seconds later. So just do it once.
3995,13 → 4163,15
*/
if (IS_G4X(dev))
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
 
/* Ignore TV since it's buggy */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
i915_hotplug_interrupt_update_locked(dev_priv,
HOTPLUG_INT_EN_MASK |
CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
CRT_HOTPLUG_ACTIVATION_PERIOD_64,
hotplug_en);
}
}
 
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
4014,6 → 4184,9
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
 
iir = I915_READ(IIR);
 
for (;;) {
4056,9 → 4229,9
new_iir = I915_READ(IIR); /* Flush posted writes */
 
if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
notify_ring(&dev_priv->ring[RCS]);
if (iir & I915_BSD_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
notify_ring(&dev_priv->ring[VCS]);
 
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4110,7 → 4283,7
if (!dev_priv)
return;
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
I915_WRITE(HWSTAM, 0xffffffff);
4125,46 → 4298,6
I915_WRITE(IIR, I915_READ(IIR));
}
 
static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
hotplug_reenable_work.work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
int i;
 
intel_runtime_pm_get(dev_priv);
 
spin_lock_irq(&dev_priv->irq_lock);
for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
struct drm_connector *connector;
 
if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
continue;
 
dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
 
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct intel_connector *intel_connector = to_intel_connector(connector);
 
if (intel_connector->encoder->hpd_pin == i) {
if (connector->polled != intel_connector->polled)
DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
connector->name);
connector->polled = intel_connector->polled;
if (!connector->polled)
connector->polled = DRM_CONNECTOR_POLL_HPD;
}
}
}
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
 
intel_runtime_pm_put(dev_priv);
}
 
/**
* intel_irq_init - initializes irq support
* @dev_priv: i915 device instance
4176,9 → 4309,8
{
struct drm_device *dev = dev_priv->dev;
 
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
// INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
// intel_hpd_init_work(dev_priv);
 
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
4185,12 → 4317,12
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
intel_hpd_irq_reenable_work);
INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
i915_hangcheck_elapsed);
 
 
if (IS_GEN2(dev_priv)) {
4198,7 → 4330,7
dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
dev->driver->get_vblank_counter = g4x_get_vblank_counter;
} else {
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4212,10 → 4344,8
if (!IS_GEN2(dev_priv))
dev->vblank_disable_immediate = true;
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
}
 
if (IS_CHERRYVIEW(dev_priv)) {
dev->driver->irq_handler = cherryview_irq_handler;
4240,7 → 4370,12
dev->driver->irq_uninstall = gen8_irq_uninstall;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
if (IS_BROXTON(dev))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
else if (HAS_PCH_SPT(dev))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_reset;
4248,7 → 4383,7
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ironlake_enable_vblank;
dev->driver->disable_vblank = ironlake_disable_vblank;
dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
} else {
if (INTEL_INFO(dev_priv)->gen == 2) {
} else if (INTEL_INFO(dev_priv)->gen == 3) {
4256,14 → 4391,14
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
dev->driver->irq_handler = i915_irq_handler;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else {
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_uninstall = i965_irq_uninstall;
dev->driver->irq_handler = i965_irq_handler;
}
if (I915_HAS_HOTPLUG(dev_priv))
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
}
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
}
4270,46 → 4405,6
}
 
/**
* intel_hpd_init - initializes and enables hpd support
* @dev_priv: i915 device instance
*
* This function enables the hotplug support. It requires that interrupts have
* already been enabled with intel_irq_init_hw(). From this point on hotplug and
* poll request can run concurrently to other code, so locking rules must be
* obeyed.
*
* This is a separate step from interrupt enabling to simplify the locking rules
* in the driver load and resume code.
*/
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
int i;
 
for (i = 1; i < HPD_NUM_PINS; i++) {
dev_priv->hpd_stats[i].hpd_cnt = 0;
dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
}
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct intel_connector *intel_connector = to_intel_connector(connector);
connector->polled = intel_connector->polled;
if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
connector->polled = DRM_CONNECTOR_POLL_HPD;
if (intel_connector->mst_port)
connector->polled = DRM_CONNECTOR_POLL_HPD;
}
 
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked checks happy. */
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
}
 
/**
* intel_irq_install - enables the hardware interrupt
* @dev_priv: i915 device instance
*
/drivers/video/drm/i915/i915_params.c
27,46 → 27,46
struct i915_params i915 __read_mostly = {
.modeset = 1,
.panel_ignore_lid = 1,
.powersave = 1,
.semaphores = -1,
.lvds_downclock = 0,
.lvds_channel_mode = 0,
.panel_use_ssc = -1,
.vbt_sdvo_panel_type = -1,
.enable_rc6 = -1,
.enable_fbc = -1,
.enable_execlists = 0,
.enable_execlists = -1,
.enable_hangcheck = true,
.enable_ppgtt = 1,
.enable_ppgtt = -1,
.enable_psr = 0,
.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
.disable_power_well = 1,
.disable_power_well = -1,
.enable_ips = 1,
.fastboot = 1,
.prefault_disable = 0,
.load_detect_test = 0,
.reset = true,
.invert_brightness = 0,
.disable_display = 0,
.enable_cmd_parser = 0,
.disable_vtd_wa = 0,
.disable_vtd_wa = 1,
.use_mmio_flip = 0,
.mmio_debug = 0,
.verbose_state_checks = 1,
.nuclear_pageflip = 0,
.edp_vswing = 0,
.enable_guc_submission = false,
.guc_log_level = -1,
};
 
module_param_named(modeset, i915.modeset, int, 0400);
MODULE_PARM_DESC(modeset,
"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
"Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");
 
module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
module_param_named_unsafe(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
 
module_param_named(powersave, i915.powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
 
module_param_named_unsafe(semaphores, i915.semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync "
85,30 → 85,25
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
 
module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");
 
module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
"Specify LVDS channel mode "
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
 
module_param_named(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
module_param_named_unsafe(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: auto from VBT)");
 
module_param_named(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
 
module_param_named(reset, i915.reset, bool, 0600);
module_param_named_unsafe(reset, i915.reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
 
module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
"Periodically check GPU activity for detecting hangs. "
"WARNING: Disabling this can cause system wide hangs. "
119,23 → 114,24
"Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
 
module_param_named(enable_execlists, i915.enable_execlists, int, 0400);
module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400);
MODULE_PARM_DESC(enable_execlists,
"Override execlists usage. "
"(-1=auto, 0=disabled [default], 1=enabled)");
"(-1=auto [default], 0=disabled, 1=enabled)");
 
module_param_named(enable_psr, i915.enable_psr, int, 0600);
module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
 
module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support.");
 
module_param_named(disable_power_well, i915.disable_power_well, int, 0600);
module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
"Disable the power well when possible (default: true)");
"Disable display power wells when possible "
"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
 
module_param_named(enable_ips, i915.enable_ips, int, 0600);
module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600);
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
 
module_param_named(fastboot, i915.fastboot, bool, 0600);
142,12 → 138,17
MODULE_PARM_DESC(fastboot,
"Try to skip unnecessary mode sets at boot time (default: false)");
 
module_param_named(prefault_disable, i915.prefault_disable, bool, 0600);
module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
"Disable page prefaulting for pread/pwrite/reloc (default:false). "
"For developers only.");
 
module_param_named(invert_brightness, i915.invert_brightness, int, 0600);
module_param_named_unsafe(load_detect_test, i915.load_detect_test, bool, 0600);
MODULE_PARM_DESC(load_detect_test,
"Force-enable the VGA load detect code for testing (default:false). "
"For developers only.");
 
module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600);
MODULE_PARM_DESC(invert_brightness,
"Invert backlight brightness "
"(-1 force normal, 0 machine defaults, 1 force inversion), please "
158,18 → 159,40
module_param_named(disable_display, i915.disable_display, bool, 0600);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
 
module_param_named(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
module_param_named_unsafe(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
 
module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser,
"Enable command parsing (1=enabled [default], 0=disabled)");
 
module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600);
MODULE_PARM_DESC(use_mmio_flip,
"use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
 
module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
module_param_named(mmio_debug, i915.mmio_debug, int, 0600);
MODULE_PARM_DESC(mmio_debug,
"Enable the MMIO debug code (default: false). This may negatively "
"affect performance.");
"Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance.");
 
module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
MODULE_PARM_DESC(verbose_state_checks,
"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
 
module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
MODULE_PARM_DESC(nuclear_pageflip,
"Force atomic modeset functionality; asynchronous mode is not yet supported. (default: false).");
 
/* WA to get away with the default setting in VBT for early platforms. Will be removed */
module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
MODULE_PARM_DESC(edp_vswing,
"Ignore/Override vswing pre-emph table selection from VBT "
"(0=use value from vbt [default], 1=low power swing(200mV),"
"2=default swing(400mV))");
 
module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400);
MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)");
 
module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
MODULE_PARM_DESC(guc_log_level,
"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
/drivers/video/drm/i915/i915_reg.h
31,6 → 31,8
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
(pipe) == PIPE_B ? (b) : (c))
#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
(port) == PORT_B ? (b) : (c))
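_PORT3() extends the two-way _PORT() helper to three register instances, selecting by port identity instead of by arithmetic. A standalone illustration (a sketch; the enum stands in for the driver's enum port):

#include <assert.h>

enum port { PORT_A, PORT_B, PORT_C };

#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
			       (port) == PORT_B ? (b) : (c))

int main(void)
{
	/* Ports A/B/C pick the first/second/third address respectively. */
	assert(_PORT3(PORT_A, 0x100, 0x250, 0x300) == 0x100);
	assert(_PORT3(PORT_C, 0x100, 0x250, 0x300) == 0x300);
	return 0;
}

The conditional form is needed because the three instances are not equally spaced, so the linear (a) + (port)*((b)-(a)) trick used by _PORT() cannot address them.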
 
#define _MASKED_FIELD(mask, value) ({ \
if (__builtin_constant_p(mask)) \
48,12 → 50,17
 
/* PCI config space */
 
#define HPLLCC 0xc0 /* 855 only */
#define GC_CLOCK_CONTROL_MASK (0xf << 0)
#define HPLLCC 0xc0 /* 85x only */
#define GC_CLOCK_CONTROL_MASK (0x7 << 0)
#define GC_CLOCK_133_200 (0 << 0)
#define GC_CLOCK_100_200 (1 << 0)
#define GC_CLOCK_100_133 (2 << 0)
#define GC_CLOCK_166_250 (3 << 0)
#define GC_CLOCK_133_266 (3 << 0)
#define GC_CLOCK_133_200_2 (4 << 0)
#define GC_CLOCK_133_266_2 (5 << 0)
#define GC_CLOCK_166_266 (6 << 0)
#define GC_CLOCK_166_250 (7 << 0)
 
#define GCFGC2 0xda
#define GCFGC 0xf0 /* 915+ only */
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
98,7 → 105,7
#define GRDOM_RESET_STATUS (1<<1)
#define GRDOM_RESET_ENABLE (1<<0)
 
#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
#define ILK_GDSR (MCHBAR_MIRROR_BASE + 0x2ca4)
#define ILK_GRDOM_FULL (0<<1)
#define ILK_GRDOM_RENDER (1<<1)
#define ILK_GRDOM_MEDIA (3<<1)
137,8 → 144,23
#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4))
#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8)
 
#define GEN8_R_PWR_CLK_STATE 0x20C8
#define GEN8_RPCS_ENABLE (1 << 31)
#define GEN8_RPCS_S_CNT_ENABLE (1 << 18)
#define GEN8_RPCS_S_CNT_SHIFT 15
#define GEN8_RPCS_S_CNT_MASK (0x7 << GEN8_RPCS_S_CNT_SHIFT)
#define GEN8_RPCS_SS_CNT_ENABLE (1 << 11)
#define GEN8_RPCS_SS_CNT_SHIFT 8
#define GEN8_RPCS_SS_CNT_MASK (0x7 << GEN8_RPCS_SS_CNT_SHIFT)
#define GEN8_RPCS_EU_MAX_SHIFT 4
#define GEN8_RPCS_EU_MAX_MASK (0xf << GEN8_RPCS_EU_MAX_SHIFT)
#define GEN8_RPCS_EU_MIN_SHIFT 0
#define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT)
 
#define GAM_ECOCHK 0x4090
#define BDW_DISABLE_HDC_INVALIDATION (1<<25)
#define ECOCHK_SNB_BIT (1<<10)
#define ECOCHK_DIS_TLB (1<<8)
#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
156,14 → 178,23
#define GAB_CTL 0x24000
#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
 
#define GEN7_BIOS_RESERVED 0x1082C0
#define GEN7_BIOS_RESERVED_1M (0 << 5)
#define GEN7_BIOS_RESERVED_256K (1 << 5)
#define GEN8_BIOS_RESERVED_SHIFT 7
#define GEN7_BIOS_RESERVED_MASK 0x1
#define GEN8_BIOS_RESERVED_MASK 0x3
#define GEN6_STOLEN_RESERVED 0x1082C0
#define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20)
#define GEN7_STOLEN_RESERVED_ADDR_MASK (0x3FFF << 18)
#define GEN6_STOLEN_RESERVED_SIZE_MASK (3 << 4)
#define GEN6_STOLEN_RESERVED_1M (0 << 4)
#define GEN6_STOLEN_RESERVED_512K (1 << 4)
#define GEN6_STOLEN_RESERVED_256K (2 << 4)
#define GEN6_STOLEN_RESERVED_128K (3 << 4)
#define GEN7_STOLEN_RESERVED_SIZE_MASK (1 << 5)
#define GEN7_STOLEN_RESERVED_1M (0 << 5)
#define GEN7_STOLEN_RESERVED_256K (1 << 5)
#define GEN8_STOLEN_RESERVED_SIZE_MASK (3 << 7)
#define GEN8_STOLEN_RESERVED_1M (0 << 7)
#define GEN8_STOLEN_RESERVED_2M (1 << 7)
#define GEN8_STOLEN_RESERVED_4M (2 << 7)
#define GEN8_STOLEN_RESERVED_8M (3 << 7)
 
 
/* VGA stuff */
 
#define VGA_ST01_MDA 0x3ba
217,6 → 248,8
#define INSTR_SUBCLIENT_SHIFT 27
#define INSTR_SUBCLIENT_MASK 0x18000000
#define INSTR_MEDIA_SUBCLIENT 0x2
#define INSTR_26_TO_24_MASK 0x7000000
#define INSTR_26_TO_24_SHIFT 24
 
/*
* Memory interface instructions used by the kernel
246,6 → 279,7
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
#define MI_SUSPEND_FLUSH_EN (1<<0)
#define MI_SET_APPID MI_INSTR(0x0e, 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
#define MI_OVERLAY_CONTINUE (0x0<<21)
#define MI_OVERLAY_ON (0x1<<21)
297,6 → 331,8
#define MI_RESTORE_EXT_STATE_EN (1<<2)
#define MI_FORCE_RESTORE (1<<1)
#define MI_RESTORE_INHIBIT (1<<0)
#define HSW_MI_RS_SAVE_STATE_EN (1<<3)
#define HSW_MI_RS_RESTORE_STATE_EN (1<<2)
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
303,8 → 339,9
#define MI_SEMAPHORE_POLL (1<<15)
#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_STORE_DWORD_IMM_GEN8 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
#define MI_USE_GGTT (1 << 22) /* g4x+ */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
315,8 → 352,8
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
#define MI_LRI_FORCE_POSTED (1<<12)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1)
#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1)
#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1)
#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2)
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
327,6 → 364,8
#define MI_INVALIDATE_BSD (1<<7)
#define MI_FLUSH_DW_USE_GTT (1<<2)
#define MI_FLUSH_DW_USE_PPGTT (0<<2)
#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1)
#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2)
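The flags argument of MI_INSTR() carries the command's dword-length field, which follows the usual "total dwords minus two" convention. A quick arithmetic check for MI_LOAD_REGISTER_IMM (a sketch; it assumes MI_INSTR(opcode, flags) expands to ((opcode) << 23) | (flags), as defined earlier in this header):

#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)

/* x (register, value) pairs plus one header dword = 2x + 1 dwords
 * in total, so the encoded length is (2x + 1) - 2 = 2x - 1. */
_Static_assert((MI_LOAD_REGISTER_IMM(1) & 0x3f) == 1,
	       "one pair: 3 dwords, length field 1");
_Static_assert((MI_LOAD_REGISTER_IMM(4) & 0x3f) == 7,
	       "four pairs: 9 dwords, length field 7");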
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
336,6 → 375,7
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
#define MI_BATCH_RESOURCE_STREAMER (1<<10)
 
#define MI_PREDICATE_SRC0 (0x2400)
#define MI_PREDICATE_SRC1 (0x2408)
389,7 → 429,8
#define ASYNC_FLIP (1<<22)
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
#define PIPE_CONTROL_FLUSH_L3 (1<<27)
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
#define PIPE_CONTROL_MMIO_WRITE (1<<23)
#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
406,6 → 447,7
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
#define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5)
#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
429,7 → 471,6
#define MI_CLFLUSH MI_INSTR(0x27, 0)
#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 0)
#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
470,6 → 511,7
*/
#define BCS_SWCTRL 0x22200
 
#define GPGPU_THREADS_DISPATCHED 0x2290
#define HS_INVOCATION_COUNT 0x2300
#define DS_INVOCATION_COUNT 0x2308
#define IA_VERTICES_COUNT 0x2310
494,6 → 536,10
#define GEN7_3DPRIM_START_INSTANCE 0x243C
#define GEN7_3DPRIM_BASE_VERTEX 0x2440
 
#define GEN7_GPGPU_DISPATCHDIMX 0x2500
#define GEN7_GPGPU_DISPATCHDIMY 0x2504
#define GEN7_GPGPU_DISPATCHDIMZ 0x2508
 
#define OACONTROL 0x2360
 
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
545,6 → 591,9
#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
#define DSPFREQGUAR_SHIFT 14
#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
#define DSP_MAXFIFO_PM5_STATUS (1 << 22) /* chv */
#define DSP_AUTO_CDCLK_GATE_DISABLE (1 << 7) /* chv */
#define DSP_MAXFIFO_PM5_ENABLE (1 << 6) /* chv */
#define _DP_SSC(val, pipe) ((val) << (2 * (pipe)))
#define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe))
#define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe))
571,14 → 620,23
PUNIT_POWER_WELL_DPIO_RX0 = 10,
PUNIT_POWER_WELL_DPIO_RX1 = 11,
PUNIT_POWER_WELL_DPIO_CMN_D = 12,
/* FIXME: guesswork below */
PUNIT_POWER_WELL_DPIO_TX_D_LANES_01 = 13,
PUNIT_POWER_WELL_DPIO_TX_D_LANES_23 = 14,
PUNIT_POWER_WELL_DPIO_RX2 = 15,
 
PUNIT_POWER_WELL_NUM,
};
 
enum skl_disp_power_wells {
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_DDI_A_E,
SKL_DISP_PW_DDI_B,
SKL_DISP_PW_DDI_C,
SKL_DISP_PW_DDI_D,
SKL_DISP_PW_1 = 14,
SKL_DISP_PW_2,
};
 
#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
#define SKL_POWER_WELL_REQ(pw) (1 << (((pw) * 2) + 1))
 
#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2))
598,6 → 656,20
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
 
#define FB_GFX_FMAX_AT_VMAX_FUSE 0x136
#define FB_GFX_FREQ_FUSE_MASK 0xff
#define FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT 24
#define FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT 16
#define FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT 8
 
#define FB_GFX_FMIN_AT_VMIN_FUSE 0x137
#define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8
 
#define PUNIT_REG_DDR_SETUP2 0x139
#define FORCE_DDR_FREQ_REQ_ACK (1 << 8)
#define FORCE_DDR_LOW_FREQ (1 << 1)
#define FORCE_DDR_HIGH_FREQ (1 << 0)
 
#define PUNIT_GPU_STATUS_REG 0xdb
#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16
#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff
619,10 → 691,13
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
 
#define VLV_TURBO_SOC_OVERRIDE 0x04
#define VLV_OVERRIDE_EN 1
#define VLV_SOC_TDP_EN (1 << 1)
#define VLV_BIAS_CPU_125_SOC_875 (6 << 2)
#define CHV_BIAS_CPU_50_SOC_50 (3 << 2)
 
#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
#define VLV_RP_UP_EI_THRESHOLD 90
#define VLV_RP_DOWN_EI_THRESHOLD 70
#define VLV_INT_COUNT_FOR_DOWN_EI 5
 
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
657,17 → 732,18
#define DSI_PLL_N1_DIV_MASK (3 << 16)
#define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
#define CCK_CZ_CLOCK_CONTROL 0x62
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
#define DISPLAY_TRUNK_FORCE_ON (1 << 17)
#define DISPLAY_TRUNK_FORCE_OFF (1 << 16)
#define DISPLAY_FREQUENCY_STATUS (0x1f << 8)
#define DISPLAY_FREQUENCY_STATUS_SHIFT 8
#define DISPLAY_FREQUENCY_VALUES (0x1f << 0)
#define CCK_TRUNK_FORCE_ON (1 << 17)
#define CCK_TRUNK_FORCE_OFF (1 << 16)
#define CCK_FREQUENCY_STATUS (0x1f << 8)
#define CCK_FREQUENCY_STATUS_SHIFT 8
#define CCK_FREQUENCY_VALUES (0x1f << 0)
 
/**
* DOC: DPIO
*
* VLV and CHV have slightly peculiar display PHYs for driving DP/HDMI
* VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
* ports. DPIO is the name given to such a display PHY. These PHYs
* don't follow the standard programming model using direct MMIO
* registers, and instead their registers must be accessed through IOSF
698,7 → 774,7
* controlled from the display controller side. No DPIO registers
* need to be accessed during AUX communication.
*
* Generally the common lane corresponds to the pipe and
* Generally on VLV/CHV the common lane corresponds to the pipe and
* the spline (PCS/TX) corresponds to the port.
*
* For dual channel PHY (VLV/CHV):
720,11 → 796,17
*
* port D == PCS/TX CH0
*
* Note: digital port B is DDI0, digital port C is DDI1,
* digital port D is DDI2
* On BXT the entire PHY channel corresponds to the port. That means
* the PLL is also now associated with the port rather than the pipe,
* and so the clock needs to be routed to the appropriate transcoder.
* Port A PLL is directly connected to transcoder EDP and port B/C
* PLLs can be routed to any transcoder A/B/C.
*
* Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
* digital port D (CHV) or port A (BXT).
*/
/*
* Dual channel PHY (VLV/CHV)
* Dual channel PHY (VLV/CHV/BXT)
* ---------------------------------
* | CH0 | CH1 |
* | CMN/PLL/REF | CMN/PLL/REF |
736,7 → 818,7
* | DDI0 | DDI1 | DP/HDMI ports
* ---------------------------------
*
* Single channel PHY (CHV)
* Single channel PHY (CHV/BXT)
* -----------------
* | CH0 |
* | CMN/PLL/REF |
901,6 → 983,7
 
#define _VLV_PCS_DW11_CH0 0x822c
#define _VLV_PCS_DW11_CH1 0x842c
#define DPIO_TX2_STAGGER_MASK(x) ((x)<<24)
#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3)
#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1)
#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
913,8 → 996,20
#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
 
#define _VLV_PCS01_DW12_CH0 0x0230
#define _VLV_PCS23_DW12_CH0 0x0430
#define _VLV_PCS01_DW12_CH1 0x2630
#define _VLV_PCS23_DW12_CH1 0x2830
#define VLV_PCS01_DW12(ch) _PORT(ch, _VLV_PCS01_DW12_CH0, _VLV_PCS01_DW12_CH1)
#define VLV_PCS23_DW12(ch) _PORT(ch, _VLV_PCS23_DW12_CH0, _VLV_PCS23_DW12_CH1)
 
#define _VLV_PCS_DW12_CH0 0x8230
#define _VLV_PCS_DW12_CH1 0x8430
#define DPIO_TX2_STAGGER_MULT(x) ((x)<<20)
#define DPIO_TX1_STAGGER_MULT(x) ((x)<<16)
#define DPIO_TX1_STAGGER_MASK(x) ((x)<<8)
#define DPIO_LANESTAGGER_STRAP_OVRD (1<<6)
#define DPIO_LANESTAGGER_STRAP(x) ((x)<<0)
#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
 
#define _VLV_PCS_DW14_CH0 0x8238
986,6 → 1081,7
#define DPIO_CHV_FIRST_MOD (0 << 8)
#define DPIO_CHV_SECOND_MOD (1 << 8)
#define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0
#define DPIO_CHV_FEEDFWD_GAIN_MASK (0xF << 0)
#define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1)
 
#define _CHV_PLL_DW6_CH0 0x8018
995,6 → 1091,25
#define DPIO_CHV_PROP_COEFF_SHIFT 0
#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
 
#define _CHV_PLL_DW8_CH0 0x8020
#define _CHV_PLL_DW8_CH1 0x81A0
#define DPIO_CHV_TDC_TARGET_CNT_SHIFT 0
#define DPIO_CHV_TDC_TARGET_CNT_MASK (0x3FF << 0)
#define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1)
 
#define _CHV_PLL_DW9_CH0 0x8024
#define _CHV_PLL_DW9_CH1 0x81A4
#define DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT 1 /* 3 bits */
#define DPIO_CHV_INT_LOCK_THRESHOLD_MASK (7 << 1)
#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */
#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1)
 
#define _CHV_CMN_DW0_CH0 0x8100
#define DPIO_ALLDL_POWERDOWN_SHIFT_CH0 19
#define DPIO_ANYDL_POWERDOWN_SHIFT_CH0 18
#define DPIO_ALLDL_POWERDOWN (1 << 1)
#define DPIO_ANYDL_POWERDOWN (1 << 0)
 
#define _CHV_CMN_DW5_CH0 0x8114
#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
1031,10 → 1146,23
 
#define _CHV_CMN_DW19_CH0 0x814c
#define _CHV_CMN_DW6_CH1 0x8098
#define DPIO_ALLDL_POWERDOWN_SHIFT_CH1 30 /* CL2 DW6 only */
#define DPIO_ANYDL_POWERDOWN_SHIFT_CH1 29 /* CL2 DW6 only */
#define DPIO_DYNPWRDOWNEN_CH1 (1 << 28) /* CL2 DW6 only */
#define CHV_CMN_USEDCLKCHANNEL (1 << 13)
 
#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
 
#define CHV_CMN_DW28 0x8170
#define DPIO_CL1POWERDOWNEN (1 << 23)
#define DPIO_DYNPWRDOWNEN_CH0 (1 << 22)
#define DPIO_SUS_CLK_CONFIG_ON (0 << 0)
#define DPIO_SUS_CLK_CONFIG_CLKREQ (1 << 0)
#define DPIO_SUS_CLK_CONFIG_GATE (2 << 0)
#define DPIO_SUS_CLK_CONFIG_GATE_CLKREQ (3 << 0)
 
#define CHV_CMN_DW30 0x8178
#define DPIO_CL2_LDOFUSE_PWRENB (1 << 6)
#define DPIO_LRC_BYPASS (1 << 3)
 
#define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \
1055,11 → 1183,274
#define DPIO_FRC_LATENCY_SHFIT 8
#define CHV_TX_DW14(ch, lane) _TXLANE(ch, lane, 0xb8)
#define DPIO_UPAR_SHIFT 30
 
/* BXT PHY registers */
#define _BXT_PHY(phy, a, b) _PIPE((phy), (a), (b))
 
#define BXT_P_CR_GT_DISP_PWRON 0x138090
#define GT_DISPLAY_POWER_ON(phy) (1 << (phy))
 
#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI 0x64C90
#define COMMON_RESET_DIS (1 << 31)
#define BXT_PHY_CTL_FAMILY(phy) _BXT_PHY((phy), _PHY_CTL_FAMILY_DDI, \
_PHY_CTL_FAMILY_EDP)
 
/* BXT PHY PLL registers */
#define _PORT_PLL_A 0x46074
#define _PORT_PLL_B 0x46078
#define _PORT_PLL_C 0x4607c
#define PORT_PLL_ENABLE (1 << 31)
#define PORT_PLL_LOCK (1 << 30)
#define PORT_PLL_REF_SEL (1 << 27)
#define BXT_PORT_PLL_ENABLE(port) _PORT(port, _PORT_PLL_A, _PORT_PLL_B)
 
#define _PORT_PLL_EBB_0_A 0x162034
#define _PORT_PLL_EBB_0_B 0x6C034
#define _PORT_PLL_EBB_0_C 0x6C340
#define PORT_PLL_P1_SHIFT 13
#define PORT_PLL_P1_MASK (0x07 << PORT_PLL_P1_SHIFT)
#define PORT_PLL_P1(x) ((x) << PORT_PLL_P1_SHIFT)
#define PORT_PLL_P2_SHIFT 8
#define PORT_PLL_P2_MASK (0x1f << PORT_PLL_P2_SHIFT)
#define PORT_PLL_P2(x) ((x) << PORT_PLL_P2_SHIFT)
#define BXT_PORT_PLL_EBB_0(port) _PORT3(port, _PORT_PLL_EBB_0_A, \
_PORT_PLL_EBB_0_B, \
_PORT_PLL_EBB_0_C)
 
#define _PORT_PLL_EBB_4_A 0x162038
#define _PORT_PLL_EBB_4_B 0x6C038
#define _PORT_PLL_EBB_4_C 0x6C344
#define PORT_PLL_10BIT_CLK_ENABLE (1 << 13)
#define PORT_PLL_RECALIBRATE (1 << 14)
#define BXT_PORT_PLL_EBB_4(port) _PORT3(port, _PORT_PLL_EBB_4_A, \
_PORT_PLL_EBB_4_B, \
_PORT_PLL_EBB_4_C)
 
#define _PORT_PLL_0_A 0x162100
#define _PORT_PLL_0_B 0x6C100
#define _PORT_PLL_0_C 0x6C380
/* PORT_PLL_0_A */
#define PORT_PLL_M2_MASK 0xFF
/* PORT_PLL_1_A */
#define PORT_PLL_N_SHIFT 8
#define PORT_PLL_N_MASK (0x0F << PORT_PLL_N_SHIFT)
#define PORT_PLL_N(x) ((x) << PORT_PLL_N_SHIFT)
/* PORT_PLL_2_A */
#define PORT_PLL_M2_FRAC_MASK 0x3FFFFF
/* PORT_PLL_3_A */
#define PORT_PLL_M2_FRAC_ENABLE (1 << 16)
/* PORT_PLL_6_A */
#define PORT_PLL_PROP_COEFF_MASK 0xF
#define PORT_PLL_INT_COEFF_MASK (0x1F << 8)
#define PORT_PLL_INT_COEFF(x) ((x) << 8)
#define PORT_PLL_GAIN_CTL_MASK (0x07 << 16)
#define PORT_PLL_GAIN_CTL(x) ((x) << 16)
/* PORT_PLL_8_A */
#define PORT_PLL_TARGET_CNT_MASK 0x3FF
/* PORT_PLL_9_A */
#define PORT_PLL_LOCK_THRESHOLD_SHIFT 1
#define PORT_PLL_LOCK_THRESHOLD_MASK (0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT)
/* PORT_PLL_10_A */
#define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27)
#define PORT_PLL_DCO_AMP_DEFAULT 15
#define PORT_PLL_DCO_AMP_MASK 0x3c00
#define PORT_PLL_DCO_AMP(x) ((x)<<10)
#define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \
_PORT_PLL_0_B, \
_PORT_PLL_0_C)
#define BXT_PORT_PLL(port, idx) (_PORT_PLL_BASE(port) + (idx) * 4)
 
/* BXT PHY common lane registers */
#define _PORT_CL1CM_DW0_A 0x162000
#define _PORT_CL1CM_DW0_BC 0x6C000
#define PHY_POWER_GOOD (1 << 16)
#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \
_PORT_CL1CM_DW0_A)
 
#define _PORT_CL1CM_DW9_A 0x162024
#define _PORT_CL1CM_DW9_BC 0x6C024
#define IREF0RC_OFFSET_SHIFT 8
#define IREF0RC_OFFSET_MASK (0xFF << IREF0RC_OFFSET_SHIFT)
#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC, \
_PORT_CL1CM_DW9_A)
 
#define _PORT_CL1CM_DW10_A 0x162028
#define _PORT_CL1CM_DW10_BC 0x6C028
#define IREF1RC_OFFSET_SHIFT 8
#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC, \
_PORT_CL1CM_DW10_A)
 
#define _PORT_CL1CM_DW28_A 0x162070
#define _PORT_CL1CM_DW28_BC 0x6C070
#define OCL1_POWER_DOWN_EN (1 << 23)
#define DW28_OLDO_DYN_PWR_DOWN_EN (1 << 22)
#define SUS_CLK_CONFIG 0x3
#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC, \
_PORT_CL1CM_DW28_A)
 
#define _PORT_CL1CM_DW30_A 0x162078
#define _PORT_CL1CM_DW30_BC 0x6C078
#define OCL2_LDOFUSE_PWR_DIS (1 << 6)
#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC, \
_PORT_CL1CM_DW30_A)
 
/* Defined for PHY0 only */
#define BXT_PORT_CL2CM_DW6_BC 0x6C358
#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
 
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
#define _PORT_REF_DW3_BC 0x6C18C
#define GRC_DONE (1 << 22)
#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC, \
_PORT_REF_DW3_A)
 
#define _PORT_REF_DW6_A 0x162198
#define _PORT_REF_DW6_BC 0x6C198
/*
* FIXME: BSpec/CHV ConfigDB disagrees on the following two fields, fix them
* after testing.
*/
#define GRC_CODE_SHIFT 23
#define GRC_CODE_MASK (0x1FF << GRC_CODE_SHIFT)
#define GRC_CODE_FAST_SHIFT 16
#define GRC_CODE_FAST_MASK (0x7F << GRC_CODE_FAST_SHIFT)
#define GRC_CODE_SLOW_SHIFT 8
#define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT)
#define GRC_CODE_NOM_MASK 0xFF
#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC, \
_PORT_REF_DW6_A)
 
#define _PORT_REF_DW8_A 0x1621A0
#define _PORT_REF_DW8_BC 0x6C1A0
#define GRC_DIS (1 << 15)
#define GRC_RDY_OVRD (1 << 1)
#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC, \
_PORT_REF_DW8_A)
 
/* BXT PHY PCS registers */
#define _PORT_PCS_DW10_LN01_A 0x162428
#define _PORT_PCS_DW10_LN01_B 0x6C428
#define _PORT_PCS_DW10_LN01_C 0x6C828
#define _PORT_PCS_DW10_GRP_A 0x162C28
#define _PORT_PCS_DW10_GRP_B 0x6CC28
#define _PORT_PCS_DW10_GRP_C 0x6CE28
#define BXT_PORT_PCS_DW10_LN01(port) _PORT3(port, _PORT_PCS_DW10_LN01_A, \
_PORT_PCS_DW10_LN01_B, \
_PORT_PCS_DW10_LN01_C)
#define BXT_PORT_PCS_DW10_GRP(port) _PORT3(port, _PORT_PCS_DW10_GRP_A, \
_PORT_PCS_DW10_GRP_B, \
_PORT_PCS_DW10_GRP_C)
#define TX2_SWING_CALC_INIT (1 << 31)
#define TX1_SWING_CALC_INIT (1 << 30)
 
#define _PORT_PCS_DW12_LN01_A 0x162430
#define _PORT_PCS_DW12_LN01_B 0x6C430
#define _PORT_PCS_DW12_LN01_C 0x6C830
#define _PORT_PCS_DW12_LN23_A 0x162630
#define _PORT_PCS_DW12_LN23_B 0x6C630
#define _PORT_PCS_DW12_LN23_C 0x6CA30
#define _PORT_PCS_DW12_GRP_A 0x162c30
#define _PORT_PCS_DW12_GRP_B 0x6CC30
#define _PORT_PCS_DW12_GRP_C 0x6CE30
#define LANESTAGGER_STRAP_OVRD (1 << 6)
#define LANE_STAGGER_MASK 0x1F
#define BXT_PORT_PCS_DW12_LN01(port) _PORT3(port, _PORT_PCS_DW12_LN01_A, \
_PORT_PCS_DW12_LN01_B, \
_PORT_PCS_DW12_LN01_C)
#define BXT_PORT_PCS_DW12_LN23(port) _PORT3(port, _PORT_PCS_DW12_LN23_A, \
_PORT_PCS_DW12_LN23_B, \
_PORT_PCS_DW12_LN23_C)
#define BXT_PORT_PCS_DW12_GRP(port) _PORT3(port, _PORT_PCS_DW12_GRP_A, \
_PORT_PCS_DW12_GRP_B, \
_PORT_PCS_DW12_GRP_C)
 
/* BXT PHY TX registers */
#define _BXT_LANE_OFFSET(lane) (((lane) >> 1) * 0x200 + \
((lane) & 1) * 0x80)
 
#define _PORT_TX_DW2_LN0_A 0x162508
#define _PORT_TX_DW2_LN0_B 0x6C508
#define _PORT_TX_DW2_LN0_C 0x6C908
#define _PORT_TX_DW2_GRP_A 0x162D08
#define _PORT_TX_DW2_GRP_B 0x6CD08
#define _PORT_TX_DW2_GRP_C 0x6CF08
#define BXT_PORT_TX_DW2_GRP(port) _PORT3(port, _PORT_TX_DW2_GRP_A, \
_PORT_TX_DW2_GRP_B, \
_PORT_TX_DW2_GRP_C)
#define BXT_PORT_TX_DW2_LN0(port) _PORT3(port, _PORT_TX_DW2_LN0_A, \
_PORT_TX_DW2_LN0_B, \
_PORT_TX_DW2_LN0_C)
#define MARGIN_000_SHIFT 16
#define MARGIN_000 (0xFF << MARGIN_000_SHIFT)
#define UNIQ_TRANS_SCALE_SHIFT 8
#define UNIQ_TRANS_SCALE (0xFF << UNIQ_TRANS_SCALE_SHIFT)
 
#define _PORT_TX_DW3_LN0_A 0x16250C
#define _PORT_TX_DW3_LN0_B 0x6C50C
#define _PORT_TX_DW3_LN0_C 0x6C90C
#define _PORT_TX_DW3_GRP_A 0x162D0C
#define _PORT_TX_DW3_GRP_B 0x6CD0C
#define _PORT_TX_DW3_GRP_C 0x6CF0C
#define BXT_PORT_TX_DW3_GRP(port) _PORT3(port, _PORT_TX_DW3_GRP_A, \
_PORT_TX_DW3_GRP_B, \
_PORT_TX_DW3_GRP_C)
#define BXT_PORT_TX_DW3_LN0(port) _PORT3(port, _PORT_TX_DW3_LN0_A, \
_PORT_TX_DW3_LN0_B, \
_PORT_TX_DW3_LN0_C)
#define SCALE_DCOMP_METHOD (1 << 26)
#define UNIQUE_TRANGE_EN_METHOD (1 << 27)
 
#define _PORT_TX_DW4_LN0_A 0x162510
#define _PORT_TX_DW4_LN0_B 0x6C510
#define _PORT_TX_DW4_LN0_C 0x6C910
#define _PORT_TX_DW4_GRP_A 0x162D10
#define _PORT_TX_DW4_GRP_B 0x6CD10
#define _PORT_TX_DW4_GRP_C 0x6CF10
#define BXT_PORT_TX_DW4_LN0(port) _PORT3(port, _PORT_TX_DW4_LN0_A, \
_PORT_TX_DW4_LN0_B, \
_PORT_TX_DW4_LN0_C)
#define BXT_PORT_TX_DW4_GRP(port) _PORT3(port, _PORT_TX_DW4_GRP_A, \
_PORT_TX_DW4_GRP_B, \
_PORT_TX_DW4_GRP_C)
#define DEEMPH_SHIFT 24
#define DE_EMPHASIS (0xFF << DEEMPH_SHIFT)
 
#define _PORT_TX_DW14_LN0_A 0x162538
#define _PORT_TX_DW14_LN0_B 0x6C538
#define _PORT_TX_DW14_LN0_C 0x6C938
#define LATENCY_OPTIM_SHIFT 30
#define LATENCY_OPTIM (1 << LATENCY_OPTIM_SHIFT)
#define BXT_PORT_TX_DW14_LN(port, lane) (_PORT3((port), _PORT_TX_DW14_LN0_A, \
_PORT_TX_DW14_LN0_B, \
_PORT_TX_DW14_LN0_C) + \
_BXT_LANE_OFFSET(lane))
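The lane addressing above packs the four lanes as two pairs 0x200 apart, with the odd lane of each pair a further 0x80 up. Checking the arithmetic (a sketch restating the macro defined above):

#define _BXT_LANE_OFFSET(lane) (((lane) >> 1) * 0x200 + \
				((lane) & 1) * 0x80)

_Static_assert(_BXT_LANE_OFFSET(0) == 0x000, "lane 0, first pair");
_Static_assert(_BXT_LANE_OFFSET(1) == 0x080, "lane 1, first pair");
_Static_assert(_BXT_LANE_OFFSET(2) == 0x200, "lane 2, second pair");
_Static_assert(_BXT_LANE_OFFSET(3) == 0x280, "lane 3, second pair");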
 
/* UAIMI scratch pad register 1 */
#define UAIMI_SPR1 0x4F074
/* SKL VccIO mask */
#define SKL_VCCIO_MASK 0x1
/* SKL balance leg register */
#define DISPIO_CR_TX_BMU_CR0 0x6C00C
/* I_boost values */
#define BALANCE_LEG_SHIFT(port) (8+3*(port))
#define BALANCE_LEG_MASK(port) (7<<(8+3*(port)))
/* Balance leg disable bits */
#define BALANCE_LEG_DISABLE_SHIFT 23
 
/*
* Fence registers
* [0-7] @ 0x2000 gen2,gen3
* [8-15] @ 0x3000 945,g33,pnv
*
* [0-15] @ 0x3000 gen4,gen5
*
* [0-15] @ 0x100000 gen6,vlv,chv
* [0-31] @ 0x100000 gen7+
*/
#define FENCE_REG_830_0 0x2000
#define FENCE_REG_945_8 0x3000
#define FENCE_REG(i) (0x2000 + (((i) & 8) << 9) + ((i) & 7) * 4)
#define I830_FENCE_START_MASK 0x07f80000
#define I830_FENCE_TILING_Y_SHIFT 12
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
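FENCE_REG() folds the split layout from the comment above into a single expression: ((i) & 8) << 9 contributes exactly 0x1000 for fences 8-15, moving them to the 0x3000 bank, while ((i) & 7) * 4 indexes within each bank. A quick check (a sketch restating the macro defined above):

#define FENCE_REG(i) (0x2000 + (((i) & 8) << 9) + ((i) & 7) * 4)

_Static_assert(FENCE_REG(3)  == 0x200c, "fence 3: lower bank at 0x2000");
_Static_assert(FENCE_REG(8)  == 0x3000, "fence 8: first of the upper bank");
_Static_assert(FENCE_REG(10) == 0x3008, "fence 10: upper bank at 0x3000");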
1072,14 → 1463,16
#define I915_FENCE_START_MASK 0x0ff00000
#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)
 
#define FENCE_REG_965_0 0x03000
#define FENCE_REG_965_LO(i) (0x03000 + (i) * 8)
#define FENCE_REG_965_HI(i) (0x03000 + (i) * 8 + 4)
#define I965_FENCE_PITCH_SHIFT 2
#define I965_FENCE_TILING_Y_SHIFT 1
#define I965_FENCE_REG_VALID (1<<0)
#define I965_FENCE_MAX_PITCH_VAL 0x0400
 
#define FENCE_REG_SANDYBRIDGE_0 0x100000
#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
#define FENCE_REG_GEN6_LO(i) (0x100000 + (i) * 8)
#define FENCE_REG_GEN6_HI(i) (0x100000 + (i) * 8 + 4)
#define GEN6_FENCE_PITCH_SHIFT 32
#define GEN7_FENCE_MAX_PITCH_VAL 0x0800
 
 
1086,6 → 1479,7
/* control register for cpu gtt access */
#define TILECTL 0x101000
#define TILECTL_SWZCTL (1 << 0)
#define TILECTL_TLBPF (1 << 1)
#define TILECTL_TLB_PREFETCH_DIS (1 << 2)
#define TILECTL_BACKSNOOP_DIS (1 << 3)
 
1133,7 → 1527,12
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
#define RING_RESET_CTL(base) ((base)+0xd0)
#define RESET_CTL_REQUEST_RESET (1 << 0)
#define RESET_CTL_READY_TO_RESET (1 << 1)
 
#define HSW_GTT_CACHE_EN 0x4024
#define GTT_CACHE_EN_ALL 0xF0007FFF
#define GEN7_WR_WATERMARK 0x4028
#define GEN7_GFX_PRIO_CTRL 0x402C
#define ARB_MODE 0x4030
1142,7 → 1541,7
#define GEN7_GFX_PEND_TLB0 0x4034
#define GEN7_GFX_PEND_TLB1 0x4038
/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
#define GEN7_LRA_LIMITS_BASE 0x403C
#define GEN7_LRA_LIMITS(i) (0x403C + (i) * 4)
#define GEN7_LRA_LIMITS_REG_NUM 13
#define GEN7_MEDIA_MAX_REQ_COUNT 0x4070
#define GEN7_GFX_MAX_REQ_COUNT 0x4074
1153,11 → 1552,12
#define RENDER_HWS_PGA_GEN7 (0x04080)
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
#define RING_FAULT_GTTSEL_MASK (1<<11)
#define RING_FAULT_SRCID(x) ((x >> 3) & 0xff)
#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
#define RING_FAULT_VALID (1<<0)
#define DONE_REG 0x40b0
#define GEN8_PRIVATE_PAT 0x40e0
#define GEN8_PRIVATE_PAT_LO 0x40e0
#define GEN8_PRIVATE_PAT_HI (0x40e0 + 4)
#define BSD_HWS_PGA_GEN7 (0x04180)
#define BLT_HWS_PGA_GEN7 (0x04280)
#define VEBOX_HWS_PGA_GEN7 (0x04380)
1197,8 → 1597,6
#endif
#define IPEIR_I965 0x02064
#define IPEHR_I965 0x02068
#define INSTDONE_I965 0x0206c
#define GEN7_INSTDONE_1 0x0206c
#define GEN7_SC_INSTDONE 0x07100
#define GEN7_SAMPLER_INSTDONE 0x0e160
#define GEN7_ROW_INSTDONE 0x0e164
1205,6 → 1603,11
#define I915_NUM_INSTDONE_REG 4
#define RING_IPEIR(base) ((base)+0x64)
#define RING_IPEHR(base) ((base)+0x68)
/*
* On GEN4, only the render ring INSTDONE exists and has a different
* layout than the GEN7+ version.
* The GEN2 counterpart of this register is GEN2_INSTDONE.
*/
#define RING_INSTDONE(base) ((base)+0x6c)
#define RING_INSTPS(base) ((base)+0x70)
#define RING_DMA_FADD(base) ((base)+0x78)
1212,7 → 1615,7
#define RING_INSTPM(base) ((base)+0xc0)
#define RING_MI_MODE(base) ((base)+0x9c)
#define INSTPS 0x02070 /* 965+ only */
#define INSTDONE1 0x0207c /* 965+ only */
#define GEN4_INSTDONE1 0x0207c /* 965+ only, aka INSTDONE_2 on SNB */
#define ACTHD_I965 0x02074
#define HWS_PGA 0x02080
#define HWS_ADDRESS_MASK 0xfffff000
1221,7 → 1624,7
#define PWRCTX_EN (1<<0)
#define IPEIR 0x02088
#define IPEHR 0x0208c
#define INSTDONE 0x02090
#define GEN2_INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
#define DMA_FADD_I8XX 0x020d0
1238,10 → 1641,13
#define ERR_INT_PIPE_CRC_DONE_B (1<<5)
#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
#define ERR_INT_PIPE_CRC_DONE_A (1<<2)
#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + pipe*3))
#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + (pipe)*3))
#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
 
#define GEN8_FAULT_TLB_DATA0 0x04b10
#define GEN8_FAULT_TLB_DATA1 0x04b14
 
#define FPGA_DBG 0x42300
#define FPGA_DBG_RM_NOCLAIM (1<<31)
 
1298,17 → 1704,26
#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
 
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
#define GFX_RUN_LIST_ENABLE (1<<15)
#define GFX_INTERRUPT_STEERING (1<<14)
#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
#define GFX_SURFACE_FAULT_ENABLE (1<<12)
#define GFX_REPLAY_MODE (1<<11)
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
#define GEN8_GFX_PPGTT_48B (1<<7)
 
#define GFX_FORWARD_VBLANK_MASK (3<<5)
#define GFX_FORWARD_VBLANK_NEVER (0<<5)
#define GFX_FORWARD_VBLANK_ALWAYS (1<<5)
#define GFX_FORWARD_VBLANK_COND (2<<5)
 
#define VLV_DISPLAY_BASE 0x180000
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
 
1454,6 → 1869,7
#define CACHE_MODE_1 0x7004 /* IVB+ */
#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1)
 
#define GEN6_BLITTER_ECOSKPD 0x221d0
#define GEN6_BLITTER_LOCK_SHIFT 16
1464,6 → 1880,43
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
 
/* Fuse readout registers for GT */
#define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168)
#define CHV_FGT_DISABLE_SS0 (1 << 10)
#define CHV_FGT_DISABLE_SS1 (1 << 11)
#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16
#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT)
#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20
#define CHV_FGT_EU_DIS_SS0_R1_MASK (0xf << CHV_FGT_EU_DIS_SS0_R1_SHIFT)
#define CHV_FGT_EU_DIS_SS1_R0_SHIFT 24
#define CHV_FGT_EU_DIS_SS1_R0_MASK (0xf << CHV_FGT_EU_DIS_SS1_R0_SHIFT)
#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28
#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
 
#define GEN8_FUSE2 0x9120
#define GEN8_F2_SS_DIS_SHIFT 21
#define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT)
#define GEN8_F2_S_ENA_SHIFT 25
#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT)
 
#define GEN9_F2_SS_DIS_SHIFT 20
#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
 
#define GEN8_EU_DISABLE0 0x9134
#define GEN8_EU_DIS0_S0_MASK 0xffffff
#define GEN8_EU_DIS0_S1_SHIFT 24
#define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT)
 
#define GEN8_EU_DISABLE1 0x9138
#define GEN8_EU_DIS1_S1_MASK 0xffff
#define GEN8_EU_DIS1_S2_SHIFT 16
#define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT)
 
#define GEN8_EU_DISABLE2 0x913c
#define GEN8_EU_DIS2_S2_MASK 0xff
 
#define GEN9_EU_DISABLE(slice) (0x9134 + (slice)*0x4)
 
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
1509,7 → 1962,7
#define I915_ISP_INTERRUPT (1<<22)
#define I915_LPE_PIPE_B_INTERRUPT (1<<21)
#define I915_LPE_PIPE_A_INTERRUPT (1<<20)
#define I915_MIPIB_INTERRUPT (1<<19)
#define I915_MIPIC_INTERRUPT (1<<19)
#define I915_MIPIA_INTERRUPT (1<<18)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
1591,8 → 2044,11
#define FBC_CTL_CPU_FENCE (1<<1)
#define FBC_CTL_PLANE(plane) ((plane)<<0)
#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */
#define FBC_TAG 0x03300
#define FBC_TAG(i) (0x03300 + (i) * 4)
 
#define FBC_STATUS2 0x43214
#define FBC_COMPRESSION_MASK 0x7ff
 
#define FBC_LL_SIZE (1536)
 
/* Framebuffer compression for GM45+ */
1688,23 → 2144,26
# define GPIO_DATA_VAL_IN (1 << 12)
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
 
#define GMBUS0 0x5100 /* clock/port select */
#define GMBUS0 (dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
#define GMBUS_RATE_100KHZ (0<<8)
#define GMBUS_RATE_50KHZ (1<<8)
#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
#define GMBUS_PORT_DISABLED 0
#define GMBUS_PORT_SSC 1
#define GMBUS_PORT_VGADDC 2
#define GMBUS_PORT_PANEL 3
#define GMBUS_PORT_DPD_CHV 3 /* HDMID_CHV */
#define GMBUS_PORT_DPC 4 /* HDMIC */
#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
#define GMBUS_PORT_DPD 6 /* HDMID */
#define GMBUS_PORT_RESERVED 7 /* 7 reserved */
#define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1)
#define GMBUS1 0x5104 /* command/status */
#define GMBUS_PIN_DISABLED 0
#define GMBUS_PIN_SSC 1
#define GMBUS_PIN_VGADDC 2
#define GMBUS_PIN_PANEL 3
#define GMBUS_PIN_DPD_CHV 3 /* HDMID_CHV */
#define GMBUS_PIN_DPC 4 /* HDMIC */
#define GMBUS_PIN_DPB 5 /* SDVO, HDMIB */
#define GMBUS_PIN_DPD 6 /* HDMID */
#define GMBUS_PIN_RESERVED 7 /* 7 reserved */
#define GMBUS_PIN_1_BXT 1
#define GMBUS_PIN_2_BXT 2
#define GMBUS_PIN_3_BXT 3
#define GMBUS_NUM_PINS 7 /* including 0 */
#define GMBUS1 (dev_priv->gpio_mmio_base + 0x5104) /* command/status */
#define GMBUS_SW_CLR_INT (1<<31)
#define GMBUS_SW_RDY (1<<30)
#define GMBUS_ENT (1<<29) /* enable timeout */
1713,11 → 2172,12
#define GMBUS_CYCLE_INDEX (2<<25)
#define GMBUS_CYCLE_STOP (4<<25)
#define GMBUS_BYTE_COUNT_SHIFT 16
#define GMBUS_BYTE_COUNT_MAX 256U
#define GMBUS_SLAVE_INDEX_SHIFT 8
#define GMBUS_SLAVE_ADDR_SHIFT 1
#define GMBUS_SLAVE_READ (1<<0)
#define GMBUS_SLAVE_WRITE (0<<0)
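/*
 * Editor's sketch, not part of the original header: a GMBUS1 command
 * word for an indexed read is composed from the fields above. This
 * assumes the kernel u32/u8 types and GMBUS_CYCLE_WAIT (1<<25), which
 * is defined with the other cycle types outside this hunk.
 */
static inline u32 gmbus1_indexed_read(u8 addr, u8 index, u32 len)
{
	/* read len bytes from 7-bit address addr, starting at register index */
	return GMBUS_SW_RDY | GMBUS_CYCLE_WAIT | GMBUS_CYCLE_INDEX |
	       (len << GMBUS_BYTE_COUNT_SHIFT) |
	       (index << GMBUS_SLAVE_INDEX_SHIFT) |
	       (addr << GMBUS_SLAVE_ADDR_SHIFT) |
	       GMBUS_SLAVE_READ;
}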
#define GMBUS2 0x5108 /* status */
#define GMBUS2 (dev_priv->gpio_mmio_base + 0x5108) /* status */
#define GMBUS_INUSE (1<<15)
#define GMBUS_HW_WAIT_PHASE (1<<14)
#define GMBUS_STALL_TIMEOUT (1<<13)
1725,14 → 2185,14
#define GMBUS_HW_RDY (1<<11)
#define GMBUS_SATOER (1<<10)
#define GMBUS_ACTIVE (1<<9)
#define GMBUS3 0x510c /* data buffer bytes 3-0 */
#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */
#define GMBUS3 (dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
#define GMBUS4 (dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
#define GMBUS_NAK_EN (1<<3)
#define GMBUS_IDLE_EN (1<<2)
#define GMBUS_HW_WAIT_EN (1<<1)
#define GMBUS_HW_RDY_EN (1<<0)
#define GMBUS5 0x5120 /* byte index */
#define GMBUS5 (dev_priv->gpio_mmio_base + 0x5120) /* byte index */
#define GMBUS_2BYTE_INDEX_EN (1<<31)
 
/*
1759,7 → 2219,7
#define DPLL_DVO_2X_MODE (1 << 30)
#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
#define DPLL_REF_CLK_ENABLE_VLV (1 << 29)
#define DPLL_VGA_MODE_DIS (1 << 28)
#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
1773,8 → 2233,8
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
#define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
#define DPLL_SSC_REF_CLOCK_CHV (1<<13)
#define DPLL_INTEGRATED_REF_CLK_VLV (1<<13)
#define DPLL_SSC_REF_CLK_CHV (1<<13)
#define DPLL_PORTC_READY_MASK (0xf << 4)
#define DPLL_PORTB_READY_MASK (0xf)
 
1784,9 → 2244,20
#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
#define DPLL_PORTD_READY_MASK (0xf)
#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2*(phy)+(ch)+27))
#define PHY_LDO_DELAY_0NS 0x0
#define PHY_LDO_DELAY_200NS 0x1
#define PHY_LDO_DELAY_600NS 0x2
#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2*(phy)+23))
#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8*(phy)+4*(ch)+11))
#define PHY_CH_SU_PSR 0x1
#define PHY_CH_DEEP_PSR 0x7
#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2))
#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6-(6*(phy)+3*(ch))))
#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8-(6*(phy)+3*(ch)+(spline))))
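/*
 * Editor's sketch, not part of the original header: the
 * DISPLAY_PHY_CONTROL helpers above place per-channel fields at
 * computed bit offsets. For phy 0, channel 1, PHY_CH_POWER_MODE()
 * shifts the mode to bit 6*0 + 3*1 + 2 = 5, so
 * PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, 0, 1) == 0x7 << 5, and the
 * matching override enable is PHY_CH_POWER_DOWN_OVRD_EN(0, 1) == 1 << 28.
 */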
 
/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
2021,6 → 2492,14
#define CDCLK_FREQ_SHIFT 4
#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
#define CZCLK_FREQ_MASK 0xf
 
#define GCI_CONTROL (VLV_DISPLAY_BASE + 0x650C)
#define PFI_CREDIT_63 (9 << 28) /* chv only */
#define PFI_CREDIT_31 (8 << 28) /* chv only */
#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */
#define PFI_CREDIT_RESEND (1 << 27)
#define VGA_FAST_MODE_DISABLE (1 << 14)
 
#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510)
 
/*
2029,8 → 2508,8
#define PALETTE_A_OFFSET 0xa000
#define PALETTE_B_OFFSET 0xa800
#define CHV_PALETTE_C_OFFSET 0xc000
#define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \
dev_priv->info.display_mmio_offset)
#define PALETTE(pipe, i) (dev_priv->info.palette_offsets[pipe] + \
dev_priv->info.display_mmio_offset + (i) * 4)
 
/* MCH MMIO space */
 
2048,6 → 2527,11
 
#define MCHBAR_MIRROR_BASE_SNB 0x140000
 
#define CTG_STOLEN_RESERVED (MCHBAR_MIRROR_BASE + 0x34)
#define ELK_STOLEN_RESERVED (MCHBAR_MIRROR_BASE + 0x48)
#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16)
#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4)
 
/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
 
2116,6 → 2600,9
#define CLKCFG_MEM_800 (3 << 4)
#define CLKCFG_MEM_MASK (7 << 4)
 
#define HPLLVCO (MCHBAR_MIRROR_BASE + 0xc38)
#define HPLLVCO_MOBILE (MCHBAR_MIRROR_BASE + 0xc0f)
 
#define TSC1 0x11001
#define TSE (1<<0)
#define TR1 0x11006
2125,7 → 2612,7
#define TSFS_INTR_MASK 0x000000ff
 
#define CRSTANDVID 0x11100
#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
#define PXVFREQ(i) (0x11110 + (i) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
#define PXVFREQ_PX_MASK 0x7f000000
#define PXVFREQ_PX_SHIFT 24
#define VIDFREQ_BASE 0x11110
2309,8 → 2796,8
#define CSIEW0 0x11250
#define CSIEW1 0x11254
#define CSIEW2 0x11258
#define PEW 0x1125c
#define DEW 0x11270
#define PEW(i) (0x1125c + (i) * 4) /* 5 registers */
#define DEW(i) (0x11270 + (i) * 4) /* 3 registers */
#define MCHAFE 0x112c0
#define CSIEC 0x112e0
#define DMIEC 0x112e4
2334,8 → 2821,8
#define EG5 0x11624
#define EG6 0x11628
#define EG7 0x1162c
#define PXW 0x11664
#define PXWL 0x11680
#define PXW(i) (0x11664 + (i) * 4) /* 4 registers */
#define PXWL(i) (0x11680 + (i) * 4) /* 8 registers */
#define LCFUSE02 0x116c0
#define LCFUSE_HIV_MASK 0x000000ff
#define CSIPLL0 0x12c10
2346,9 → 2833,20
#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
 
#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
#define BXT_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x7070)
#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define BXT_RP_STATE_CAP 0x138170
 
#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
(IS_BROXTON(dev_priv) ? \
INTERVAL_0_833_US(us) : \
INTERVAL_1_33_US(us)) : \
INTERVAL_1_28_US(us))
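/*
 * Editor's sketch, not part of the original header: the interval
 * helpers above convert microseconds into GT timestamp ticks by
 * approximating a divide by the per-platform tick period (1.28us,
 * 1.33us or 0.833us). For example, 1000us becomes
 * (1000 * 100) >> 7 = 781 ticks on a 1.28us platform,
 * (1000 * 3) >> 2 = 750 ticks on non-BXT gen9, and
 * (1000 * 6) / 5 = 1200 ticks on Broxton.
 */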
 
/*
* Logical Context regs
*/
2368,21 → 2866,21
* doesn't need saving on GT1
*/
#define CXT_SIZE 0x21a0
#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f)
#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f)
#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f)
#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f)
#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f)
#define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f)
#define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f)
#define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f)
#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) (((cxt_reg) >> 6) & 0x3f)
#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) (((cxt_reg) >> 0) & 0x3f)
#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
GEN6_CXT_PIPELINE_SIZE(cxt_reg))
#define GEN7_CXT_SIZE 0x21a8
#define GEN7_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 25) & 0x7f)
#define GEN7_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 22) & 0x7)
#define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f)
#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7)
#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f)
#define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f)
#define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7)
#define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f)
#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) (((ctx_reg) >> 9) & 0x7f)
#define GEN7_CXT_GT1_SIZE(ctx_reg) (((ctx_reg) >> 6) & 0x7)
#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) (((ctx_reg) >> 0) & 0x3f)
#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
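/*
 * Editor's sketch, not part of the original header:
 * GEN6_CXT_TOTAL_SIZE() sums only the fields that need saving. For a
 * hypothetical cxt_reg of (8 << 18) | (4 << 6) | 2,
 * GEN6_CXT_RING_SIZE() is 8, GEN6_CXT_EXTENDED_SIZE() is 4 and
 * GEN6_CXT_PIPELINE_SIZE() is 2, giving a total of 14 size units.
 */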
/* Haswell does have the CXT_SIZE register however it does not appear to be
2389,7 → 2887,8
* valid. Now, docs explain in dwords what is in the context object. The full
* size is 70720 bytes, however, the power context and execlist context will
* never be saved (power context is stored elsewhere, and execlists don't work
* on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
* on HSW) - so the final size, including the extra state required for the
* Resource Streamer, is 66944 bytes, which rounds to 17 pages.
*/
#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
/* Same as Haswell, but 72064 bytes now. */
2539,12 → 3038,47
#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
 
/* VLV eDP PSR registers */
#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
#define _PSRCTLB (VLV_DISPLAY_BASE + 0x61090)
#define VLV_EDP_PSR_ENABLE (1<<0)
#define VLV_EDP_PSR_RESET (1<<1)
#define VLV_EDP_PSR_MODE_MASK (7<<2)
#define VLV_EDP_PSR_MODE_HW_TIMER (1<<3)
#define VLV_EDP_PSR_MODE_SW_TIMER (1<<2)
#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1<<7)
#define VLV_EDP_PSR_ACTIVE_ENTRY (1<<8)
#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1<<9)
#define VLV_EDP_PSR_DBL_FRAME (1<<10)
#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16)
#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
#define VLV_PSRCTL(pipe) _PIPE(pipe, _PSRCTLA, _PSRCTLB)
 
#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
#define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30)
#define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31)
#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30)
#define VLV_VSCSDP(pipe) _PIPE(pipe, _VSCSDPA, _VSCSDPB)
 
#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
#define VLV_EDP_PSR_LAST_STATE_MASK (7<<3)
#define VLV_EDP_PSR_CURR_STATE_MASK 7
#define VLV_EDP_PSR_DISABLED (0<<0)
#define VLV_EDP_PSR_INACTIVE (1<<0)
#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2<<0)
#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3<<0)
#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0)
#define VLV_EDP_PSR_EXIT (5<<0)
#define VLV_EDP_PSR_IN_TRANS (1<<7)
#define VLV_PSRSTAT(pipe) _PIPE(pipe, _PSRSTATA, _PSRSTATB)
 
/* HSW+ eDP PSR registers */
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
#define EDP_PSR_ENABLE (1<<31)
#define BDW_PSR_SINGLE_FRAME (1<<30)
#define EDP_PSR_LINK_DISABLE (0<<27)
#define EDP_PSR_LINK_STANDBY (1<<27)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
2604,6 → 3138,20
#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
 
#define EDP_PSR2_CTL 0x6f900
#define EDP_PSR2_ENABLE (1<<31)
#define EDP_SU_TRACK_ENABLE (1<<30)
#define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20)
#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f<<20)
#define EDP_PSR2_TP2_TIME_500 (0<<8)
#define EDP_PSR2_TP2_TIME_100 (1<<8)
#define EDP_PSR2_TP2_TIME_2500 (2<<8)
#define EDP_PSR2_TP2_TIME_50 (3<<8)
#define EDP_PSR2_TP2_TIME_MASK (3<<8)
#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4)
#define EDP_PSR2_IDLE_MASK 0xf
 
/* VGA port control */
#define ADPA 0x61100
#define PCH_ADPA 0xe1100
2752,7 → 3300,9
#define GEN3_SDVOC 0x61160
#define GEN4_HDMIB GEN3_SDVOB
#define GEN4_HDMIC GEN3_SDVOC
#define CHV_HDMID 0x6116C
#define VLV_HDMIB (VLV_DISPLAY_BASE + GEN4_HDMIB)
#define VLV_HDMIC (VLV_DISPLAY_BASE + GEN4_HDMIC)
#define CHV_HDMID (VLV_DISPLAY_BASE + 0x6116C)
#define PCH_SDVOB 0xe1140
#define PCH_HDMIB PCH_SDVOB
#define PCH_HDMIC 0xe1150
2762,7 → 3312,8
#define DC_BALANCE_RESET (1 << 25)
#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154)
#define DC_BALANCE_RESET_VLV (1 << 31)
#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */
#define PIPE_B_SCRAMBLE_RESET (1 << 1)
#define PIPE_A_SCRAMBLE_RESET (1 << 0)
 
2904,7 → 3455,7
 
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
/* Read the description of VIDEO_DIP_DATA (before Haswel) or VIDEO_DIP_ECC
/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
3063,6 → 3614,7
#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
 
#define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260)
#define BLM_HISTOGRAM_ENABLE (1 << 31)
 
/* New registers for PCH-split platforms. Safe even where new bits show up;
 * the register layout matches the gen4 BLC_PWM_CTL[12] layout. */
3082,6 → 3634,30
#define UTIL_PIN_CTL 0x48400
#define UTIL_PIN_ENABLE (1 << 31)
 
#define UTIL_PIN_PIPE(x) ((x) << 29)
#define UTIL_PIN_PIPE_MASK (3 << 29)
#define UTIL_PIN_MODE_PWM (1 << 24)
#define UTIL_PIN_MODE_MASK (0xf << 24)
#define UTIL_PIN_POLARITY (1 << 22)
 
/* BXT backlight register definition. */
#define _BXT_BLC_PWM_CTL1 0xC8250
#define BXT_BLC_PWM_ENABLE (1 << 31)
#define BXT_BLC_PWM_POLARITY (1 << 29)
#define _BXT_BLC_PWM_FREQ1 0xC8254
#define _BXT_BLC_PWM_DUTY1 0xC8258
 
#define _BXT_BLC_PWM_CTL2 0xC8350
#define _BXT_BLC_PWM_FREQ2 0xC8354
#define _BXT_BLC_PWM_DUTY2 0xC8358
 
#define BXT_BLC_PWM_CTL(controller) _PIPE(controller, \
_BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2)
#define BXT_BLC_PWM_FREQ(controller) _PIPE(controller, \
_BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2)
#define BXT_BLC_PWM_DUTY(controller) _PIPE(controller, \
_BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2)
 
#define PCH_GTC_CTL 0xe7000
#define PCH_GTC_ENABLE (1 << 31)
 
3556,14 → 4132,10
# define TV_CC_DATA_1_MASK 0x0000007f
# define TV_CC_DATA_1_SHIFT 0
 
#define TV_H_LUMA_0 0x68100
#define TV_H_LUMA_59 0x681ec
#define TV_H_CHROMA_0 0x68200
#define TV_H_CHROMA_59 0x682ec
#define TV_V_LUMA_0 0x68300
#define TV_V_LUMA_42 0x683a8
#define TV_V_CHROMA_0 0x68400
#define TV_V_CHROMA_42 0x684a8
#define TV_H_LUMA(i) (0x68100 + (i) * 4) /* 60 registers */
#define TV_H_CHROMA(i) (0x68200 + (i) * 4) /* 60 registers */
#define TV_V_LUMA(i) (0x68300 + (i) * 4) /* 43 registers */
#define TV_V_CHROMA(i) (0x68400 + (i) * 4) /* 43 registers */
 
/* Display Port */
#define DP_A 0x64000 /* eDP */
3571,6 → 4143,10
#define DP_C 0x64200
#define DP_D 0x64300
 
#define VLV_DP_B (VLV_DISPLAY_BASE + DP_B)
#define VLV_DP_C (VLV_DISPLAY_BASE + DP_C)
#define CHV_DP_D (VLV_DISPLAY_BASE + DP_D)
 
#define DP_PORT_EN (1 << 31)
#define DP_PIPEB_SELECT (1 << 30)
#define DP_PIPE_MASK (1 << 30)
3616,6 → 4192,7
/* How many wires to use. I guess 3 was too hard */
#define DP_PORT_WIDTH(width) (((width) - 1) << 19)
#define DP_PORT_WIDTH_MASK (7 << 19)
#define DP_PORT_WIDTH_SHIFT 19
 
/* Mystic DPCD version 1.1 special mode */
#define DP_ENHANCED_FRAMING (1 << 18)
3704,6 → 4281,11
#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
#define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14)
#define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13)
#define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12)
#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (0x1f << 5)
#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5)
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
 
/*
3796,6 → 4378,7
#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
#define PIPECONF_BPC_MASK (0x7 << 5)
#define PIPECONF_8BPC (0<<5)
3944,7 → 4527,7
#define DPINVGTT_STATUS_MASK 0xff
#define DPINVGTT_STATUS_MASK_CHV 0xfff
 
#define DSPARB 0x70030
#define DSPARB (dev_priv->info.display_mmio_offset + 0x70030)
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
#define DSPARB_BSTART_MASK (0x7f)
3951,6 → 4534,32
#define DSPARB_BSTART_SHIFT 0
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
#define DSPARB_SPRITEA_SHIFT_VLV 0
#define DSPARB_SPRITEA_MASK_VLV (0xff << 0)
#define DSPARB_SPRITEB_SHIFT_VLV 8
#define DSPARB_SPRITEB_MASK_VLV (0xff << 8)
#define DSPARB_SPRITEC_SHIFT_VLV 16
#define DSPARB_SPRITEC_MASK_VLV (0xff << 16)
#define DSPARB_SPRITED_SHIFT_VLV 24
#define DSPARB_SPRITED_MASK_VLV (0xff << 24)
#define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
#define DSPARB_SPRITEA_HI_SHIFT_VLV 0
#define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0)
#define DSPARB_SPRITEB_HI_SHIFT_VLV 4
#define DSPARB_SPRITEB_HI_MASK_VLV (0x1 << 4)
#define DSPARB_SPRITEC_HI_SHIFT_VLV 8
#define DSPARB_SPRITEC_HI_MASK_VLV (0x1 << 8)
#define DSPARB_SPRITED_HI_SHIFT_VLV 12
#define DSPARB_SPRITED_HI_MASK_VLV (0x1 << 12)
#define DSPARB_SPRITEE_HI_SHIFT_VLV 16
#define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16)
#define DSPARB_SPRITEF_HI_SHIFT_VLV 20
#define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20)
#define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */
#define DSPARB_SPRITEE_SHIFT_VLV 0
#define DSPARB_SPRITEE_MASK_VLV (0xff << 0)
#define DSPARB_SPRITEF_SHIFT_VLV 8
#define DSPARB_SPRITEF_MASK_VLV (0xff << 8)
 
/* pnv/gen4/g4x/vlv/chv */
#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
3975,8 → 4584,8
#define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */
#define DSPFW_CURSORA_SHIFT 8
#define DSPFW_CURSORA_MASK (0x3f<<8)
#define DSPFW_PLANEC_SHIFT_OLD 0
#define DSPFW_PLANEC_MASK_OLD (0x7f<<0) /* pre-gen4 sprite C */
#define DSPFW_PLANEC_OLD_SHIFT 0
#define DSPFW_PLANEC_OLD_MASK (0x7f<<0) /* pre-gen4 sprite C */
#define DSPFW_SPRITEA_SHIFT 0
#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */
#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */
4015,25 → 4624,25
#define DSPFW_SPRITED_WM1_SHIFT 24
#define DSPFW_SPRITED_WM1_MASK (0xff<<24)
#define DSPFW_SPRITED_SHIFT 16
#define DSPFW_SPRITED_MASK (0xff<<16)
#define DSPFW_SPRITED_MASK_VLV (0xff<<16)
#define DSPFW_SPRITEC_WM1_SHIFT 8
#define DSPFW_SPRITEC_WM1_MASK (0xff<<8)
#define DSPFW_SPRITEC_SHIFT 0
#define DSPFW_SPRITEC_MASK (0xff<<0)
#define DSPFW_SPRITEC_MASK_VLV (0xff<<0)
#define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8)
#define DSPFW_SPRITEF_WM1_SHIFT 24
#define DSPFW_SPRITEF_WM1_MASK (0xff<<24)
#define DSPFW_SPRITEF_SHIFT 16
#define DSPFW_SPRITEF_MASK (0xff<<16)
#define DSPFW_SPRITEF_MASK_VLV (0xff<<16)
#define DSPFW_SPRITEE_WM1_SHIFT 8
#define DSPFW_SPRITEE_WM1_MASK (0xff<<8)
#define DSPFW_SPRITEE_SHIFT 0
#define DSPFW_SPRITEE_MASK (0xff<<0)
#define DSPFW_SPRITEE_MASK_VLV (0xff<<0)
#define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
#define DSPFW_PLANEC_WM1_SHIFT 24
#define DSPFW_PLANEC_WM1_MASK (0xff<<24)
#define DSPFW_PLANEC_SHIFT 16
#define DSPFW_PLANEC_MASK (0xff<<16)
#define DSPFW_PLANEC_MASK_VLV (0xff<<16)
#define DSPFW_CURSORC_WM1_SHIFT 8
#define DSPFW_CURSORC_WM1_MASK (0x3f<<16)
#define DSPFW_CURSORC_SHIFT 0
4042,7 → 4651,7
/* vlv/chv high order bits */
#define DSPHOWM (VLV_DISPLAY_BASE + 0x70064)
#define DSPFW_SR_HI_SHIFT 24
#define DSPFW_SR_HI_MASK (1<<24)
#define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_HI_SHIFT 23
#define DSPFW_SPRITEF_HI_MASK (1<<23)
#define DSPFW_SPRITEE_HI_SHIFT 22
4063,7 → 4672,7
#define DSPFW_PLANEA_HI_MASK (1<<0)
#define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068)
#define DSPFW_SR_WM1_HI_SHIFT 24
#define DSPFW_SR_WM1_HI_MASK (1<<24)
#define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */
#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
#define DSPFW_SPRITEF_WM1_HI_MASK (1<<23)
#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
4084,21 → 4693,18
#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
 
/* drain latency register values*/
#define DRAIN_LATENCY_PRECISION_16 16
#define DRAIN_LATENCY_PRECISION_32 32
#define DRAIN_LATENCY_PRECISION_64 64
#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
#define DDL_CURSOR_PRECISION_HIGH (1<<31)
#define DDL_CURSOR_PRECISION_LOW (0<<31)
#define DDL_CURSOR_SHIFT 24
#define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite)))
#define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite)))
#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
#define DDL_PLANE_PRECISION_HIGH (1<<7)
#define DDL_PLANE_PRECISION_LOW (0<<7)
#define DDL_PLANE_SHIFT 0
#define DDL_PRECISION_HIGH (1<<7)
#define DDL_PRECISION_LOW (0<<7)
#define DRAIN_LATENCY_MASK 0x7f
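/*
 * Editor's sketch, not part of the original header: packing a drain
 * latency value for one sprite, assuming dl already fits within
 * DRAIN_LATENCY_MASK and the high precision multiplier is wanted.
 */
static inline u32 vlv_ddl_sprite(int sprite, u32 dl)
{
	/* sprite 0 uses bits 8-14 (precision at 15), sprite 1 bits 16-22 (23) */
	return DDL_SPRITE_PRECISION_HIGH(sprite) |
	       (dl << DDL_SPRITE_SHIFT(sprite));
}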
 
#define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400)
#define CBR_PND_DEADLINE_DISABLE (1<<31)
#define CBR_PWM_CLOCK_MUX_SELECT (1<<30)
 
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
4240,10 → 4846,10
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
/* GM45+ just has to be different */
#define _PIPEA_FRMCOUNT_GM45 0x70040
#define _PIPEA_FLIPCOUNT_GM45 0x70044
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_GM45)
#define PIPE_FLIPCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_GM45)
#define _PIPEA_FRMCOUNT_G4X 0x70040
#define _PIPEA_FLIPCOUNT_G4X 0x70044
#define PIPE_FRMCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_G4X)
#define PIPE_FLIPCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X)
 
/* Cursor A & B regs */
#define _CURACNTR 0x70080
4385,20 → 4991,20
#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
 
/* VBIOS flags */
#define SWF00 (dev_priv->info.display_mmio_offset + 0x71410)
#define SWF01 (dev_priv->info.display_mmio_offset + 0x71414)
#define SWF02 (dev_priv->info.display_mmio_offset + 0x71418)
#define SWF03 (dev_priv->info.display_mmio_offset + 0x7141c)
#define SWF04 (dev_priv->info.display_mmio_offset + 0x71420)
#define SWF05 (dev_priv->info.display_mmio_offset + 0x71424)
#define SWF06 (dev_priv->info.display_mmio_offset + 0x71428)
#define SWF10 (dev_priv->info.display_mmio_offset + 0x70410)
#define SWF11 (dev_priv->info.display_mmio_offset + 0x70414)
#define SWF14 (dev_priv->info.display_mmio_offset + 0x71420)
#define SWF30 (dev_priv->info.display_mmio_offset + 0x72414)
#define SWF31 (dev_priv->info.display_mmio_offset + 0x72418)
#define SWF32 (dev_priv->info.display_mmio_offset + 0x7241c)
/*
* VBIOS flags
* gen2:
* [00:06] alm,mgm
* [10:16] all
* [30:32] alm,mgm
* gen3+:
* [00:0f] all
* [10:1f] all
* [30:32] all
*/
#define SWF0(i) (dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4)
#define SWF1(i) (dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4)
#define SWF3(i) (dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4)
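/*
 * Editor's sketch, not part of the original header: the old
 * per-register SWF defines collapse into three indexed arrays, so
 * what used to be SWF14 (0x71420) is now SWF1(4), and SWF30 (0x72414)
 * is SWF3(0).
 */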
 
/* Pipe B */
#define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000)
4406,8 → 5012,8
#define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024)
#define _PIPEBFRAMEHIGH 0x71040
#define _PIPEBFRAMEPIXEL 0x71044
#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71040)
#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71044)
#define _PIPEB_FRMCOUNT_G4X (dev_priv->info.display_mmio_offset + 0x71040)
#define _PIPEB_FLIPCOUNT_G4X (dev_priv->info.display_mmio_offset + 0x71044)
 
 
/* Display B control */
4617,18 → 5223,18
#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
 
#define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR)
#define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF)
#define SPSTRIDE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASTRIDE, _SPBSTRIDE)
#define SPPOS(pipe, plane) _PIPE(pipe * 2 + plane, _SPAPOS, _SPBPOS)
#define SPSIZE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASIZE, _SPBSIZE)
#define SPKEYMINVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMINVAL, _SPBKEYMINVAL)
#define SPKEYMSK(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMSK, _SPBKEYMSK)
#define SPSURF(pipe, plane) _PIPE(pipe * 2 + plane, _SPASURF, _SPBSURF)
#define SPKEYMAXVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMAXVAL, _SPBKEYMAXVAL)
#define SPTILEOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPATILEOFF, _SPBTILEOFF)
#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
#define SPCNTR(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR)
#define SPLINOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF)
#define SPSTRIDE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE)
#define SPPOS(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS)
#define SPSIZE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE)
#define SPKEYMINVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL)
#define SPKEYMSK(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK)
#define SPSURF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF)
#define SPKEYMAXVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
#define SPTILEOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF)
#define SPCONSTALPHA(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA)
#define SPGAMC(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC)
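/*
 * Editor's sketch, not part of the original header: each VLV/CHV pipe
 * carries two sprites, so the helpers above linearize (pipe, plane)
 * into a single sprite index via (pipe) * 2 + (plane) and let _PIPE()
 * interpolate between the sprite A and sprite B register blocks.
 * SPCNTR(1, 0) is thus index 2, the first sprite on pipe B (sprite C
 * in the DSPARB naming above).
 */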
 
/*
* CHV pipe B sprite CSC
4704,7 → 5310,9
#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
#define PLANE_CTL_ROTATE_MASK 0x3
#define PLANE_CTL_ROTATE_0 0x0
#define PLANE_CTL_ROTATE_90 0x1
#define PLANE_CTL_ROTATE_180 0x2
#define PLANE_CTL_ROTATE_270 0x3
#define _PLANE_STRIDE_1_A 0x70188
#define _PLANE_STRIDE_2_A 0x70288
#define _PLANE_STRIDE_3_A 0x70388
4728,6 → 5336,8
#define _PLANE_KEYMAX_2_A 0x702a0
#define _PLANE_BUF_CFG_1_A 0x7027c
#define _PLANE_BUF_CFG_2_A 0x7037c
#define _PLANE_NV12_BUF_CFG_1_A 0x70278
#define _PLANE_NV12_BUF_CFG_2_A 0x70378
 
#define _PLANE_CTL_1_B 0x71180
#define _PLANE_CTL_2_B 0x71280
4814,6 → 5424,15
#define PLANE_BUF_CFG(pipe, plane) \
_PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe))
 
#define _PLANE_NV12_BUF_CFG_1_B 0x71278
#define _PLANE_NV12_BUF_CFG_2_B 0x71378
#define _PLANE_NV12_BUF_CFG_1(pipe) \
_PIPE(pipe, _PLANE_NV12_BUF_CFG_1_A, _PLANE_NV12_BUF_CFG_1_B)
#define _PLANE_NV12_BUF_CFG_2(pipe) \
_PIPE(pipe, _PLANE_NV12_BUF_CFG_2_A, _PLANE_NV12_BUF_CFG_2_B)
#define PLANE_NV12_BUF_CFG(pipe, plane) \
_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe))
 
/* SKL new cursor registers */
#define _CUR_BUF_CFG_A 0x7017c
#define _CUR_BUF_CFG_B 0x7117c
4833,13 → 5452,15
 
#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030
#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
#define DIGITAL_PORTA_SHORT_PULSE_2MS (0 << 2)
#define DIGITAL_PORTA_SHORT_PULSE_4_5MS (1 << 2)
#define DIGITAL_PORTA_SHORT_PULSE_6MS (2 << 2)
#define DIGITAL_PORTA_SHORT_PULSE_100MS (3 << 2)
#define DIGITAL_PORTA_NO_DETECT (0 << 0)
#define DIGITAL_PORTA_LONG_PULSE_DETECT_MASK (1 << 1)
#define DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK (1 << 0)
#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
#define DIGITAL_PORTA_PULSE_DURATION_4_5ms (1 << 2) /* pre-HSW */
#define DIGITAL_PORTA_PULSE_DURATION_6ms (2 << 2) /* pre-HSW */
#define DIGITAL_PORTA_PULSE_DURATION_100ms (3 << 2) /* pre-HSW */
#define DIGITAL_PORTA_PULSE_DURATION_MASK (3 << 2) /* pre-HSW */
#define DIGITAL_PORTA_HOTPLUG_STATUS_MASK (3 << 0)
#define DIGITAL_PORTA_HOTPLUG_NO_DETECT (0 << 0)
#define DIGITAL_PORTA_HOTPLUG_SHORT_DETECT (1 << 0)
#define DIGITAL_PORTA_HOTPLUG_LONG_DETECT (2 << 0)
 
/* refresh rate hardware control */
#define RR_HW_CTL 0x45300
4946,10 → 5567,125
#define PS_WIN_SZ(pipe) _PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
#define PS_WIN_POS(pipe) _PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
 
/*
* Skylake scalers
*/
#define _PS_1A_CTRL 0x68180
#define _PS_2A_CTRL 0x68280
#define _PS_1B_CTRL 0x68980
#define _PS_2B_CTRL 0x68A80
#define _PS_1C_CTRL 0x69180
#define PS_SCALER_EN (1 << 31)
#define PS_SCALER_MODE_MASK (3 << 28)
#define PS_SCALER_MODE_DYN (0 << 28)
#define PS_SCALER_MODE_HQ (1 << 28)
#define PS_PLANE_SEL_MASK (7 << 25)
#define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
#define PS_FILTER_MASK (3 << 23)
#define PS_FILTER_MEDIUM (0 << 23)
#define PS_FILTER_EDGE_ENHANCE (2 << 23)
#define PS_FILTER_BILINEAR (3 << 23)
#define PS_VERT3TAP (1 << 21)
#define PS_VERT_INT_INVERT_FIELD1 (0 << 20)
#define PS_VERT_INT_INVERT_FIELD0 (1 << 20)
#define PS_PWRUP_PROGRESS (1 << 17)
#define PS_V_FILTER_BYPASS (1 << 8)
#define PS_VADAPT_EN (1 << 7)
#define PS_VADAPT_MODE_MASK (3 << 5)
#define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5)
#define PS_VADAPT_MODE_MOD_ADAPT (1 << 5)
#define PS_VADAPT_MODE_MOST_ADAPT (3 << 5)
 
#define _PS_PWR_GATE_1A 0x68160
#define _PS_PWR_GATE_2A 0x68260
#define _PS_PWR_GATE_1B 0x68960
#define _PS_PWR_GATE_2B 0x68A60
#define _PS_PWR_GATE_1C 0x69160
#define PS_PWR_GATE_DIS_OVERRIDE (1 << 31)
#define PS_PWR_GATE_SETTLING_TIME_32 (0 << 3)
#define PS_PWR_GATE_SETTLING_TIME_64 (1 << 3)
#define PS_PWR_GATE_SETTLING_TIME_96 (2 << 3)
#define PS_PWR_GATE_SETTLING_TIME_128 (3 << 3)
#define PS_PWR_GATE_SLPEN_8 0
#define PS_PWR_GATE_SLPEN_16 1
#define PS_PWR_GATE_SLPEN_24 2
#define PS_PWR_GATE_SLPEN_32 3
 
#define _PS_WIN_POS_1A 0x68170
#define _PS_WIN_POS_2A 0x68270
#define _PS_WIN_POS_1B 0x68970
#define _PS_WIN_POS_2B 0x68A70
#define _PS_WIN_POS_1C 0x69170
 
#define _PS_WIN_SZ_1A 0x68174
#define _PS_WIN_SZ_2A 0x68274
#define _PS_WIN_SZ_1B 0x68974
#define _PS_WIN_SZ_2B 0x68A74
#define _PS_WIN_SZ_1C 0x69174
 
#define _PS_VSCALE_1A 0x68184
#define _PS_VSCALE_2A 0x68284
#define _PS_VSCALE_1B 0x68984
#define _PS_VSCALE_2B 0x68A84
#define _PS_VSCALE_1C 0x69184
 
#define _PS_HSCALE_1A 0x68190
#define _PS_HSCALE_2A 0x68290
#define _PS_HSCALE_1B 0x68990
#define _PS_HSCALE_2B 0x68A90
#define _PS_HSCALE_1C 0x69190
 
#define _PS_VPHASE_1A 0x68188
#define _PS_VPHASE_2A 0x68288
#define _PS_VPHASE_1B 0x68988
#define _PS_VPHASE_2B 0x68A88
#define _PS_VPHASE_1C 0x69188
 
#define _PS_HPHASE_1A 0x68194
#define _PS_HPHASE_2A 0x68294
#define _PS_HPHASE_1B 0x68994
#define _PS_HPHASE_2B 0x68A94
#define _PS_HPHASE_1C 0x69194
 
#define _PS_ECC_STAT_1A 0x681D0
#define _PS_ECC_STAT_2A 0x682D0
#define _PS_ECC_STAT_1B 0x689D0
#define _PS_ECC_STAT_2B 0x68AD0
#define _PS_ECC_STAT_1C 0x691D0
 
#define _ID(id, a, b) ((a) + (id)*((b)-(a)))
#define SKL_PS_CTRL(pipe, id) _PIPE(pipe, \
_ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
_ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
#define SKL_PS_PWR_GATE(pipe, id) _PIPE(pipe, \
_ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \
_ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B))
#define SKL_PS_WIN_POS(pipe, id) _PIPE(pipe, \
_ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \
_ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B))
#define SKL_PS_WIN_SZ(pipe, id) _PIPE(pipe, \
_ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \
_ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B))
#define SKL_PS_VSCALE(pipe, id) _PIPE(pipe, \
_ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \
_ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B))
#define SKL_PS_HSCALE(pipe, id) _PIPE(pipe, \
_ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \
_ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B))
#define SKL_PS_VPHASE(pipe, id) _PIPE(pipe, \
_ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \
_ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B))
#define SKL_PS_HPHASE(pipe, id) _PIPE(pipe, \
_ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \
_ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B))
#define SKL_PS_ECC_STAT(pipe, id) _PIPE(pipe, \
_ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \
_ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
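/*
 * Editor's sketch, not part of the original header: _ID() interpolates
 * between scaler instances the same way _PIPE() interpolates between
 * pipes, so SKL_PS_CTRL(0, 1) resolves to
 * _ID(1, _PS_1A_CTRL, _PS_2A_CTRL) == 0x68280, i.e. the second scaler
 * on pipe A.
 */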
 
/* legacy palette */
#define _LGC_PALETTE_A 0x4a000
#define _LGC_PALETTE_B 0x4a800
#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
#define LGC_PALETTE(pipe, i) (_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
 
#define _GAMMA_MODE_A 0x4a480
#define _GAMMA_MODE_B 0x4ac80
5009,7 → 5745,7
#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane)))
#define DE_PIPEA_VBLANK_IVB (1<<0)
#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5))
#define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5))
 
#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
#define MASTER_INTERRUPT_ENABLE (1<<31)
5033,7 → 5769,7
#define GEN8_DE_PIPE_C_IRQ (1<<18)
#define GEN8_DE_PIPE_B_IRQ (1<<17)
#define GEN8_DE_PIPE_A_IRQ (1<<16)
#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe))
#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+(pipe)))
#define GEN8_GT_VECS_IRQ (1<<6)
#define GEN8_GT_PM_IRQ (1<<4)
#define GEN8_GT_VCS2_IRQ (1<<3)
5046,11 → 5782,12
#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))
 
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_VCS1_IRQ_SHIFT 0
#define GEN8_VCS2_IRQ_SHIFT 16
#define GEN8_VCS1_IRQ_SHIFT 0
#define GEN8_VECS_IRQ_SHIFT 0
#define GEN8_WD_IRQ_SHIFT 16
 
#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe)))
#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe)))
5068,13 → 5805,15
#define GEN8_PIPE_VSYNC (1 << 1)
#define GEN8_PIPE_VBLANK (1 << 0)
#define GEN9_PIPE_CURSOR_FAULT (1 << 11)
#define GEN9_PIPE_PLANE4_FAULT (1 << 10)
#define GEN9_PIPE_PLANE3_FAULT (1 << 9)
#define GEN9_PIPE_PLANE2_FAULT (1 << 8)
#define GEN9_PIPE_PLANE1_FAULT (1 << 7)
#define GEN9_PIPE_PLANE4_FLIP_DONE (1 << 6)
#define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5)
#define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4)
#define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3)
#define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + p))
#define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + (p)))
#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
(GEN8_PIPE_CURSOR_FAULT | \
GEN8_PIPE_SPRITE_FAULT | \
5081,6 → 5820,7
GEN8_PIPE_PRIMARY_FAULT)
#define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
(GEN9_PIPE_CURSOR_FAULT | \
GEN9_PIPE_PLANE4_FAULT | \
GEN9_PIPE_PLANE3_FAULT | \
GEN9_PIPE_PLANE2_FAULT | \
GEN9_PIPE_PLANE1_FAULT)
5089,10 → 5829,17
#define GEN8_DE_PORT_IMR 0x44444
#define GEN8_DE_PORT_IIR 0x44448
#define GEN8_DE_PORT_IER 0x4444c
#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
#define GEN9_AUX_CHANNEL_D (1 << 27)
#define GEN9_AUX_CHANNEL_C (1 << 26)
#define GEN9_AUX_CHANNEL_B (1 << 25)
#define BXT_DE_PORT_HP_DDIC (1 << 5)
#define BXT_DE_PORT_HP_DDIB (1 << 4)
#define BXT_DE_PORT_HP_DDIA (1 << 3)
#define BXT_DE_PORT_HOTPLUG_MASK (BXT_DE_PORT_HP_DDIA | \
BXT_DE_PORT_HP_DDIB | \
BXT_DE_PORT_HP_DDIC)
#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
#define BXT_DE_PORT_GMBUS (1 << 1)
#define GEN8_AUX_CHANNEL_A (1 << 0)
 
#define GEN8_DE_MISC_ISR 0x44460
5146,6 → 5893,9
#define DISP_FBC_WM_DIS (1<<15)
#define DISP_ARB_CTL2 0x45004
#define DISP_DATA_PARTITION_5_6 (1<<6)
#define DBUF_CTL 0x45008
#define DBUF_POWER_REQUEST (1<<31)
#define DBUF_POWER_STATE (1<<30)
#define GEN7_MSG_CTL 0x45010
#define WAIT_FOR_PCH_RESET_ACK (1<<1)
#define WAIT_FOR_PCH_FLR_ACK (1<<0)
5152,15 → 5902,36
#define HSW_NDE_RSTWRN_OPT 0x46408
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
 
#define SKL_DFSM 0x51000
#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
 
#define FF_SLICE_CS_CHICKEN2 0x20e4
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
 
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
#define COMMON_SLICE_CHICKEN2 0x7014
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
 
#define HIZ_CHICKEN 0x7018
# define CHV_HZ_8X8_MODE_IN_1X (1<<15)
# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1<<3)
 
#define GEN9_SLICE_COMMON_ECO_CHICKEN0 0x7308
#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
 
#define GEN7_L3SQCREG1 0xB010
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
 
#define GEN8_L3SQCREG1 0xB100
#define BDW_WA_L3SQCREG1_DEFAULT 0x784000
 
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1<<19)
5173,12 → 5944,23
#define GEN7_L3SQCREG4 0xb034
#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
 
#define GEN8_L3SQCREG4 0xb118
#define GEN8_LQSC_RO_PERF_DIS (1<<27)
#define GEN8_LQSC_FLUSH_COHERENT_LINES (1<<21)
 
/* GEN8 chicken */
#define HDC_CHICKEN0 0x7300
#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15)
#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1<<5)
#define HDC_FORCE_NON_COHERENT (1<<4)
#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)
 
/* GEN9 chicken */
#define SLICE_ECO_CHICKEN0 0x7308
#define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
 
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
5186,6 → 5968,9
#define HSW_SCRATCH1 0xb038
#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
 
#define BDW_SCRATCH1 0xb11c
#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1<<2)
 
/* PCH */
 
/* south display engine interrupt: IBX */
5239,6 → 6024,8
#define SDE_AUXC_CPT (1 << 26)
#define SDE_AUXB_CPT (1 << 25)
#define SDE_AUX_MASK_CPT (7 << 25)
#define SDE_PORTE_HOTPLUG_SPT (1 << 25)
#define SDE_PORTA_HOTPLUG_SPT (1 << 24)
#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
5249,6 → 6036,11
SDE_PORTD_HOTPLUG_CPT | \
SDE_PORTC_HOTPLUG_CPT | \
SDE_PORTB_HOTPLUG_CPT)
#define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \
SDE_PORTD_HOTPLUG_CPT | \
SDE_PORTC_HOTPLUG_CPT | \
SDE_PORTB_HOTPLUG_CPT | \
SDE_PORTA_HOTPLUG_SPT)
#define SDE_GMBUS_CPT (1 << 17)
#define SDE_ERROR_CPT (1 << 16)
#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
5280,41 → 6072,53
#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
 
/* digital port hotplug */
#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
#define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */
#define PORTA_HOTPLUG_LONG_DETECT (2 << 24) /* SPT+ & BXT */
#define PORTD_HOTPLUG_ENABLE (1 << 20)
#define PORTD_PULSE_DURATION_2ms (0)
#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
#define PORTD_PULSE_DURATION_6ms (2 << 18)
#define PORTD_PULSE_DURATION_100ms (3 << 18)
#define PORTD_PULSE_DURATION_MASK (3 << 18)
#define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16)
#define PORTD_PULSE_DURATION_2ms (0 << 18) /* pre-LPT */
#define PORTD_PULSE_DURATION_4_5ms (1 << 18) /* pre-LPT */
#define PORTD_PULSE_DURATION_6ms (2 << 18) /* pre-LPT */
#define PORTD_PULSE_DURATION_100ms (3 << 18) /* pre-LPT */
#define PORTD_PULSE_DURATION_MASK (3 << 18) /* pre-LPT */
#define PORTD_HOTPLUG_STATUS_MASK (3 << 16)
#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
#define PORTC_HOTPLUG_ENABLE (1 << 12)
#define PORTC_PULSE_DURATION_2ms (0)
#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
#define PORTC_PULSE_DURATION_6ms (2 << 10)
#define PORTC_PULSE_DURATION_100ms (3 << 10)
#define PORTC_PULSE_DURATION_MASK (3 << 10)
#define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8)
#define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */
#define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */
#define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */
#define PORTC_PULSE_DURATION_100ms (3 << 10) /* pre-LPT */
#define PORTC_PULSE_DURATION_MASK (3 << 10) /* pre-LPT */
#define PORTC_HOTPLUG_STATUS_MASK (3 << 8)
#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
#define PORTB_HOTPLUG_ENABLE (1 << 4)
#define PORTB_PULSE_DURATION_2ms (0)
#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
#define PORTB_PULSE_DURATION_6ms (2 << 2)
#define PORTB_PULSE_DURATION_100ms (3 << 2)
#define PORTB_PULSE_DURATION_MASK (3 << 2)
#define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0)
#define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */
#define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */
#define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */
#define PORTB_PULSE_DURATION_100ms (3 << 2) /* pre-LPT */
#define PORTB_PULSE_DURATION_MASK (3 << 2) /* pre-LPT */
#define PORTB_HOTPLUG_STATUS_MASK (3 << 0)
#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
 
#define PCH_PORT_HOTPLUG2 0xc403C /* SHOTPLUG_CTL2 SPT+ */
#define PORTE_HOTPLUG_ENABLE (1 << 4)
#define PORTE_HOTPLUG_STATUS_MASK (3 << 0)
#define PORTE_HOTPLUG_NO_DETECT (0 << 0)
#define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
 
#define PCH_GPIOA 0xc5010
#define PCH_GPIOB 0xc5014
#define PCH_GPIOC 0xc5018
5379,9 → 6183,9
#define PCH_SSC4_AUX_PARMS 0xc6214
 
#define PCH_DPLL_SEL 0xc7000
#define TRANS_DPLLB_SEL(pipe) (1 << (pipe * 4))
#define TRANS_DPLLB_SEL(pipe) (1 << ((pipe) * 4))
#define TRANS_DPLLA_SEL(pipe) 0
#define TRANS_DPLL_ENABLE(pipe) (1 << (pipe * 4 + 3))
#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))
 
/* transcoder */
 
5418,6 → 6222,9
#define _VIDEO_DIP_CTL_A 0xe0200
#define _VIDEO_DIP_DATA_A 0xe0208
#define _VIDEO_DIP_GCP_A 0xe0210
#define GCP_COLOR_INDICATION (1 << 2)
#define GCP_DEFAULT_PHASE_ENABLE (1 << 1)
#define GCP_AV_MUTE (1 << 0)
 
#define _VIDEO_DIP_CTL_B 0xe1200
#define _VIDEO_DIP_DATA_B 0xe1208
5479,16 → 6286,16
 
#define HSW_TVIDEO_DIP_CTL(trans) \
_TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
_TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A)
#define HSW_TVIDEO_DIP_VS_DATA(trans) \
_TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A)
#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
_TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) \
(_TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A) + (i) * 4)
#define HSW_TVIDEO_DIP_VS_DATA(trans, i) \
(_TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A) + (i) * 4)
#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) \
(_TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A) + (i) * 4)
#define HSW_TVIDEO_DIP_GCP(trans) \
_TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
_TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A)
#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) \
(_TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A) + (i) * 4)
 
#define HSW_STEREO_3D_CTL_A 0x70020
#define S3D_ENABLE (1<<31)
5557,6 → 6364,7
#define _TRANSA_CHICKEN1 0xf0060
#define _TRANSB_CHICKEN1 0xf1060
#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1<<10)
#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
5573,9 → 6381,11
#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define FDI_BC_BIFURCATION_SELECT (1 << 12)
#define SPT_PWM_GRANULARITY (1<<0)
#define SOUTH_CHICKEN2 0xc2004
#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
#define LPT_PWM_GRANULARITY (1<<5)
#define DPLS_EDP_PPS_FIX_DIS (1<<0)
 
#define _FDI_RXA_CHICKEN 0xc200c
5741,6 → 6551,8
#define PCH_PP_CONTROL 0xc7204
#define PANEL_UNLOCK_REGS (0xabcd << 16)
#define PANEL_UNLOCK_MASK (0xffff << 16)
#define BXT_POWER_CYCLE_DELAY_MASK (0x1f0)
#define BXT_POWER_CYCLE_DELAY_SHIFT 4
#define EDP_FORCE_VDD (1 << 3)
#define EDP_BLC_ENABLE (1 << 2)
#define PANEL_POWER_RESET (1 << 1)
5769,6 → 6581,17
#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
 
/* BXT PPS changes - 2nd set of PPS registers */
#define _BXT_PP_STATUS2 0xc7300
#define _BXT_PP_CONTROL2 0xc7304
#define _BXT_PP_ON_DELAYS2 0xc7308
#define _BXT_PP_OFF_DELAYS2 0xc730c
 
#define BXT_PP_STATUS(n) _PIPE(n, PCH_PP_STATUS, _BXT_PP_STATUS2)
#define BXT_PP_CONTROL(n) _PIPE(n, PCH_PP_CONTROL, _BXT_PP_CONTROL2)
#define BXT_PP_ON_DELAYS(n) _PIPE(n, PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2)
#define BXT_PP_OFF_DELAYS(n) _PIPE(n, PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2)
 
#define PCH_DP_B 0xe4100
#define PCH_DPB_AUX_CH_CTL 0xe4110
#define PCH_DPB_AUX_CH_DATA1 0xe4114
5814,6 → 6637,7
#define TRANS_DP_PORT_SEL_D (2<<29)
#define TRANS_DP_PORT_SEL_NONE (3<<29)
#define TRANS_DP_PORT_SEL_MASK (3<<29)
#define TRANS_DP_PIPE_TO_PORT(val) ((((val) & TRANS_DP_PORT_SEL_MASK) >> 29) + PORT_B)
#define TRANS_DP_AUDIO_ONLY (1<<26)
#define TRANS_DP_ENH_FRAMING (1<<18)
#define TRANS_DP_8BPC (0<<9)
5904,10 → 6728,13
#define GTFIFOCTL 0x120008
#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12)
#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11)
 
#define HSW_IDICR 0x9008
#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
#define HSW_EDRAM_PRESENT 0x120010
#define EDRAM_ENABLED 0x1
 
#define GEN6_UCGCTL1 0x9400
# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
5915,6 → 6742,7
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
 
#define GEN6_UCGCTL2 0x9404
# define GEN6_VFUNIT_CLOCK_GATE_DISABLE (1 << 31)
# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30)
# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22)
# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
5931,7 → 6759,9
#define GEN6_RSTCTL 0x9420
 
#define GEN8_UCGCTL6 0x9430
#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24)
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28)
 
#define GEN6_GFXPAUSE 0xA000
#define GEN6_RPNSWREQ 0xA008
5938,6 → 6768,7
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
#define HSW_FREQUENCY(x) ((x)<<24)
#define GEN9_FREQUENCY(x) ((x)<<23)
#define GEN6_OFFSET(x) ((x)<<19)
#define GEN6_AGGRESSIVE_TURBO (0<<15)
#define GEN6_RC_VIDEO_FREQ 0xA00C
5956,8 → 6787,10
#define GEN6_RPSTAT1 0xA01C
#define GEN6_CAGF_SHIFT 8
#define HSW_CAGF_SHIFT 7
#define GEN9_CAGF_SHIFT 23
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
6005,7 → 6838,16
#define GEN6_PMINTRMSK 0xA168
#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
#define VLV_PWRDWNUPCTL 0xA294
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS 0xA0C4
#define GEN9_RENDER_PG_IDLE_HYSTERESIS 0xA0C8
#define GEN9_PG_ENABLE 0xA210
#define GEN9_RENDER_PG_ENABLE (1<<0)
#define GEN9_MEDIA_PG_ENABLE (1<<1)
 
#define VLV_CHICKEN_3 (VLV_DISPLAY_BASE + 0x7040C)
#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
#define PIXEL_OVERLAP_CNT_SHIFT 30
 
#define GEN6_PMISR 0x44020
#define GEN6_PMIMR 0x44024 /* rps_lock */
#define GEN6_PMIIR 0x44028
6021,7 → 6863,7
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
 
#define GEN7_GT_SCRATCH_BASE 0x4F100
#define GEN7_GT_SCRATCH(i) (0x4F100 + (i) * 4)
#define GEN7_GT_SCRATCH_REG_NUM 8
 
#define VLV_GTLC_SURVIVABILITY_REG 0x130098
6041,20 → 6883,30
 
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
#define VLV_RENDER_C0_COUNT_REG 0x138118
#define VLV_MEDIA_C0_COUNT_REG 0x13811C
#define VLV_RENDER_C0_COUNT 0x138118
#define VLV_MEDIA_C0_COUNT 0x13811C
 
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
#define GEN6_READ_OC_PARAMS 0xc
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18
#define GEN9_PCODE_READ_MEM_LATENCY 0x6
#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
#define SKL_PCODE_CDCLK_CONTROL 0x7
#define SKL_CDCLK_PREPARE_FOR_CHANGE 0x3
#define SKL_CDCLK_READY_FOR_CHANGE 0x1
#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
#define GEN6_READ_OC_PARAMS 0xc
#define GEN6_PCODE_READ_D_COMP 0x10
#define GEN6_PCODE_WRITE_D_COMP 0x11
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
#define DISPLAY_IPS_CONTROL 0x19
#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
#define GEN6_PCODE_DATA 0x138128
6062,12 → 6914,6
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
#define GEN6_PCODE_DATA1 0x13812C
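/*
 * Editor's sketch, not part of the original header: RC6 voltages are
 * coded in 5mV steps above a 245mV base, so
 * GEN6_ENCODE_RC6_VID(450) == 41 and GEN6_DECODE_RC6_VID(41) == 450;
 * the two macros are exact inverses for multiples of 5mV.
 */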
 
#define GEN9_PCODE_READ_MEM_LATENCY 0x6
#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
 
#define GEN6_GT_CORE_STATUS 0x138060
#define GEN6_CORE_CPD_STATE_MASK (7<<4)
#define GEN6_RCn_MASK 7
6076,9 → 6922,44
#define GEN6_RC6 3
#define GEN6_RC7 4
 
#define GEN8_GT_SLICE_INFO 0x138064
#define GEN8_LSLICESTAT_MASK 0x7
 
#define CHV_POWER_SS0_SIG1 0xa720
#define CHV_POWER_SS1_SIG1 0xa728
#define CHV_SS_PG_ENABLE (1<<1)
#define CHV_EU08_PG_ENABLE (1<<9)
#define CHV_EU19_PG_ENABLE (1<<17)
#define CHV_EU210_PG_ENABLE (1<<25)
 
#define CHV_POWER_SS0_SIG2 0xa724
#define CHV_POWER_SS1_SIG2 0xa72c
#define CHV_EU311_PG_ENABLE (1<<1)
 
#define GEN9_SLICE_PGCTL_ACK(slice) (0x804c + (slice)*0x4)
#define GEN9_PGCTL_SLICE_ACK (1 << 0)
#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice)*2))
 
#define GEN9_SS01_EU_PGCTL_ACK(slice) (0x805c + (slice)*0x8)
#define GEN9_SS23_EU_PGCTL_ACK(slice) (0x8060 + (slice)*0x8)
#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
#define GEN9_PGCTL_SSA_EU311_ACK (1 << 6)
#define GEN9_PGCTL_SSB_EU08_ACK (1 << 8)
#define GEN9_PGCTL_SSB_EU19_ACK (1 << 10)
#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12)
#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
 
#define GEN7_MISCCPCTL (0x9424)
#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1<<2)
#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1<<4)
#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1<<6)
 
#define GEN8_GARBCNTL 0xB004
#define GEN9_GAPS_TSV_CREDIT_DISABLE (1<<7)
 
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
#define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */
6102,10 → 6983,12
#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
#define GEN7_MAX_PS_THREAD_DEP (8<<12)
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1<<4)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
 
#define GEN9_HALF_SLICE_CHICKEN5 0xe188
#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
#define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3)
 
#define GEN8_ROW_CHICKEN 0xe4f0
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
6118,10 → 7001,18
#define HSW_ROW_CHICKEN3 0xe49c
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
 
#define HALF_SLICE_CHICKEN2 0xe180
#define GEN8_ST_PO_DISABLE (1<<13)
 
#define HALF_SLICE_CHICKEN3 0xe184
#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
 
#define GEN9_HALF_SLICE_CHICKEN7 0xe194
#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
 
/* Audio */
#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
6258,6 → 7149,9
#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4))
#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4))
 
#define HSW_AUD_CHICKENBIT 0x65f10
#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
 
/* HSW Power Wells */
#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */
#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */
6271,6 → 7165,13
#define HSW_PWR_WELL_FORCE_ON (1<<19)
#define HSW_PWR_WELL_CTL6 0x45414
 
/* SKL Fuse Status */
#define SKL_FUSE_STATUS 0x42000
#define SKL_FUSE_DOWNLOAD_STATUS (1<<31)
#define SKL_FUSE_PG0_DIST_STATUS (1<<27)
#define SKL_FUSE_PG1_DIST_STATUS (1<<26)
#define SKL_FUSE_PG2_DIST_STATUS (1<<25)
 
/* Per-pipe DDI Function Control */
#define TRANS_DDI_FUNC_CTL_A 0x60400
#define TRANS_DDI_FUNC_CTL_B 0x61400
6346,12 → 7247,15
#define DDI_BUF_IS_IDLE (1<<7)
#define DDI_A_4_LANES (1<<4)
#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
#define DDI_PORT_WIDTH_MASK (7 << 1)
#define DDI_PORT_WIDTH_SHIFT 1
#define DDI_INIT_DISPLAY_DETECTED (1<<0)
 
/* DDI Buffer Translations */
#define DDI_BUF_TRANS_A 0x64E00
#define DDI_BUF_TRANS_B 0x64E60
#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
#define DDI_BUF_TRANS_LO(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8)
#define DDI_BUF_TRANS_HI(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8 + 4)
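/* A minimal programming sketch for the translation entries above, assuming
* an I915_WRITE()-style MMIO accessor and a hypothetical ddi_translations[]
* table holding the trans1/trans2 dword pairs for each entry:
*
*	for (i = 0; i < n_entries; i++) {
*		I915_WRITE(DDI_BUF_TRANS_LO(port, i), ddi_translations[i].trans1);
*		I915_WRITE(DDI_BUF_TRANS_HI(port, i), ddi_translations[i].trans2);
*	}
*/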
 
/* Sideband Interface (SBI) is programmed indirectly, via
* SBI_ADDR, which contains the register offset; and SBI_DATA,
6444,7 → 7348,7
#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
/* For each transcoder, we need to select the corresponding port clock */
#define TRANS_CLK_SEL_DISABLED (0x0<<29)
#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
 
#define TRANSA_MSA_MISC 0x60410
#define TRANSB_MSA_MISC 0x61410
6469,6 → 7373,7
#define LCPLL_CLK_FREQ_337_5_BDW (2<<26)
#define LCPLL_CLK_FREQ_675_BDW (3<<26)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_ROOT_CD_CLOCK_DISABLE (1<<24)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
#define LCPLL_POWER_DOWN_ALLOW (1<<22)
#define LCPLL_CD_SOURCE_FCLK (1<<21)
6487,6 → 7392,13
#define CDCLK_FREQ_675_617 (3<<26)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
 
#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
 
/* LCPLL_CTL */
#define LCPLL1_CTL 0x46010
#define LCPLL2_CTL 0x46014
6496,23 → 7408,23
#define DPLL_CTRL1 0x6C058
#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
#define DPLL_CRTL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
#define DPLL_CRTL1_LINK_RATE_SHIFT(id) ((id)*6+1)
#define DPLL_CRTL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
#define DPLL_CTRL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id)*6+1)
#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6))
#define DPLL_CRTL1_LINK_RATE_2700 0
#define DPLL_CRTL1_LINK_RATE_1350 1
#define DPLL_CRTL1_LINK_RATE_810 2
#define DPLL_CRTL1_LINK_RATE_1620 3
#define DPLL_CRTL1_LINK_RATE_1080 4
#define DPLL_CRTL1_LINK_RATE_2160 5
#define DPLL_CTRL1_LINK_RATE_2700 0
#define DPLL_CTRL1_LINK_RATE_1350 1
#define DPLL_CTRL1_LINK_RATE_810 2
#define DPLL_CTRL1_LINK_RATE_1620 3
#define DPLL_CTRL1_LINK_RATE_1080 4
#define DPLL_CTRL1_LINK_RATE_2160 5
 
/* DPLL control2 */
#define DPLL_CTRL2 0x6C05C
#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<(port+15))
#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<((port)+15))
#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1))
#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1)
#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) (clk<<((port)*3+1))
#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk)<<((port)*3+1))
#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3))
 
/* DPLL Status */
6525,7 → 7437,7
#define DPLL3_CFGCR1 0x6C050
#define DPLL_CFGCR1_FREQ_ENABLE (1<<31)
#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
#define DPLL_CFGCR1_DCO_FRACTION(x) (x<<9)
#define DPLL_CFGCR1_DCO_FRACTION(x) ((x)<<9)
#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
 
#define DPLL1_CFGCR2 0x6C044
6532,16 → 7444,16
#define DPLL2_CFGCR2 0x6C04C
#define DPLL3_CFGCR2 0x6C054
#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8)
#define DPLL_CFGCR2_QDIV_RATIO(x) (x<<8)
#define DPLL_CFGCR2_QDIV_MODE(x) (x<<7)
#define DPLL_CFGCR2_QDIV_RATIO(x) ((x)<<8)
#define DPLL_CFGCR2_QDIV_MODE(x) ((x)<<7)
#define DPLL_CFGCR2_KDIV_MASK (3<<5)
#define DPLL_CFGCR2_KDIV(x) (x<<5)
#define DPLL_CFGCR2_KDIV(x) ((x)<<5)
#define DPLL_CFGCR2_KDIV_5 (0<<5)
#define DPLL_CFGCR2_KDIV_2 (1<<5)
#define DPLL_CFGCR2_KDIV_3 (2<<5)
#define DPLL_CFGCR2_KDIV_1 (3<<5)
#define DPLL_CFGCR2_PDIV_MASK (7<<2)
#define DPLL_CFGCR2_PDIV(x) (x<<2)
#define DPLL_CFGCR2_PDIV(x) ((x)<<2)
#define DPLL_CFGCR2_PDIV_1 (0<<2)
#define DPLL_CFGCR2_PDIV_2 (1<<2)
#define DPLL_CFGCR2_PDIV_3 (2<<2)
6548,9 → 7460,28
#define DPLL_CFGCR2_PDIV_7 (4<<2)
#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
 
#define GET_CFG_CR1_REG(id) (DPLL1_CFGCR1 + (id - SKL_DPLL1) * 8)
#define GET_CFG_CR2_REG(id) (DPLL1_CFGCR2 + (id - SKL_DPLL1) * 8)
#define DPLL_CFGCR1(id) (DPLL1_CFGCR1 + ((id) - SKL_DPLL1) * 8)
#define DPLL_CFGCR2(id) (DPLL1_CFGCR2 + ((id) - SKL_DPLL1) * 8)
 
/* BXT display engine PLL */
#define BXT_DE_PLL_CTL 0x6d000
#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
#define BXT_DE_PLL_RATIO_MASK 0xff
 
#define BXT_DE_PLL_ENABLE 0x46070
#define BXT_DE_PLL_PLL_ENABLE (1 << 31)
#define BXT_DE_PLL_LOCK (1 << 30)
 
/* GEN9 DC */
#define DC_STATE_EN 0x45504
#define DC_STATE_EN_UPTO_DC5 (1<<0)
#define DC_STATE_EN_DC9 (1<<3)
#define DC_STATE_EN_UPTO_DC6 (2<<0)
#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
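/* Read-modify-write sketch for allowing DC6 (assumes I915_READ/I915_WRITE
* accessors; only the DC5/DC6 field is touched):
*
*	u32 val = I915_READ(DC_STATE_EN);
*	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
*	val |= DC_STATE_EN_UPTO_DC6;
*	I915_WRITE(DC_STATE_EN, val);
*/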
 
#define DC_STATE_DEBUG 0x45520
#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)
 
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
6631,19 → 7562,128
#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
 
/* VLV MIPI registers */
/* MIPI DSI registers */
 
#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */
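/* E.g. _MIPI_PORT(PORT_A, a, c) resolves to a and _MIPI_PORT(PORT_C, a, c)
* resolves to c; there is no MIPI port B on these platforms. */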
 
/* BXT MIPI clock controls */
#define BXT_MAX_VAR_OUTPUT_KHZ 39500
 
#define BXT_MIPI_CLOCK_CTL 0x46090
#define BXT_MIPI1_DIV_SHIFT 26
#define BXT_MIPI2_DIV_SHIFT 10
#define BXT_MIPI_DIV_SHIFT(port) \
_MIPI_PORT(port, BXT_MIPI1_DIV_SHIFT, \
BXT_MIPI2_DIV_SHIFT)
/* Variable clock divider to generate the TX source. Result must be < 39.5 MHz */
#define BXT_MIPI1_ESCLK_VAR_DIV_MASK (0x3F << 26)
#define BXT_MIPI2_ESCLK_VAR_DIV_MASK (0x3F << 10)
#define BXT_MIPI_ESCLK_VAR_DIV_MASK(port) \
_MIPI_PORT(port, BXT_MIPI1_ESCLK_VAR_DIV_MASK, \
BXT_MIPI2_ESCLK_VAR_DIV_MASK)
 
#define BXT_MIPI_ESCLK_VAR_DIV(port, val) \
(val << BXT_MIPI_DIV_SHIFT(port))
/* TX control divider to select actual TX clock output from (8x/var) */
#define BXT_MIPI1_TX_ESCLK_SHIFT 21
#define BXT_MIPI2_TX_ESCLK_SHIFT 5
#define BXT_MIPI_TX_ESCLK_SHIFT(port) \
_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_SHIFT, \
BXT_MIPI2_TX_ESCLK_SHIFT)
#define BXT_MIPI1_TX_ESCLK_FIXDIV_MASK (3 << 21)
#define BXT_MIPI2_TX_ESCLK_FIXDIV_MASK (3 << 5)
#define BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port) \
_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \
BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
#define BXT_MIPI_TX_ESCLK_8XDIV_BY2(port) \
(0x0 << BXT_MIPI_TX_ESCLK_SHIFT(port))
#define BXT_MIPI_TX_ESCLK_8XDIV_BY4(port) \
(0x1 << BXT_MIPI_TX_ESCLK_SHIFT(port))
#define BXT_MIPI_TX_ESCLK_8XDIV_BY8(port) \
(0x2 << BXT_MIPI_TX_ESCLK_SHIFT(port))
/* RX control divider to select actual RX clock output from 8x */
#define BXT_MIPI1_RX_ESCLK_SHIFT 19
#define BXT_MIPI2_RX_ESCLK_SHIFT 3
#define BXT_MIPI_RX_ESCLK_SHIFT(port) \
_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_SHIFT, \
BXT_MIPI2_RX_ESCLK_SHIFT)
#define BXT_MIPI1_RX_ESCLK_FIXDIV_MASK (3 << 19)
#define BXT_MIPI2_RX_ESCLK_FIXDIV_MASK (3 << 3)
#define BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port) \
(3 << BXT_MIPI_RX_ESCLK_SHIFT(port))
#define BXT_MIPI_RX_ESCLK_8X_BY2(port) \
(1 << BXT_MIPI_RX_ESCLK_SHIFT(port))
#define BXT_MIPI_RX_ESCLK_8X_BY3(port) \
(2 << BXT_MIPI_RX_ESCLK_SHIFT(port))
#define BXT_MIPI_RX_ESCLK_8X_BY4(port) \
(3 << BXT_MIPI_RX_ESCLK_SHIFT(port))
/* BXT-A WA: Always prog DPHY dividers to 00 */
#define BXT_MIPI1_DPHY_DIV_SHIFT 16
#define BXT_MIPI2_DPHY_DIV_SHIFT 0
#define BXT_MIPI_DPHY_DIV_SHIFT(port) \
_MIPI_PORT(port, BXT_MIPI1_DPHY_DIV_SHIFT, \
BXT_MIPI2_DPHY_DIV_SHIFT)
#define BXT_MIPI_1_DPHY_DIVIDER_MASK (3 << 16)
#define BXT_MIPI_2_DPHY_DIVIDER_MASK (3 << 0)
#define BXT_MIPI_DPHY_DIVIDER_MASK(port) \
(3 << BXT_MIPI_DPHY_DIV_SHIFT(port))
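/* Divider-selection sketch for the constraint above (illustrative only;
* dsi_8x_clk_khz is an assumed input):
*
*	u32 div = DIV_ROUND_UP(dsi_8x_clk_khz, BXT_MAX_VAR_OUTPUT_KHZ);
*	u32 tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
*	tmp &= ~BXT_MIPI_ESCLK_VAR_DIV_MASK(port);
*	tmp |= BXT_MIPI_ESCLK_VAR_DIV(port, div);
*	I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
*/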
 
/* BXT MIPI mode configure */
#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8
#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8
#define BXT_MIPI_TRANS_HACTIVE(tc) _MIPI_PORT(tc, \
_BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
 
#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC
#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC
#define BXT_MIPI_TRANS_VACTIVE(tc) _MIPI_PORT(tc, \
_BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
 
#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100
#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900
#define BXT_MIPI_TRANS_VTOTAL(tc) _MIPI_PORT(tc, \
_BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
 
#define BXT_DSI_PLL_CTL 0x161000
#define BXT_DSI_PLL_PVD_RATIO_SHIFT 16
#define BXT_DSI_PLL_PVD_RATIO_MASK (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
#define BXT_DSI_PLL_PVD_RATIO_1 (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
#define BXT_DSIC_16X_BY2 (1 << 10)
#define BXT_DSIC_16X_BY3 (2 << 10)
#define BXT_DSIC_16X_BY4 (3 << 10)
#define BXT_DSIA_16X_BY2 (1 << 8)
#define BXT_DSIA_16X_BY3 (2 << 8)
#define BXT_DSIA_16X_BY4 (3 << 8)
#define BXT_DSI_FREQ_SEL_SHIFT 8
#define BXT_DSI_FREQ_SEL_MASK (0xF << BXT_DSI_FREQ_SEL_SHIFT)
 
#define BXT_DSI_PLL_RATIO_MAX 0x7D
#define BXT_DSI_PLL_RATIO_MIN 0x22
#define BXT_DSI_PLL_RATIO_MASK 0xFF
#define BXT_REF_CLOCK_KHZ 19500
 
#define BXT_DSI_PLL_ENABLE 0x46080
#define BXT_DSI_PLL_DO_ENABLE (1 << 31)
#define BXT_DSI_PLL_LOCKED (1 << 30)
 
#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
#define MIPI_PORT_CTRL(tc) _TRANSCODER(tc, _MIPIA_PORT_CTRL, \
_MIPIB_PORT_CTRL)
#define DPI_ENABLE (1 << 31) /* A + B */
#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
#define MIPI_PORT_CTRL(port) _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
 
/* BXT port control */
#define _BXT_MIPIA_PORT_CTRL 0x6B0C0
#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
#define BXT_MIPI_PORT_CTRL(tc) _MIPI_PORT(tc, _BXT_MIPIA_PORT_CTRL, \
_BXT_MIPIC_PORT_CTRL)
 
#define DPI_ENABLE (1 << 31) /* A + C */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
#define DUAL_LINK_MODE_SHIFT 26
#define DUAL_LINK_MODE_MASK (1 << 26)
#define DUAL_LINK_MODE_FRONT_BACK (0 << 26)
#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26)
#define DITHERING_ENABLE (1 << 25) /* A + B */
#define DITHERING_ENABLE (1 << 25) /* A + C */
#define FLOPPED_HSTX (1 << 23)
#define DE_INVERT (1 << 19) /* XXX */
#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18
6650,10 → 7690,10
#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18)
#define AFE_LATCHOUT (1 << 17)
#define LP_OUTPUT_HOLD (1 << 16)
#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
#define MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT 11
#define MIPIB_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
#define MIPIC_MIPI4DPHY_DELAY_COUNT_SHIFT 11
#define MIPIC_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
#define CSB_SHIFT 9
#define CSB_MASK (3 << 9)
#define CSB_20MHZ (0 << 9)
6662,10 → 7702,10
#define BANDGAP_MASK (1 << 8)
#define BANDGAP_PNW_CIRCUIT (0 << 8)
#define BANDGAP_LNC_CIRCUIT (1 << 8)
#define MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
#define MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
#define TEARING_EFFECT_DELAY (1 << 4) /* A + B */
#define TEARING_EFFECT_SHIFT 2 /* A + B */
#define MIPIC_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
#define MIPIC_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
#define TEARING_EFFECT_DELAY (1 << 4) /* A + C */
#define TEARING_EFFECT_SHIFT 2 /* A + C */
#define TEARING_EFFECT_MASK (3 << 2)
#define TEARING_EFFECT_OFF (0 << 2)
#define TEARING_EFFECT_DSI (1 << 2)
6677,9 → 7717,9
#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
 
#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
#define MIPI_TEARING_CTRL(tc) _TRANSCODER(tc, \
_MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
#define MIPI_TEARING_CTRL(port) _MIPI_PORT(port, \
_MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
#define TEARING_EFFECT_DELAY_SHIFT 0
#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
 
6689,9 → 7729,9
/* MIPI DSI Controller and D-PHY registers */
 
#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
#define _MIPIB_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
#define MIPI_DEVICE_READY(tc) _TRANSCODER(tc, _MIPIA_DEVICE_READY, \
_MIPIB_DEVICE_READY)
#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
#define MIPI_DEVICE_READY(port) _MIPI_PORT(port, _MIPIA_DEVICE_READY, \
_MIPIC_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
#define ULPS_STATE_ENTER (2 << 1)
6700,13 → 7740,13
#define DEVICE_READY (1 << 0)
 
#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
#define _MIPIB_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
#define MIPI_INTR_STAT(tc) _TRANSCODER(tc, _MIPIA_INTR_STAT, \
_MIPIB_INTR_STAT)
#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
#define MIPI_INTR_STAT(port) _MIPI_PORT(port, _MIPIA_INTR_STAT, \
_MIPIC_INTR_STAT)
#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
#define _MIPIB_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
#define MIPI_INTR_EN(tc) _TRANSCODER(tc, _MIPIA_INTR_EN, \
_MIPIB_INTR_EN)
#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
#define MIPI_INTR_EN(port) _MIPI_PORT(port, _MIPIA_INTR_EN, \
_MIPIC_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
#define GEN_READ_DATA_AVAIL (1 << 29)
6741,9 → 7781,9
#define RXSOT_ERROR (1 << 0)
 
#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
#define _MIPIB_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
#define MIPI_DSI_FUNC_PRG(tc) _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \
_MIPIB_DSI_FUNC_PRG)
#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
#define MIPI_DSI_FUNC_PRG(port) _MIPI_PORT(port, _MIPIA_DSI_FUNC_PRG, \
_MIPIC_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
6765,33 → 7805,33
#define DATA_LANES_PRG_REG_MASK (7 << 0)
 
#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
#define _MIPIB_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
#define MIPI_HS_TX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \
_MIPIB_HS_TX_TIMEOUT)
#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
#define MIPI_HS_TX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_HS_TX_TIMEOUT, \
_MIPIC_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
 
#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
#define _MIPIB_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
#define MIPI_LP_RX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \
_MIPIB_LP_RX_TIMEOUT)
#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
#define MIPI_LP_RX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_LP_RX_TIMEOUT, \
_MIPIC_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
 
#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
#define _MIPIB_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
#define MIPI_TURN_AROUND_TIMEOUT(tc) _TRANSCODER(tc, \
_MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
#define MIPI_TURN_AROUND_TIMEOUT(port) _MIPI_PORT(port, \
_MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
 
#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
#define _MIPIB_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
#define MIPI_DEVICE_RESET_TIMER(tc) _TRANSCODER(tc, \
_MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
#define MIPI_DEVICE_RESET_TIMER(port) _MIPI_PORT(port, \
_MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
 
#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
#define _MIPIB_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
#define MIPI_DPI_RESOLUTION(tc) _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \
_MIPIB_DPI_RESOLUTION)
#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
#define MIPI_DPI_RESOLUTION(port) _MIPI_PORT(port, _MIPIA_DPI_RESOLUTION, \
_MIPIC_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
6798,9 → 7838,9
#define HORIZONTAL_ADDRESS_MASK 0xffff
 
#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
#define _MIPIB_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
#define MIPI_DBI_FIFO_THROTTLE(tc) _TRANSCODER(tc, \
_MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
#define MIPI_DBI_FIFO_THROTTLE(port) _MIPI_PORT(port, \
_MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
6807,51 → 7847,51
 
/* regs below are bits 15:0 */
#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
#define _MIPIB_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
#define MIPI_HSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
_MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
#define MIPI_HSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \
_MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
 
#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
#define _MIPIB_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
#define MIPI_HBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HBP_COUNT, \
_MIPIB_HBP_COUNT)
#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
#define MIPI_HBP_COUNT(port) _MIPI_PORT(port, _MIPIA_HBP_COUNT, \
_MIPIC_HBP_COUNT)
 
#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
#define _MIPIB_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
#define MIPI_HFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HFP_COUNT, \
_MIPIB_HFP_COUNT)
#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
#define MIPI_HFP_COUNT(port) _MIPI_PORT(port, _MIPIA_HFP_COUNT, \
_MIPIC_HFP_COUNT)
 
#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
#define _MIPIB_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
#define MIPI_HACTIVE_AREA_COUNT(tc) _TRANSCODER(tc, \
_MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
#define MIPI_HACTIVE_AREA_COUNT(port) _MIPI_PORT(port, \
_MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
 
#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
#define _MIPIB_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
#define MIPI_VSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
_MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
#define MIPI_VSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \
_MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
 
#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
#define _MIPIB_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
#define MIPI_VBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VBP_COUNT, \
_MIPIB_VBP_COUNT)
#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
#define MIPI_VBP_COUNT(port) _MIPI_PORT(port, _MIPIA_VBP_COUNT, \
_MIPIC_VBP_COUNT)
 
#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
#define _MIPIB_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
#define MIPI_VFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VFP_COUNT, \
_MIPIB_VFP_COUNT)
#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
#define MIPI_VFP_COUNT(port) _MIPI_PORT(port, _MIPIA_VFP_COUNT, \
_MIPIC_VFP_COUNT)
 
#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
#define _MIPIB_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
#define MIPI_HIGH_LOW_SWITCH_COUNT(tc) _TRANSCODER(tc, \
_MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MIPI_PORT(port, \
_MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
 
/* regs above are bits 15:0 */
 
#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
#define _MIPIB_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
#define MIPI_DPI_CONTROL(tc) _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \
_MIPIB_DPI_CONTROL)
#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
#define MIPI_DPI_CONTROL(port) _MIPI_PORT(port, _MIPIA_DPI_CONTROL, \
_MIPIC_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
#define BACKLIGHT_ON (1 << 4)
6861,30 → 7901,30
#define SHUTDOWN (1 << 0)
 
#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
#define _MIPIB_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
#define MIPI_DPI_DATA(tc) _TRANSCODER(tc, _MIPIA_DPI_DATA, \
_MIPIB_DPI_DATA)
#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
#define MIPI_DPI_DATA(port) _MIPI_PORT(port, _MIPIA_DPI_DATA, \
_MIPIC_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
 
#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
#define _MIPIB_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
#define MIPI_INIT_COUNT(tc) _TRANSCODER(tc, _MIPIA_INIT_COUNT, \
_MIPIB_INIT_COUNT)
#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
#define MIPI_INIT_COUNT(port) _MIPI_PORT(port, _MIPIA_INIT_COUNT, \
_MIPIC_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
 
#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
#define _MIPIB_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
#define MIPI_MAX_RETURN_PKT_SIZE(tc) _TRANSCODER(tc, \
_MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
#define MIPI_MAX_RETURN_PKT_SIZE(port) _MIPI_PORT(port, \
_MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
 
#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
#define _MIPIB_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
#define MIPI_VIDEO_MODE_FORMAT(tc) _TRANSCODER(tc, \
_MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
#define MIPI_VIDEO_MODE_FORMAT(port) _MIPI_PORT(port, \
_MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
#define IP_TG_CONFIG (1 << 2)
6893,9 → 7933,9
#define VIDEO_MODE_BURST (3 << 0)
 
#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
#define _MIPIB_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
#define MIPI_EOT_DISABLE(tc) _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \
_MIPIB_EOT_DISABLE)
#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
#define MIPI_EOT_DISABLE(port) _MIPI_PORT(port, _MIPIA_EOT_DISABLE, \
_MIPIC_EOT_DISABLE)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
6906,32 → 7946,32
#define EOT_DISABLE (1 << 0)
 
#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
#define _MIPIB_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
#define MIPI_LP_BYTECLK(tc) _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \
_MIPIB_LP_BYTECLK)
#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
#define MIPI_LP_BYTECLK(port) _MIPI_PORT(port, _MIPIA_LP_BYTECLK, \
_MIPIC_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
 
/* bits 31:0 */
#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
#define _MIPIB_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
#define MIPI_LP_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \
_MIPIB_LP_GEN_DATA)
#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
#define MIPI_LP_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_LP_GEN_DATA, \
_MIPIC_LP_GEN_DATA)
 
/* bits 31:0 */
#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
#define _MIPIB_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
#define MIPI_HS_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \
_MIPIB_HS_GEN_DATA)
#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
#define MIPI_HS_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_HS_GEN_DATA, \
_MIPIC_HS_GEN_DATA)
 
#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
#define _MIPIB_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
#define MIPI_LP_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \
_MIPIB_LP_GEN_CTRL)
#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
#define MIPI_LP_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_LP_GEN_CTRL, \
_MIPIC_LP_GEN_CTRL)
#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
#define _MIPIB_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
#define MIPI_HS_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \
_MIPIB_HS_GEN_CTRL)
#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
#define MIPI_HS_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_HS_GEN_CTRL, \
_MIPIC_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
#define SHORT_PACKET_PARAM_SHIFT 8
6939,13 → 7979,13
#define VIRTUAL_CHANNEL_SHIFT 6
#define VIRTUAL_CHANNEL_MASK (3 << 6)
#define DATA_TYPE_SHIFT 0
#define DATA_TYPE_MASK (3f << 0)
#define DATA_TYPE_MASK (0x3f << 0)
/* data type values, see include/video/mipi_display.h */
 
#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
#define _MIPIB_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
#define MIPI_GEN_FIFO_STAT(tc) _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \
_MIPIB_GEN_FIFO_STAT)
#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
#define MIPI_GEN_FIFO_STAT(port) _MIPI_PORT(port, _MIPIA_GEN_FIFO_STAT, \
_MIPIC_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
#define LP_CTRL_FIFO_EMPTY (1 << 26)
6962,17 → 8002,17
#define HS_DATA_FIFO_FULL (1 << 0)
 
#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
#define _MIPIB_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
#define MIPI_HS_LP_DBI_ENABLE(tc) _TRANSCODER(tc, \
_MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
#define MIPI_HS_LP_DBI_ENABLE(port) _MIPI_PORT(port, \
_MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
 
#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
#define _MIPIB_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
#define MIPI_DPHY_PARAM(tc) _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \
_MIPIB_DPHY_PARAM)
#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
#define MIPI_DPHY_PARAM(port) _MIPI_PORT(port, _MIPIA_DPHY_PARAM, \
_MIPIC_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
#define TRAIL_COUNT_SHIFT 16
6984,16 → 8024,16
 
/* bits 31:0 */
#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
#define _MIPIB_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
#define MIPI_DBI_BW_CTRL(tc) _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \
_MIPIB_DBI_BW_CTRL)
#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
#define MIPI_DBI_BW_CTRL(port) _MIPI_PORT(port, _MIPIA_DBI_BW_CTRL, \
_MIPIC_DBI_BW_CTRL)
 
#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ 0xb088)
#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ 0xb888)
#define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc) _TRANSCODER(tc, \
_MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MIPI_PORT(port, \
_MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
7000,20 → 8040,20
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
 
#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
#define _MIPIB_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
#define MIPI_STOP_STATE_STALL(tc) _TRANSCODER(tc, \
_MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
#define MIPI_STOP_STATE_STALL(port) _MIPI_PORT(port, \
_MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
 
#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
#define _MIPIB_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
#define MIPI_INTR_STAT_REG_1(tc) _TRANSCODER(tc, \
_MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
#define MIPI_INTR_STAT_REG_1(port) _MIPI_PORT(port, \
_MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
#define _MIPIB_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
#define MIPI_INTR_EN_REG_1(tc) _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \
_MIPIB_INTR_EN_REG_1)
#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
#define MIPI_INTR_EN_REG_1(port) _MIPI_PORT(port, _MIPIA_INTR_EN_REG_1, \
_MIPIC_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
 
/* XXX: only pipe A ?!? */
7032,9 → 8072,9
/* MIPI adapter registers */
 
#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
#define _MIPIB_CTRL (dev_priv->mipi_mmio_base + 0xb904)
#define MIPI_CTRL(tc) _TRANSCODER(tc, _MIPIA_CTRL, \
_MIPIB_CTRL)
#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904)
#define MIPI_CTRL(port) _MIPI_PORT(port, _MIPIA_CTRL, \
_MIPIC_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
7046,25 → 8086,30
#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
#define RGB_FLIP_TO_BGR (1 << 2)
 
#define BXT_PIPE_SELECT_MASK (7 << 7)
#define BXT_PIPE_SELECT_C (2 << 7)
#define BXT_PIPE_SELECT_B (1 << 7)
#define BXT_PIPE_SELECT_A (0 << 7)
 
#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
#define _MIPIB_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
#define MIPI_DATA_ADDRESS(tc) _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \
_MIPIB_DATA_ADDRESS)
#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
#define MIPI_DATA_ADDRESS(port) _MIPI_PORT(port, _MIPIA_DATA_ADDRESS, \
_MIPIC_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
 
#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
#define _MIPIB_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
#define MIPI_DATA_LENGTH(tc) _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \
_MIPIB_DATA_LENGTH)
#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
#define MIPI_DATA_LENGTH(port) _MIPI_PORT(port, _MIPIA_DATA_LENGTH, \
_MIPIC_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
 
#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
#define _MIPIB_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
#define MIPI_COMMAND_ADDRESS(tc) _TRANSCODER(tc, \
_MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
#define MIPI_COMMAND_ADDRESS(port) _MIPI_PORT(port, \
_MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define AUTO_PWG_ENABLE (1 << 2)
7072,22 → 8117,22
#define COMMAND_VALID (1 << 0)
 
#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
#define _MIPIB_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
#define MIPI_COMMAND_LENGTH(tc) _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \
_MIPIB_COMMAND_LENGTH)
#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
#define MIPI_COMMAND_LENGTH(port) _MIPI_PORT(port, _MIPIA_COMMAND_LENGTH, \
_MIPIC_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
 
#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
#define _MIPIB_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
#define MIPI_READ_DATA_RETURN(tc, n) \
(_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \
#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
#define MIPI_READ_DATA_RETURN(port, n) \
(_MIPI_PORT(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) \
+ 4 * (n)) /* n: 0...7 */
 
#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
#define _MIPIB_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
#define MIPI_READ_DATA_VALID(tc) _TRANSCODER(tc, \
_MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
#define MIPI_READ_DATA_VALID(port) _MIPI_PORT(port, \
_MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
 
/* For UMS only (deprecated): */
7094,4 → 8139,13
#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
 
/* MOCS (Memory Object Control State) registers */
#define GEN9_LNCFCMOCS0 0xb020 /* L3 Cache Control base */
 
#define GEN9_GFX_MOCS_0 0xc800 /* Graphics MOCS base register */
#define GEN9_MFX0_MOCS_0 0xc900 /* Media 0 MOCS base register */
#define GEN9_MFX1_MOCS_0 0xca00 /* Media 1 MOCS base register */
#define GEN9_VEBOX_MOCS_0 0xcb00 /* Video MOCS base register */
#define GEN9_BLT_MOCS_0 0xcc00 /* Blitter MOCS base register */
 
#endif /* _I915_REG_H_ */
/drivers/video/drm/i915/i915_trace.h
12,17 → 12,16
#define trace_i915_gem_ring_flush(a, b, c)
#define trace_i915_gem_object_bind(a, b)
#define trace_i915_ring_wait_end(x)
#define trace_i915_gem_request_add(a, b)
#define trace_i915_gem_request_retire(a, b)
#define trace_i915_gem_request_wait_begin(a, b)
#define trace_i915_gem_request_wait_end(a, b)
#define trace_i915_gem_request_add(req)
#define trace_i915_gem_request_retire(a)
#define trace_i915_gem_request_wait_begin(req)
#define trace_i915_gem_request_wait_end(req)
#define trace_i915_gem_request_complete(a)
#define trace_intel_gpu_freq_change(a)
#define trace_i915_reg_rw(a, b, c, d, e)
#define trace_i915_ring_wait_begin(a)
#define trace_i915_gem_object_pwrite(a, b, c)
#define trace_i915_gem_request_add(a, b)
#define trace_i915_gem_ring_dispatch(a, b, c)
#define trace_i915_gem_ring_dispatch(a, b)
#define trace_i915_gem_ring_sync_to(a, b, c)
#define trace_i915_vma_bind(a, b)
#define trace_i915_vma_unbind(a)
35,7 → 34,14
#define trace_switch_mm(ring, to)
#define trace_i915_ppgtt_create(base)
#define trace_i915_ppgtt_release(base)
#define trace_i915_pipe_update_start(crtc)
#define trace_i915_pipe_update_vblank_evaded(crtc)
#define trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end)
#define trace_i915_page_directory_pointer_entry_alloc(vm, pml4e, start, GEN8_PML4E_SHIFT)
#define trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT)
#define trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT)
#define trace_i915_page_table_entry_map(vm, pde, pt, index, count, GEN6_PTES)
#define trace_i915_va_alloc(vm, start, size, name)
#define trace_i915_gem_request_notify(ring)
 
 
 
#endif
/drivers/video/drm/i915/i915_vgpu.c
0,0 → 1,264
/*
* Copyright(c) 2011-2015 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
 
#include "intel_drv.h"
#include "i915_vgpu.h"
 
/**
* DOC: Intel GVT-g guest support
*
* Intel GVT-g is a graphics virtualization technology that shares the
* GPU among multiple virtual machines on a time-sharing basis. Each
* virtual machine is presented with a virtual GPU (vGPU) that has features
* equivalent to the underlying physical GPU (pGPU), so the i915 driver can
* run seamlessly in a virtual machine. This file provides vGPU-specific
* optimizations when running in a virtual machine, to reduce the complexity
* of vGPU emulation and to improve overall performance.
*
* The primary technique introduced here is the so-called "address space
* ballooning". Intel GVT-g partitions global graphics memory among multiple
* VMs, so each VM can directly access its portion of the memory without the
* hypervisor's intervention, e.g. for filling textures or queuing commands.
* However, with this partitioning an unmodified i915 driver would assume a
* smaller graphics memory starting at address ZERO, which would require the
* vGPU emulation module to translate graphics addresses between 'guest view'
* and 'host view' for every register and command opcode that contains a
* graphics memory address. To reduce this complexity, Intel GVT-g instead
* tells each guest i915 driver the exact partitioning, and the guest then
* reserves the non-allocated portions so its allocator never hands them out.
* The vGPU emulation module therefore only needs to scan and validate
* graphics addresses, without the complexity of address translation.
*
*/
 
/**
* i915_check_vgpu - detect virtual GPU
* @dev: drm device *
*
* This function is called at the initialization stage to detect whether the
* driver is running on a vGPU.
*/
void i915_check_vgpu(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
uint64_t magic;
uint32_t version;
 
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
if (!IS_HASWELL(dev))
return;
 
magic = readq(dev_priv->regs + vgtif_reg(magic));
if (magic != VGT_MAGIC)
return;
 
version = INTEL_VGT_IF_VERSION_ENCODE(
readw(dev_priv->regs + vgtif_reg(version_major)),
readw(dev_priv->regs + vgtif_reg(version_minor)));
if (version != INTEL_VGT_IF_VERSION) {
DRM_INFO("VGT interface version mismatch!\n");
return;
}
 
dev_priv->vgpu.active = true;
DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
}
 
struct _balloon_info_ {
/*
* Up to 2 regions in each of the mappable and unmappable graphics
* memory ranges might be ballooned. Index 0/1 is for mappable
* graphics memory, 2/3 for unmappable graphics memory.
*/
struct drm_mm_node space[4];
};
 
static struct _balloon_info_ bl_info;
 
/**
* intel_vgt_deballoon - deballoon reserved graphics address chunks
*
* This function is called to deallocate the ballooned-out graphics memory
* when the driver is unloaded or when ballooning fails.
*/
void intel_vgt_deballoon(void)
{
int i;
 
DRM_DEBUG("VGT deballoon.\n");
 
for (i = 0; i < 4; i++) {
if (bl_info.space[i].allocated)
drm_mm_remove_node(&bl_info.space[i]);
}
 
memset(&bl_info, 0, sizeof(bl_info));
}
 
static int vgt_balloon_space(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long start, unsigned long end)
{
unsigned long size = end - start;
 
if (start == end)
return -EINVAL;
 
DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024);
 
node->start = start;
node->size = size;
 
return drm_mm_reserve_node(mm, node);
}
 
/**
* intel_vgt_balloon - balloon out reserved graphics address chunks
* @dev: drm device
*
* This function is called at the initialization stage to balloon out the
* graphics address space allocated to other vGPUs, by marking these spaces
* as reserved. The ballooning-related knowledge (starting address and size
* of the mappable/unmappable graphics memory) is described in the vgt_if
* structure, in a reserved mmio range.
*
* To give an example, the drawing below depicts one typical scenario after
* ballooning. Here vGPU1 has two pieces of graphics address space ballooned
* out, one each for the mappable and the non-mappable part. From vGPU1's
* point of view, the total size is the same as the physical one, with the
* start address of its graphics space being zero. Yet some portions are
* ballooned out (the shaded parts, which are marked as reserved by the drm
* allocator). From the host's point of view, the graphics address space is
* partitioned among multiple vGPUs in different VMs.
*
* vGPU1 view Host view
* 0 ------> +-----------+ +-----------+
* ^ |///////////| | vGPU3 |
* | |///////////| +-----------+
* | |///////////| | vGPU2 |
* | +-----------+ +-----------+
* mappable GM | available | ==> | vGPU1 |
* | +-----------+ +-----------+
* | |///////////| | |
* v |///////////| | Host |
* +=======+===========+ +===========+
* ^ |///////////| | vGPU3 |
* | |///////////| +-----------+
* | |///////////| | vGPU2 |
* | +-----------+ +-----------+
* unmappable GM | available | ==> | vGPU1 |
* | +-----------+ +-----------+
* | |///////////| | |
* | |///////////| | Host |
* v |///////////| | |
* total GM size ------> +-----------+ +-----------+
*
* Returns:
* zero on success, non-zero if configuration invalid or ballooning failed
*/
int intel_vgt_balloon(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
 
unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end;
int ret;
 
mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size));
 
mappable_end = mappable_base + mappable_size;
unmappable_end = unmappable_base + unmappable_size;
 
DRM_INFO("VGT ballooning configuration:\n");
DRM_INFO("Mappable graphic memory: base 0x%lx size %ldKiB\n",
mappable_base, mappable_size / 1024);
DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
unmappable_base, unmappable_size / 1024);
 
if (mappable_base < ggtt_vm->start ||
mappable_end > dev_priv->gtt.mappable_end ||
unmappable_base < dev_priv->gtt.mappable_end ||
unmappable_end > ggtt_vm_end) {
DRM_ERROR("Invalid ballooning configuration!\n");
return -EINVAL;
}
 
/* Unmappable graphic memory ballooning */
if (unmappable_base > dev_priv->gtt.mappable_end) {
ret = vgt_balloon_space(&ggtt_vm->mm,
&bl_info.space[2],
dev_priv->gtt.mappable_end,
unmappable_base);
 
if (ret)
goto err;
}
 
/*
* No need to partition out the last physical page,
* because it is reserved to the guard page.
*/
if (unmappable_end < ggtt_vm_end - PAGE_SIZE) {
ret = vgt_balloon_space(&ggtt_vm->mm,
&bl_info.space[3],
unmappable_end,
ggtt_vm_end - PAGE_SIZE);
if (ret)
goto err;
}
 
/* Mappable graphic memory ballooning */
if (mappable_base > ggtt_vm->start) {
ret = vgt_balloon_space(&ggtt_vm->mm,
&bl_info.space[0],
ggtt_vm->start, mappable_base);
 
if (ret)
goto err;
}
 
if (mappable_end < dev_priv->gtt.mappable_end) {
ret = vgt_balloon_space(&ggtt_vm->mm,
&bl_info.space[1],
mappable_end,
dev_priv->gtt.mappable_end);
 
if (ret)
goto err;
}
 
DRM_INFO("VGT balloon successfully\n");
return 0;
 
err:
DRM_ERROR("VGT balloon fail\n");
intel_vgt_deballoon();
return ret;
}
/drivers/video/drm/i915/i915_vgpu.h
0,0 → 1,121
/*
* Copyright(c) 2011-2015 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
 
#ifndef _I915_VGPU_H_
#define _I915_VGPU_H_
 
/* The MMIO offset of the shared info between guest and host emulator */
#define VGT_PVINFO_PAGE 0x78000
#define VGT_PVINFO_SIZE 0x1000
 
/*
* The following structure pages are defined in GEN MMIO space
* for virtualization. (One page for now)
*/
#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */
#define VGT_VERSION_MAJOR 1
#define VGT_VERSION_MINOR 0
 
#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
#define INTEL_VGT_IF_VERSION \
INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
 
/*
* notifications from guest to vgpu device model
*/
enum vgt_g2v_type {
VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2,
VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY,
VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE,
VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY,
VGT_G2V_EXECLIST_CONTEXT_CREATE,
VGT_G2V_EXECLIST_CONTEXT_DESTROY,
VGT_G2V_MAX,
};
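 
/*
* Usage sketch (assuming an I915_WRITE-style accessor and the vgtif_reg()
* helper defined below): after building a new 4-level PPGTT, the guest
* publishes the root pointer and raises the matching notification:
*
*	I915_WRITE(vgtif_reg(pdp0_lo), lower_32_bits(pml4_addr));
*	I915_WRITE(vgtif_reg(pdp0_hi), upper_32_bits(pml4_addr));
*	I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE);
*/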
 
struct vgt_if {
uint64_t magic; /* VGT_MAGIC */
uint16_t version_major;
uint16_t version_minor;
uint32_t vgt_id; /* ID of vGT instance */
uint32_t rsv1[12]; /* pad to offset 0x40 */
/*
* Data structure describing the ballooning info of resources.
* Each VM can only have one contiguous portion of each area for now.
* (May support scattered resources in the future.)
* (Starting from offset 0x40.)
*/
struct {
/* Aperture register ballooning */
struct {
uint32_t base;
uint32_t size;
} mappable_gmadr; /* aperture */
/* GMADR register ballooning */
struct {
uint32_t base;
uint32_t size;
} nonmappable_gmadr; /* non aperture */
/* allowed fence registers */
uint32_t fence_num;
uint32_t rsv2[3];
} avail_rs; /* available/assigned resource */
uint32_t rsv3[0x200 - 24]; /* pad to half page */
/*
* The bottom half page is for responses from the Gfx driver to the hypervisor.
*/
uint32_t rsv4;
uint32_t display_ready; /* ready for display owner switch */
 
uint32_t rsv5[4];
 
uint32_t g2v_notify;
uint32_t rsv6[7];
 
uint32_t pdp0_lo;
uint32_t pdp0_hi;
uint32_t pdp1_lo;
uint32_t pdp1_hi;
uint32_t pdp2_lo;
uint32_t pdp2_hi;
uint32_t pdp3_lo;
uint32_t pdp3_hi;
 
uint32_t execlist_context_descriptor_lo;
uint32_t execlist_context_descriptor_hi;
 
uint32_t rsv7[0x200 - 24]; /* pad to one page */
} __packed;
 
#define vgtif_reg(x) \
(VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x)
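 
/*
* vgtif_reg() is the classic offsetof() trick: taking the address of member
* x in a struct vgt_if based at address 0 yields that member's byte offset,
* so e.g. vgtif_reg(g2v_notify) expands to the MMIO offset
* VGT_PVINFO_PAGE + offsetof(struct vgt_if, g2v_notify).
*/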
 
/* vGPU display status to be used by the host side */
#define VGT_DRV_DISPLAY_NOT_READY 0
#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
 
extern void i915_check_vgpu(struct drm_device *dev);
extern int intel_vgt_balloon(struct drm_device *dev);
extern void intel_vgt_deballoon(void);
 
#endif /* _I915_VGPU_H_ */
/drivers/video/drm/i915/intel_atomic.c
0,0 → 1,311
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
/**
* DOC: atomic modeset support
*
* The functions here implement the state management and hardware programming
* dispatch required by the atomic modeset infrastructure.
* See intel_atomic_plane.c for the plane-specific atomic functionality.
*/
 
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
 
/**
* intel_connector_atomic_get_property - fetch connector property value
* @connector: connector to fetch property for
* @state: state containing the property value
* @property: property to look up
* @val: pointer to write property value into
*
* The DRM core does not store shadow copies of properties for
* atomic-capable drivers. This entrypoint is used to fetch
* the current value of a driver-specific connector property.
*/
int
intel_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val)
{
int i;
 
/*
* TODO: We only have atomic modeset for planes at the moment, so the
* crtc/connector code isn't quite ready yet. Until it's ready,
* continue to look up all property values in the DRM's shadow copy
* in obj->properties->values[].
*
* When the crtc/connector state work matures, this function should
* be updated to read the values out of the state structure instead.
*/
for (i = 0; i < connector->base.properties->count; i++) {
if (connector->base.properties->properties[i] == property) {
*val = connector->base.properties->values[i];
return 0;
}
}
 
return -EINVAL;
}
 
/*
* intel_crtc_duplicate_state - duplicate crtc state
* @crtc: drm crtc
*
* Allocates and returns a copy of the crtc state (both common and
* Intel-specific) for the specified crtc.
*
* Returns: The newly allocated crtc state, or NULL on failure.
*/
struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct intel_crtc_state *crtc_state;
 
crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
if (!crtc_state)
return NULL;
 
__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
 
crtc_state->update_pipe = false;
 
return &crtc_state->base;
}
 
/**
* intel_crtc_destroy_state - destroy crtc state
* @crtc: drm crtc
*
* Destroys the crtc state (both common and Intel-specific) for the
* specified crtc.
*/
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
drm_atomic_helper_crtc_destroy_state(crtc, state);
}
 
/**
* intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
* @dev: DRM device
* @crtc: intel crtc
* @crtc_state: incoming crtc_state to validate and setup scalers
*
* This function sets up scalers based on the staged scaling requests for
* a @crtc and its planes. It is called from the crtc-level check path. If the
* request is supportable, it attaches scalers to the requested planes and crtc.
*
* This function takes into account the current scaler(s) in use by any planes
* that are not part of this atomic state.
*
* Returns:
* 0 - scalers were set up successfully
* error code - otherwise
*/
int intel_atomic_setup_scalers(struct drm_device *dev,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_plane *plane = NULL;
struct intel_plane *intel_plane;
struct intel_plane_state *plane_state = NULL;
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
struct drm_atomic_state *drm_state = crtc_state->base.state;
int num_scalers_need;
int i, j;
 
num_scalers_need = hweight32(scaler_state->scaler_users);
 
/*
* High level flow:
* - staged scaler requests are already in scaler_state->scaler_users
* - check whether staged scaling requests can be supported
* - add planes using scalers that aren't in current transaction
* - assign scalers to requested users
* - as part of plane commit, scalers will be committed
* (i.e., either attached or detached) to respective planes in hw
* - as part of crtc_commit, scaler will be either attached or detached
* to crtc in hw
*/
 
/* fail if required scalers > available scalers */
if (num_scalers_need > intel_crtc->num_scalers) {
DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
num_scalers_need, intel_crtc->num_scalers);
return -EINVAL;
}
 
/* walk through scaler_users bits and start assigning scalers */
for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
int *scaler_id;
const char *name;
int idx;
 
/* skip if scaler not required */
if (!(scaler_state->scaler_users & (1 << i)))
continue;
 
if (i == SKL_CRTC_INDEX) {
name = "CRTC";
idx = intel_crtc->base.base.id;
 
/* panel fitter case: assign as a crtc scaler */
scaler_id = &scaler_state->scaler_id;
} else {
name = "PLANE";
 
/* plane scaler case: assign as a plane scaler */
/* find the plane that set the bit as scaler_user */
plane = drm_state->planes[i];
 
/*
* to enable/disable hq mode, add planes that are using a scaler
* to this transaction
*/
if (!plane) {
struct drm_plane_state *state;
plane = drm_plane_from_index(dev, i);
state = drm_atomic_get_plane_state(drm_state, plane);
if (IS_ERR(state)) {
DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
plane->base.id);
return PTR_ERR(state);
}
 
/*
* the plane is added after plane checks are run,
* but since this plane is unchanged just do the
* minimum required validation.
*/
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
intel_crtc->atomic.wait_for_flips = true;
crtc_state->base.planes_changed = true;
}
 
intel_plane = to_intel_plane(plane);
idx = plane->base.id;
 
/* plane on different crtc cannot be a scaler user of this crtc */
if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
continue;
}
 
plane_state = to_intel_plane_state(drm_state->plane_states[i]);
scaler_id = &plane_state->scaler_id;
}
 
if (*scaler_id < 0) {
/* find a free scaler */
for (j = 0; j < intel_crtc->num_scalers; j++) {
if (!scaler_state->scalers[j].in_use) {
scaler_state->scalers[j].in_use = 1;
*scaler_id = j;
DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
intel_crtc->pipe, *scaler_id, name, idx);
break;
}
}
}
 
if (WARN_ON(*scaler_id < 0)) {
DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
continue;
}
 
/* set scaler mode */
if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
/*
* when only 1 scaler is in use on either pipe A or B,
* scaler 0 operates in high quality (HQ) mode.
* In this case use scaler 0 to take advantage of HQ mode
*/
*scaler_id = 0;
scaler_state->scalers[0].in_use = 1;
scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
scaler_state->scalers[1].in_use = 0;
} else {
scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
}
}
 
return 0;
}
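
/*
 * Illustrative sketch (not upstream code): how a caller could stage a
 * scaler request before intel_atomic_setup_scalers() runs. The helper
 * name is hypothetical; the bit convention matches the loop above,
 * i.e. planes are indexed by drm plane index and SKL_CRTC_INDEX marks
 * the panel fitter (crtc) request.
 */
static inline void stage_scaler_request(struct intel_crtc_state *crtc_state,
					int scaler_user, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	if (need_scaler)
		scaler_state->scaler_users |= (1 << scaler_user);
	else
		scaler_state->scaler_users &= ~(1 << scaler_user);
}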
 
static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll_config *shared_dpll)
{
enum intel_dpll_id i;
 
/* Copy shared dpll state */
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
shared_dpll[i] = pll->config;
}
}
 
struct intel_shared_dpll_config *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
 
WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
 
if (!state->dpll_set) {
state->dpll_set = true;
 
intel_atomic_duplicate_dpll_state(to_i915(s->dev),
state->shared_dpll);
}
 
return state->shared_dpll;
}
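
/*
 * Typical (hypothetical) use during atomic check, sketched under the
 * assumption that the caller holds connection_mutex: reserve a shared
 * DPLL in the state-local copy; the live pll->config is only touched
 * when the state is committed.
 *
 *	struct intel_shared_dpll_config *cfg =
 *		intel_atomic_get_shared_dpll_state(state);
 *
 *	cfg[pll->id].crtc_mask |= 1 << crtc->pipe;
 */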
 
struct drm_atomic_state *
intel_atomic_state_alloc(struct drm_device *dev)
{
struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
 
if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
kfree(state);
return NULL;
}
 
return &state->base;
}
 
void intel_atomic_state_clear(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
drm_atomic_state_default_clear(&state->base);
state->dpll_set = false;
}
/drivers/video/drm/i915/intel_atomic_plane.c
0,0 → 1,247
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
/**
* DOC: atomic plane helpers
*
* The functions here are used by the atomic plane helper functions to
* implement legacy plane updates (i.e., drm_plane->update_plane() and
* drm_plane->disable_plane()). This allows legacy plane updates to use the
* atomic state infrastructure and be carried out as separate
* prepare/check/commit/cleanup steps.
*/
 
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
 
/**
* intel_create_plane_state - create plane state object
* @plane: drm plane
*
* Allocates a fresh plane state for the given plane and sets some of
* the state values to sensible initial values.
*
* Returns: A newly allocated plane state, or NULL on failure
*/
struct intel_plane_state *
intel_create_plane_state(struct drm_plane *plane)
{
struct intel_plane_state *state;
 
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
 
state->base.plane = plane;
state->base.rotation = BIT(DRM_ROTATE_0);
state->ckey.flags = I915_SET_COLORKEY_NONE;
 
return state;
}
 
/**
* intel_plane_duplicate_state - duplicate plane state
* @plane: drm plane
*
* Allocates and returns a copy of the plane state (both common and
* Intel-specific) for the specified plane.
*
* Returns: The newly allocated plane state, or NULL on failure.
*/
struct drm_plane_state *
intel_plane_duplicate_state(struct drm_plane *plane)
{
struct drm_plane_state *state;
struct intel_plane_state *intel_state;
 
intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL);
 
if (!intel_state)
return NULL;
 
state = &intel_state->base;
 
__drm_atomic_helper_plane_duplicate_state(plane, state);
 
return state;
}
 
/**
* intel_plane_destroy_state - destroy plane state
* @plane: drm plane
* @state: state object to destroy
*
* Destroys the plane state (both common and Intel-specific) for the
* specified plane.
*/
void
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
drm_atomic_helper_plane_destroy_state(plane, state);
}
 
static int intel_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state = to_intel_plane_state(state);
struct drm_crtc_state *drm_crtc_state;
int ret;
 
crtc = crtc ? crtc : plane->state->crtc;
intel_crtc = to_intel_crtc(crtc);
 
/*
* Both crtc and plane->crtc could be NULL if we're updating a
* property while the plane is disabled. We don't actually have
* anything driver-specific we need to test in that case, so
* just return success.
*/
if (!crtc)
return 0;
 
drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
if (WARN_ON(!drm_crtc_state))
return -EINVAL;
 
crtc_state = to_intel_crtc_state(drm_crtc_state);
 
/*
* The original src/dest coordinates are stored in state->base, but
* we want to keep another copy internal to our driver that we can
* clip/modify ourselves.
*/
intel_state->src.x1 = state->src_x;
intel_state->src.y1 = state->src_y;
intel_state->src.x2 = state->src_x + state->src_w;
intel_state->src.y2 = state->src_y + state->src_h;
intel_state->dst.x1 = state->crtc_x;
intel_state->dst.y1 = state->crtc_y;
intel_state->dst.x2 = state->crtc_x + state->crtc_w;
intel_state->dst.y2 = state->crtc_y + state->crtc_h;
 
/* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */
intel_state->clip.x1 = 0;
intel_state->clip.y1 = 0;
intel_state->clip.x2 =
crtc_state->base.active ? crtc_state->pipe_src_w : 0;
intel_state->clip.y2 =
crtc_state->base.active ? crtc_state->pipe_src_h : 0;
 
if (state->fb && intel_rotation_90_or_270(state->rotation)) {
if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
return -EINVAL;
}
 
/*
* 90/270 is not allowed with RGB64 16:16:16:16,
* RGB 16-bit 5:6:5, and Indexed 8-bit.
* TBD: Add RGB64 case once it's added to the supported format list.
*/
switch (state->fb->pixel_format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
drm_get_format_name(state->fb->pixel_format));
return -EINVAL;
 
default:
break;
}
}
 
intel_state->visible = false;
ret = intel_plane->check_plane(plane, crtc_state, intel_state);
if (ret)
return ret;
 
return intel_plane_atomic_calc_changes(&crtc_state->base, state);
}
 
static void intel_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state =
to_intel_plane_state(plane->state);
 
intel_plane->commit_plane(plane, intel_state);
}
 
const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.prepare_fb = intel_prepare_plane_fb,
.cleanup_fb = intel_cleanup_plane_fb,
.atomic_check = intel_plane_atomic_check,
.atomic_update = intel_plane_atomic_update,
};
 
/**
* intel_plane_atomic_get_property - fetch plane property value
* @plane: plane to fetch property for
* @state: state containing the property value
* @property: property to look up
* @val: pointer to write property value into
*
* The DRM core does not store shadow copies of properties for
* atomic-capable drivers. This entrypoint is used to fetch
* the current value of a driver-specific plane property.
*/
int
intel_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
uint64_t *val)
{
DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
return -EINVAL;
}
 
/**
* intel_plane_atomic_set_property - set plane property value
* @plane: plane to set property for
* @state: state to update property value in
* @property: property to set
* @val: value to set property to
*
* Writes the specified property value for a plane into the provided atomic
* state object.
*
* Returns 0 on success, -EINVAL on unrecognized properties
*/
int
intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val)
{
DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
return -EINVAL;
}
/drivers/video/drm/i915/intel_audio.c
22,10 → 22,12
*/
 
#include <linux/kernel.h>
#include <linux/component.h>
#include <drm/i915_component.h>
#include "intel_drv.h"
 
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include "i915_drv.h"
 
/**
39,7 → 41,8
*
* The disable sequences must be performed before disabling the transcoder or
* port. The enable sequences may only be performed after enabling the
* transcoder and port, and after completed link training.
* transcoder and port, and after completed link training. Therefore the audio
* enable/disable sequences are part of the modeset sequence.
*
* The codec and controller sequences could be done either parallel or serial,
* but generally the ELDV/PD change in the codec sequence indicates to the audio
47,6 → 50,11
* co-operation between the graphics and audio drivers is handled via audio
* related registers. (The notable exception is the power management, not
* covered here.)
*
* The struct i915_audio_component is used to interact between the graphics
* and audio drivers. The struct i915_audio_component_ops *ops in it is
* defined in the graphics driver and called by the audio driver. The
* struct i915_audio_component_audio_ops *audio_ops is called from the i915 driver.
*/
 
static const struct {
53,30 → 61,56
int clock;
u32 config;
} hdmi_audio_clock[] = {
{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
{ 25175, AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
{ 27027, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
{ 54054, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
{ 74176, AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
{ 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
};
 
/* HDMI N/CTS table */
#define TMDS_297M 297000
#define TMDS_296M 296703
static const struct {
int sample_rate;
int clock;
int n;
int cts;
} aud_ncts[] = {
{ 44100, TMDS_296M, 4459, 234375 },
{ 44100, TMDS_297M, 4704, 247500 },
{ 48000, TMDS_296M, 5824, 281250 },
{ 48000, TMDS_297M, 5120, 247500 },
{ 32000, TMDS_296M, 5824, 421875 },
{ 32000, TMDS_297M, 3072, 222750 },
{ 88200, TMDS_296M, 8918, 234375 },
{ 88200, TMDS_297M, 9408, 247500 },
{ 96000, TMDS_296M, 11648, 281250 },
{ 96000, TMDS_297M, 10240, 247500 },
{ 176400, TMDS_296M, 17836, 234375 },
{ 176400, TMDS_297M, 18816, 247500 },
{ 192000, TMDS_296M, 23296, 281250 },
{ 192000, TMDS_297M, 20480, 247500 },
};
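
/*
 * Sanity check for the table above, using the HDMI audio clock
 * regeneration relation 128 * sample_rate = TMDS_clock * N / CTS:
 * for 48 kHz at a 297 MHz TMDS clock, 128 * 48000 = 6144000 and
 * 297000000 * 5120 / 247500 = 6144000, so the { 48000, TMDS_297M,
 * 5120, 247500 } entry is consistent.
 */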
 
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted_mode)
{
int i;
 
for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
if (mode->clock == hdmi_audio_clock[i].clock)
if (adjusted_mode->crtc_clock == hdmi_audio_clock[i].clock)
break;
}
 
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
adjusted_mode->crtc_clock);
i = 1;
}
 
87,6 → 121,45
return hdmi_audio_clock[i].config;
}
 
static int audio_config_get_n(const struct drm_display_mode *mode, int rate)
{
int i;
 
for (i = 0; i < ARRAY_SIZE(aud_ncts); i++) {
if ((rate == aud_ncts[i].sample_rate) &&
(mode->clock == aud_ncts[i].clock)) {
return aud_ncts[i].n;
}
}
return 0;
}
 
static uint32_t audio_config_setup_n_reg(int n, uint32_t val)
{
int n_low, n_up;
uint32_t tmp = val;
 
n_low = n & 0xfff;
n_up = (n >> 12) & 0xff;
tmp &= ~(AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK);
tmp |= ((n_up << AUD_CONFIG_UPPER_N_SHIFT) |
(n_low << AUD_CONFIG_LOWER_N_SHIFT) |
AUD_CONFIG_N_PROG_ENABLE);
return tmp;
}
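
/*
 * Example: for n = 5120 (0x1400) the split above yields n_low = 0x400
 * and n_up = 0x1; AUD_CONFIG_N_PROG_ENABLE switches the hardware from
 * automatically derived N to the manually programmed value.
 */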
 
/* check whether N/CTS/M need be set manually */
static bool audio_rate_need_prog(struct intel_crtc *crtc,
const struct drm_display_mode *mode)
{
if (((mode->clock == TMDS_297M) ||
(mode->clock == TMDS_296M)) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
return true;
else
return false;
}
 
static bool intel_eld_uptodate(struct drm_connector *connector,
int reg_eldv, uint32_t bits_eldv,
int reg_elda, uint32_t bits_elda,
135,7 → 208,7
 
static void g4x_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *encoder,
struct drm_display_mode *mode)
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
181,6 → 254,8
 
DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
 
mutex_lock(&dev_priv->av_mutex);
 
/* Disable timestamps */
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
196,22 → 271,31
tmp &= ~AUDIO_ELD_VALID(pipe);
tmp &= ~AUDIO_OUTPUT_ENABLE(pipe);
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
 
mutex_unlock(&dev_priv->av_mutex);
}
 
static void hsw_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *encoder,
struct drm_display_mode *mode)
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
struct i915_audio_component *acomp = dev_priv->audio_component;
const uint8_t *eld = connector->eld;
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(&encoder->base);
enum port port = intel_dig_port->port;
uint32_t tmp;
int len, i;
int n, rate;
 
DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
pipe_name(pipe), drm_eld_size(eld));
 
mutex_lock(&dev_priv->av_mutex);
 
/* Enable audio presence detect, invalidate ELD */
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp |= AUDIO_OUTPUT_ENABLE(pipe);
243,13 → 327,32
/* Enable timestamps */
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
else
tmp |= audio_config_hdmi_pixel_clock(mode);
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
 
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
if (audio_rate_need_prog(intel_crtc, adjusted_mode)) {
if (!acomp)
rate = 0;
else if (port >= PORT_A && port <= PORT_E)
rate = acomp->aud_sample_rate[port];
else {
DRM_ERROR("invalid port: %d\n", port);
rate = 0;
}
n = audio_config_get_n(adjusted_mode, rate);
if (n != 0)
tmp = audio_config_setup_n_reg(n, tmp);
else
DRM_DEBUG_KMS("no suitable N value is found\n");
}
 
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
 
mutex_unlock(&dev_priv->av_mutex);
}
 
static void ilk_audio_codec_disable(struct intel_encoder *encoder)
267,6 → 370,9
DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
 
if (WARN_ON(port == PORT_A))
return;
 
if (HAS_PCH_IBX(dev_priv->dev)) {
aud_config = IBX_AUD_CFG(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
288,12 → 394,7
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(aud_config, tmp);
 
if (WARN_ON(!port)) {
eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
IBX_ELD_VALID(PORT_D);
} else {
eldv = IBX_ELD_VALID(port);
}
 
/* Invalidate ELD */
tmp = I915_READ(aud_cntrl_st2);
303,7 → 404,7
 
static void ilk_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *encoder,
struct drm_display_mode *mode)
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
323,6 → 424,9
DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
port_name(port), pipe_name(pipe), drm_eld_size(eld));
 
if (WARN_ON(port == PORT_A))
return;
 
/*
* FIXME: We're supposed to wait for vblank here, but we have vblanks
* disabled during the mode set. The proper fix would be to push the
347,12 → 451,7
aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
 
if (WARN_ON(!port)) {
eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
IBX_ELD_VALID(PORT_D);
} else {
eldv = IBX_ELD_VALID(port);
}
 
/* Invalidate ELD */
tmp = I915_READ(aud_cntrl_st2);
382,7 → 481,7
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
else
tmp |= audio_config_hdmi_pixel_clock(mode);
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
I915_WRITE(aud_config, tmp);
}
 
397,12 → 496,15
{
struct drm_encoder *encoder = &intel_encoder->base;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
struct drm_display_mode *mode = &crtc->config.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_audio_component *acomp = dev_priv->audio_component;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
enum port port = intel_dig_port->port;
 
connector = drm_select_eld(encoder, mode);
connector = drm_select_eld(encoder);
if (!connector)
return;
 
417,26 → 519,37
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
connector->eld[5] |= (1 << 2);
 
connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
 
if (dev_priv->display.audio_codec_enable)
dev_priv->display.audio_codec_enable(connector, intel_encoder, mode);
dev_priv->display.audio_codec_enable(connector, intel_encoder,
adjusted_mode);
 
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
}
 
/**
* intel_audio_codec_disable - Disable the audio codec for HD audio
* @encoder: encoder on which to disable audio
* @intel_encoder: encoder on which to disable audio
*
* The disable sequences must be performed before disabling the transcoder or
* port.
*/
void intel_audio_codec_disable(struct intel_encoder *encoder)
void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_audio_component *acomp = dev_priv->audio_component;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
enum port port = intel_dig_port->port;
 
if (dev_priv->display.audio_codec_disable)
dev_priv->display.audio_codec_disable(encoder);
dev_priv->display.audio_codec_disable(intel_encoder);
 
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
}
 
/**
461,3 → 574,214
dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
}
}
 
static void i915_audio_component_get_power(struct device *dev)
{
intel_display_power_get(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
}
 
static void i915_audio_component_put_power(struct device *dev)
{
intel_display_power_put(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
}
 
static void i915_audio_component_codec_wake_override(struct device *dev,
bool enable)
{
struct drm_i915_private *dev_priv = dev_to_i915(dev);
u32 tmp;
 
if (!IS_SKYLAKE(dev_priv))
return;
 
/*
* Enable/disable generating the codec wake signal, overriding the
* internal logic that generates the codec wake to the controller.
*/
tmp = I915_READ(HSW_AUD_CHICKENBIT);
tmp &= ~SKL_AUD_CODEC_WAKE_SIGNAL;
I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
usleep_range(1000, 1500);
 
if (enable) {
tmp = I915_READ(HSW_AUD_CHICKENBIT);
tmp |= SKL_AUD_CODEC_WAKE_SIGNAL;
I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
usleep_range(1000, 1500);
}
}
 
/* Get CDCLK in kHz */
static int i915_audio_component_get_cdclk_freq(struct device *dev)
{
struct drm_i915_private *dev_priv = dev_to_i915(dev);
int ret;
 
if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
return -ENODEV;
 
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
ret = dev_priv->display.get_display_clock_speed(dev_priv->dev);
 
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
 
return ret;
}
 
static int i915_audio_component_sync_audio_rate(struct device *dev,
int port, int rate)
{
struct drm_i915_private *dev_priv = dev_to_i915(dev);
struct drm_device *drm_dev = dev_priv->dev;
struct intel_encoder *intel_encoder;
struct intel_digital_port *intel_dig_port;
struct intel_crtc *crtc;
struct drm_display_mode *mode;
struct i915_audio_component *acomp = dev_priv->audio_component;
enum pipe pipe = INVALID_PIPE;
u32 tmp;
int n;
 
/* HSW, BDW and SKL need this fix */
if (!IS_SKYLAKE(dev_priv) &&
!IS_BROADWELL(dev_priv) &&
!IS_HASWELL(dev_priv))
return 0;
 
mutex_lock(&dev_priv->av_mutex);
/* 1. get the pipe */
for_each_intel_encoder(drm_dev, intel_encoder) {
if (intel_encoder->type != INTEL_OUTPUT_HDMI)
continue;
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
if (port == intel_dig_port->port) {
crtc = to_intel_crtc(intel_encoder->base.crtc);
if (!crtc) {
DRM_DEBUG_KMS("%s: crtc is NULL\n", __func__);
continue;
}
pipe = crtc->pipe;
break;
}
}
 
if (pipe == INVALID_PIPE) {
DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port));
mutex_unlock(&dev_priv->av_mutex);
return -ENODEV;
}
DRM_DEBUG_KMS("pipe %c connects port %c\n",
pipe_name(pipe), port_name(port));
mode = &crtc->config->base.adjusted_mode;
 
/* port must be valid now, otherwise the pipe will be invalid */
acomp->aud_sample_rate[port] = rate;
 
/* 2. check whether to set the N/CTS/M manually or not */
if (!audio_rate_need_prog(crtc, mode)) {
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
mutex_unlock(&dev_priv->av_mutex);
return 0;
}
 
n = audio_config_get_n(mode, rate);
if (n == 0) {
DRM_DEBUG_KMS("Using automatic mode for N value on port %c\n",
port_name(port));
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
mutex_unlock(&dev_priv->av_mutex);
return 0;
}
 
/* 3. set the N/CTS/M */
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp = audio_config_setup_n_reg(n, tmp);
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
 
mutex_unlock(&dev_priv->av_mutex);
return 0;
}
 
static const struct i915_audio_component_ops i915_audio_component_ops = {
.owner = THIS_MODULE,
.get_power = i915_audio_component_get_power,
.put_power = i915_audio_component_put_power,
.codec_wake_override = i915_audio_component_codec_wake_override,
.get_cdclk_freq = i915_audio_component_get_cdclk_freq,
.sync_audio_rate = i915_audio_component_sync_audio_rate,
};
 
static int i915_audio_component_bind(struct device *i915_dev,
struct device *hda_dev, void *data)
{
struct i915_audio_component *acomp = data;
struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
int i;
 
if (WARN_ON(acomp->ops || acomp->dev))
return -EEXIST;
 
drm_modeset_lock_all(dev_priv->dev);
acomp->ops = &i915_audio_component_ops;
acomp->dev = i915_dev;
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
dev_priv->audio_component = acomp;
drm_modeset_unlock_all(dev_priv->dev);
 
return 0;
}
 
static void i915_audio_component_unbind(struct device *i915_dev,
struct device *hda_dev, void *data)
{
struct i915_audio_component *acomp = data;
struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
 
drm_modeset_lock_all(dev_priv->dev);
acomp->ops = NULL;
acomp->dev = NULL;
dev_priv->audio_component = NULL;
drm_modeset_unlock_all(dev_priv->dev);
}
 
static const struct component_ops i915_audio_component_bind_ops = {
.bind = i915_audio_component_bind,
.unbind = i915_audio_component_unbind,
};
 
/**
* i915_audio_component_init - initialize and register the audio component
* @dev_priv: i915 device instance
*
* This will register with the component framework a child component which
* will bind dynamically to the snd_hda_intel driver's corresponding master
* component when the latter is registered. During binding the child
* initializes an instance of struct i915_audio_component which it receives
* from the master. The master can then start to use the interface defined by
* this struct. Each side can break the binding at any point by deregistering
* its own component after which each side's component unbind callback is
* called.
*
* We ignore any error during registration and continue with reduced
* functionality (i.e. without HDMI audio).
*/
void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
}
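
/*
 * The body above is intentionally empty in this port: there is no
 * snd_hda_intel master component to bind against. Upstream it
 * essentially reduces to the following sketch:
 *
 *	if (component_add(dev_priv->dev->dev,
 *			  &i915_audio_component_bind_ops) < 0)
 *		DRM_ERROR("failed to add audio component\n");
 */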
 
/**
* i915_audio_component_cleanup - deregister the audio component
* @dev_priv: i915 device instance
*
* Deregisters the audio component, breaking any existing binding to the
* corresponding snd_hda_intel driver's master component.
*/
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
{
}
/drivers/video/drm/i915/intel_bios.c
36,12 → 36,13
 
static int panel_type;
 
static void *
find_section(struct bdb_header *bdb, int section_id)
static const void *
find_section(const void *_bdb, int section_id)
{
u8 *base = (u8 *)bdb;
const struct bdb_header *bdb = _bdb;
const u8 *base = _bdb;
int index = 0;
u16 total, current_size;
u32 total, current_size;
u8 current_id;
 
/* skip to first section */
53,9 → 54,13
current_id = *(base + index);
index++;
 
current_size = *((u16 *)(base + index));
current_size = *((const u16 *)(base + index));
index += 2;
 
/* The MIPI Sequence Block v3+ has a separate size field. */
if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
current_size = *((const u32 *)(base + index + 1));
 
if (index + current_size > total)
return NULL;
 
69,7 → 74,7
}
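
/*
 * BDB block layout assumed by find_section(): each block starts with a
 * one-byte id followed by a little-endian u16 payload size and then the
 * payload itself. The MIPI Sequence Block v3+ additionally carries a
 * separate u32 size after its version byte, which is why it is
 * special-cased above.
 */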
 
static u16
get_blocksize(void *p)
get_blocksize(const void *p)
{
u16 *block_ptr, block_size;
 
121,42 → 126,6
drm_mode_set_name(panel_fixed_mode);
}
 
static bool
lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
const struct lvds_dvo_timing *b)
{
if (a->hactive_hi != b->hactive_hi ||
a->hactive_lo != b->hactive_lo)
return false;
 
if (a->hsync_off_hi != b->hsync_off_hi ||
a->hsync_off_lo != b->hsync_off_lo)
return false;
 
if (a->hsync_pulse_width != b->hsync_pulse_width)
return false;
 
if (a->hblank_hi != b->hblank_hi ||
a->hblank_lo != b->hblank_lo)
return false;
 
if (a->vactive_hi != b->vactive_hi ||
a->vactive_lo != b->vactive_lo)
return false;
 
if (a->vsync_off != b->vsync_off)
return false;
 
if (a->vsync_pulse_width != b->vsync_pulse_width)
return false;
 
if (a->vblank_hi != b->vblank_hi ||
a->vblank_lo != b->vblank_lo)
return false;
 
return true;
}
 
static const struct lvds_dvo_timing *
get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
204,7 → 173,7
/* Try to find integrated panel data */
static void
parse_lfp_panel_data(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
const struct bdb_header *bdb)
{
const struct bdb_lvds_options *lvds_options;
const struct bdb_lvds_lfp_data *lvds_lfp_data;
212,7 → 181,7
const struct lvds_dvo_timing *panel_dvo_timing;
const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
int i, downclock, drrs_mode;
int drrs_mode;
 
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
271,30 → 240,6
DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
 
/*
* Iterate over the LVDS panel timing info to find the lowest clock
* for the native resolution.
*/
downclock = panel_dvo_timing->clock;
for (i = 0; i < 16; i++) {
const struct lvds_dvo_timing *dvo_timing;
 
dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
i);
if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
dvo_timing->clock < downclock)
downclock = dvo_timing->clock;
}
 
if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) {
dev_priv->lvds_downclock_avail = 1;
dev_priv->lvds_downclock = downclock * 10;
DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
"Normal Clock %dKHz, downclock %dKHz\n",
panel_fixed_mode->clock, 10*downclock);
}
 
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
lvds_lfp_data_ptrs,
lvds_options->panel_type);
310,7 → 255,8
}
 
static void
parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
parse_lfp_backlight(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
const struct bdb_lfp_backlight_data *backlight_data;
const struct bdb_lfp_backlight_data_entry *entry;
348,9 → 294,9
/* Try to find sdvo panel data */
static void
parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
const struct bdb_header *bdb)
{
struct lvds_dvo_timing *dvo_timing;
const struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
int index;
 
361,7 → 307,7
}
 
if (index == -1) {
struct bdb_sdvo_lvds_options *sdvo_lvds_options;
const struct bdb_sdvo_lvds_options *sdvo_lvds_options;
 
sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
if (!sdvo_lvds_options)
402,10 → 348,10
 
static void
parse_general_features(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
const struct bdb_header *bdb)
{
struct drm_device *dev = dev_priv->dev;
struct bdb_general_features *general;
const struct bdb_general_features *general;
 
general = find_section(bdb, BDB_GENERAL_FEATURES);
if (general) {
428,9 → 374,9
 
static void
parse_general_definitions(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
const struct bdb_header *bdb)
{
struct bdb_general_definitions *general;
const struct bdb_general_definitions *general;
 
general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (general) {
438,7 → 384,7
if (block_size >= sizeof(*general)) {
int bus_pin = general->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
if (intel_gmbus_is_port_valid(bus_pin))
if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
dev_priv->vbt.crt_ddc_pin = bus_pin;
} else {
DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
447,13 → 393,19
}
}
 
static const union child_device_config *
child_device_ptr(const struct bdb_general_definitions *p_defs, int i)
{
return (const void *) &p_defs->devices[i * p_defs->child_dev_size];
}
 
static void
parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
const struct bdb_header *bdb)
{
struct sdvo_device_mapping *p_mapping;
struct bdb_general_definitions *p_defs;
union child_device_config *p_child;
const struct bdb_general_definitions *p_defs;
const struct old_child_dev_config *child; /* legacy */
int i, child_device_num, count;
u16 block_size;
 
462,14 → 414,14
DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
return;
}
/* judge whether the size of child device meets the requirements.
* If the child device size obtained from general definition block
* is different with sizeof(struct child_device_config), skip the
* parsing of sdvo device info
 
/*
* Only parse SDVO mappings when the general definitions block child
* device size matches that of the *legacy* child device config
* struct. Thus, SDVO mapping will be skipped for newer VBT.
*/
if (p_defs->child_dev_size != sizeof(*p_child)) {
/* different child dev size . Ignore it */
DRM_DEBUG_KMS("different child size is found. Invalid.\n");
if (p_defs->child_dev_size != sizeof(*child)) {
DRM_DEBUG_KMS("Unsupported child device size for SDVO mapping.\n");
return;
}
/* get the block size of general definitions */
476,16 → 428,16
block_size = get_blocksize(p_defs);
/* get the number of child device */
child_device_num = (block_size - sizeof(*p_defs)) /
sizeof(*p_child);
p_defs->child_dev_size;
count = 0;
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
if (!p_child->old.device_type) {
child = &child_device_ptr(p_defs, i)->old;
if (!child->device_type) {
/* skip the device block if device type is invalid */
continue;
}
if (p_child->old.slave_addr != SLAVE_ADDR1 &&
p_child->old.slave_addr != SLAVE_ADDR2) {
if (child->slave_addr != SLAVE_ADDR1 &&
child->slave_addr != SLAVE_ADDR2) {
/*
* If the slave address is neither 0x70 nor 0x72,
* it is not a SDVO device. Skip it.
492,8 → 444,8
*/
continue;
}
if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
p_child->old.dvo_port != DEVICE_PORT_DVOC) {
if (child->dvo_port != DEVICE_PORT_DVOB &&
child->dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
continue;
500,16 → 452,16
}
DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
" %s port\n",
p_child->old.slave_addr,
(p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
child->slave_addr,
(child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
p_mapping = &(dev_priv->sdvo_mappings[child->dvo_port - 1]);
if (!p_mapping->initialized) {
p_mapping->dvo_port = p_child->old.dvo_port;
p_mapping->slave_addr = p_child->old.slave_addr;
p_mapping->dvo_wiring = p_child->old.dvo_wiring;
p_mapping->ddc_pin = p_child->old.ddc_pin;
p_mapping->i2c_pin = p_child->old.i2c_pin;
p_mapping->dvo_port = child->dvo_port;
p_mapping->slave_addr = child->slave_addr;
p_mapping->dvo_wiring = child->dvo_wiring;
p_mapping->ddc_pin = child->ddc_pin;
p_mapping->i2c_pin = child->i2c_pin;
p_mapping->initialized = 1;
DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
p_mapping->dvo_port,
521,7 → 473,7
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
}
if (p_child->old.slave2_addr) {
if (child->slave2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
539,9 → 491,9
 
static void
parse_driver_features(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
const struct bdb_header *bdb)
{
struct bdb_driver_features *driver;
const struct bdb_driver_features *driver;
 
driver = find_section(bdb, BDB_DRIVER_FEATURES);
if (!driver)
565,11 → 517,11
}
 
static void
parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
{
struct bdb_edp *edp;
struct edp_power_seq *edp_pps;
struct edp_link_params *edp_link_params;
const struct bdb_edp *edp;
const struct edp_power_seq *edp_pps;
const struct edp_link_params *edp_link_params;
 
edp = find_section(bdb, BDB_EDP);
if (!edp) {
662,8 → 614,64
edp_link_params->vswing);
break;
}
 
if (bdb->version >= 173) {
uint8_t vswing;
 
/* Don't read from VBT if module parameter has valid value */
if (i915.edp_vswing) {
dev_priv->edp_low_vswing = i915.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
dev_priv->edp_low_vswing = vswing == 0;
}
}
}
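
/*
 * Layout note for the v173 read above: edp_vswing_preemph packs one
 * 4-bit entry per panel into a u64, so panel_type N occupies bits
 * [4*N+3:4*N]; e.g. panel_type 2 is read from bits 11:8, and a value
 * of 0 selects the low-vswing table.
 */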
 
static void
parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
{
const struct bdb_psr *psr;
const struct psr_table *psr_table;
 
psr = find_section(bdb, BDB_PSR);
if (!psr) {
DRM_DEBUG_KMS("No PSR BDB found.\n");
return;
}
 
psr_table = &psr->psr_table[panel_type];
 
dev_priv->vbt.psr.full_link = psr_table->full_link;
dev_priv->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
 
/* Allowed VBT values go from 0 to 15 */
dev_priv->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
 
switch (psr_table->lines_to_wait) {
case 0:
dev_priv->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT;
break;
case 1:
dev_priv->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT;
break;
case 2:
dev_priv->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT;
break;
case 3:
dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
break;
default:
DRM_DEBUG_KMS("VBT has unknown PSR lines to wait %u\n",
psr_table->lines_to_wait);
break;
}
 
dev_priv->vbt.psr.tp1_wakeup_time = psr_table->tp1_wakeup_time;
dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
}
 
static u8 *goto_next_sequence(u8 *data, int *size)
{
u16 len;
732,13 → 740,14
}
 
static void
parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
{
struct bdb_mipi_config *start;
struct bdb_mipi_sequence *sequence;
struct mipi_config *config;
struct mipi_pps_data *pps;
u8 *data, *seq_data;
const struct bdb_mipi_config *start;
const struct bdb_mipi_sequence *sequence;
const struct mipi_config *config;
const struct mipi_pps_data *pps;
u8 *data;
const u8 *seq_data;
int i, panel_id, seq_size;
u16 block_size;
 
794,6 → 803,12
return;
}
 
/* Fail gracefully for forward incompatible sequence block. */
if (sequence->version >= 3) {
DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
return;
}
 
DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
 
block_size = get_blocksize(sequence);
881,8 → 896,19
memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
}
 
static u8 translate_iboost(u8 val)
{
static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
 
if (val >= ARRAY_SIZE(mapping)) {
DRM_DEBUG_KMS("Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
return 0;
}
return mapping[val];
}
 
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
struct bdb_header *bdb)
const struct bdb_header *bdb)
{
union child_device_config *it, *child = NULL;
struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
889,16 → 915,16
uint8_t hdmi_level_shift;
int i, j;
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
uint8_t aux_channel;
uint8_t aux_channel, ddc_pin;
/* Each DDI port can have more than one value on the "DVO Port" field,
* so look for all the possible values for each port and abort if more
* than one is found. */
int dvo_ports[][2] = {
{DVO_PORT_HDMIA, DVO_PORT_DPA},
{DVO_PORT_HDMIB, DVO_PORT_DPB},
{DVO_PORT_HDMIC, DVO_PORT_DPC},
{DVO_PORT_HDMID, DVO_PORT_DPD},
{DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
int dvo_ports[][3] = {
{DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
{DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
{DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
{DVO_PORT_HDMID, DVO_PORT_DPD, -1},
{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
};
 
/* Find the child device to use, abort if more than one found. */
905,7 → 931,7
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
it = dev_priv->vbt.child_dev + i;
 
for (j = 0; j < 2; j++) {
for (j = 0; j < 3; j++) {
if (dvo_ports[port][j] == -1)
break;
 
923,6 → 949,7
return;
 
aux_channel = child->raw[25];
ddc_pin = child->common.ddc_pin;
 
is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
954,22 → 981,53
DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
 
if (is_dvi) {
if (child->common.ddc_pin == 0x05 && port != PORT_B)
if (port == PORT_E) {
info->alternate_ddc_pin = ddc_pin;
/* If DDI E shares a DDC pin with another port, then
* dvi/hdmi can't exist on the shared port, since both
* would use the same DDC pin and the system couldn't
* communicate with them separately. */
if (ddc_pin == DDC_PIN_B) {
dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
} else if (ddc_pin == DDC_PIN_C) {
dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
} else if (ddc_pin == DDC_PIN_D) {
dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
}
} else if (ddc_pin == DDC_PIN_B && port != PORT_B)
DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
if (child->common.ddc_pin == 0x04 && port != PORT_C)
else if (ddc_pin == DDC_PIN_C && port != PORT_C)
DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
if (child->common.ddc_pin == 0x06 && port != PORT_D)
else if (ddc_pin == DDC_PIN_D && port != PORT_D)
DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
}
 
if (is_dp) {
if (aux_channel == 0x40 && port != PORT_A)
if (port == PORT_E) {
info->alternate_aux_channel = aux_channel;
/* If DDI E shares an AUX channel with another port, then
* DP can't exist on the shared port, since both would
* use the same AUX channel and the system couldn't
* communicate with them separately. */
if (aux_channel == DP_AUX_A)
dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
else if (aux_channel == DP_AUX_B)
dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
else if (aux_channel == DP_AUX_C)
dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
else if (aux_channel == DP_AUX_D)
dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
} else if (aux_channel == DP_AUX_A && port != PORT_A)
DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
if (aux_channel == 0x10 && port != PORT_B)
else if (aux_channel == DP_AUX_B && port != PORT_B)
DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
if (aux_channel == 0x20 && port != PORT_C)
else if (aux_channel == DP_AUX_C && port != PORT_C)
DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
if (aux_channel == 0x30 && port != PORT_D)
else if (aux_channel == DP_AUX_D && port != PORT_D)
DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
}
 
981,10 → 1039,20
hdmi_level_shift);
info->hdmi_level_shift = hdmi_level_shift;
}
 
/* Parse the I_boost config for SKL and above */
if (bdb->version >= 196 && (child->common.flags_1 & IBOOST_ENABLE)) {
info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
port_name(port), info->dp_boost_level);
info->hdmi_boost_level = translate_iboost(child->common.iboost_level >> 4);
DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
port_name(port), info->hdmi_boost_level);
}
}
 
static void parse_ddi_ports(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
const struct bdb_header *bdb)
{
struct drm_device *dev = dev_priv->dev;
enum port port;
1004,11 → 1072,13
 
static void
parse_device_mapping(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
const struct bdb_header *bdb)
{
struct bdb_general_definitions *p_defs;
union child_device_config *p_child, *child_dev_ptr;
const struct bdb_general_definitions *p_defs;
const union child_device_config *p_child;
union child_device_config *child_dev_ptr;
int i, child_device_num, count;
u8 expected_size;
u16 block_size;
 
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
1016,25 → 1086,40
DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
/* judge whether the size of child device meets the requirements.
* If the child device size obtained from general definition block
* is different with sizeof(struct child_device_config), skip the
* parsing of sdvo device info
*/
if (p_defs->child_dev_size != sizeof(*p_child)) {
/* different child dev size . Ignore it */
DRM_DEBUG_KMS("different child size is found. Invalid.\n");
if (bdb->version < 195) {
expected_size = sizeof(struct old_child_dev_config);
} else if (bdb->version == 195) {
expected_size = 37;
} else if (bdb->version <= 197) {
expected_size = 38;
} else {
expected_size = 38;
BUILD_BUG_ON(sizeof(*p_child) < 38);
DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
bdb->version, expected_size);
}
 
/* The legacy sized child device config is the minimum we need. */
if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
DRM_ERROR("Child device config size %u is too small.\n",
p_defs->child_dev_size);
return;
}
 
/* Flag an error for unexpected size, but continue anyway. */
if (p_defs->child_dev_size != expected_size)
DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
p_defs->child_dev_size, expected_size, bdb->version);
 
/* get the block size of general definitions */
block_size = get_blocksize(p_defs);
/* get the number of child device */
child_device_num = (block_size - sizeof(*p_defs)) /
sizeof(*p_child);
p_defs->child_dev_size;
count = 0;
/* get the number of child device that is present */
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
p_child = child_device_ptr(p_defs, i);
if (!p_child->common.device_type) {
/* skip the device block if device type is invalid */
continue;
1054,7 → 1139,7
dev_priv->vbt.child_dev_num = count;
count = 0;
for (i = 0; i < child_device_num; i++) {
p_child = &(p_defs->devices[i]);
p_child = child_device_ptr(p_defs, i);
if (!p_child->common.device_type) {
/* skip the device block if device type is invalid */
continue;
1070,8 → 1155,14
 
child_dev_ptr = dev_priv->vbt.child_dev + count;
count++;
memcpy((void *)child_dev_ptr, (void *)p_child,
sizeof(*p_child));
 
/*
* Copy as much as we know (sizeof) and is available
* (child_dev_size) of the child device. Accessing the data must
* depend on VBT version.
*/
memcpy(child_dev_ptr, p_child,
min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
}
return;
}
1082,7 → 1173,7
struct drm_device *dev = dev_priv->dev;
enum port port;
 
dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
 
/* Default to having backlight */
dev_priv->vbt.backlight.present = true;
1140,19 → 1231,15
{ }
};
 
static struct bdb_header *validate_vbt(char *base, size_t size,
struct vbt_header *vbt,
static const struct bdb_header *validate_vbt(const void *base,
size_t size,
const void *_vbt,
const char *source)
{
size_t offset;
struct bdb_header *bdb;
size_t offset = _vbt - base;
const struct vbt_header *vbt = _vbt;
const struct bdb_header *bdb;
 
if (vbt == NULL) {
DRM_DEBUG_DRIVER("VBT signature missing\n");
return NULL;
}
 
offset = (char *)vbt - base;
if (offset + sizeof(struct vbt_header) > size) {
DRM_DEBUG_DRIVER("VBT header incomplete\n");
return NULL;
1169,7 → 1256,7
return NULL;
}
 
bdb = (struct bdb_header *)(base + offset);
bdb = base + offset;
if (offset + bdb->bdb_size > size) {
DRM_DEBUG_DRIVER("BDB incomplete\n");
return NULL;
1180,6 → 1267,30
return bdb;
}
 
static const struct bdb_header *find_vbt(void __iomem *bios, size_t size)
{
const struct bdb_header *bdb = NULL;
size_t i;
 
/* Scour memory looking for the VBT signature. */
for (i = 0; i + 4 < size; i++) {
if (ioread32(bios + i) == *((const u32 *) "$VBT")) {
/*
* This is the one place where we explicitly discard the
* address space (__iomem) of the BIOS/VBT. From now on
* everything is based on 'base', and treated as regular
* memory.
*/
void *_bios = (void __force *) bios;
 
bdb = validate_vbt(_bios, size, _bios + i, "PCI ROM");
break;
}
}
 
return bdb;
}
 
/**
* intel_parse_bios - find VBT and initialize settings from the BIOS
* @dev: DRM device
1194,7 → 1305,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev->pdev;
struct bdb_header *bdb = NULL;
const struct bdb_header *bdb = NULL;
u8 __iomem *bios = NULL;
 
if (HAS_PCH_NOP(dev))
1204,27 → 1315,17
 
/* XXX Should this validation be moved to intel_opregion.c? */
if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt)
bdb = validate_vbt((char *)dev_priv->opregion.header, OPREGION_SIZE,
(struct vbt_header *)dev_priv->opregion.vbt,
"OpRegion");
bdb = validate_vbt(dev_priv->opregion.header, OPREGION_SIZE,
dev_priv->opregion.vbt, "OpRegion");
 
if (bdb == NULL) {
size_t i, size;
size_t size;
 
bios = pci_map_rom(pdev, &size);
if (!bios)
return -1;
 
/* Scour memory looking for the VBT signature */
for (i = 0; i + 4 < size; i++) {
if (memcmp(bios + i, "$VBT", 4) == 0) {
bdb = validate_vbt(bios, size,
(struct vbt_header *)(bios + i),
"PCI ROM");
break;
}
}
 
bdb = find_vbt(bios, size);
if (!bdb) {
pci_unmap_rom(pdev, bios);
return -1;
1241,6 → 1342,7
parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
parse_psr(dev_priv, bdb);
parse_mipi(dev_priv, bdb);
parse_ddi_ports(dev_priv, bdb);
 
1249,21 → 1351,3
 
return 0;
}
 
/* Ensure that vital registers have been initialised, even if the BIOS
* is absent or just failing to do its job.
*/
void intel_setup_bios(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Set the Panel Power On/Off timings if uninitialized. */
if (!HAS_PCH_SPLIT(dev) &&
I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
/* Set T2 to 40ms and T5 to 200ms */
I915_WRITE(PP_ON_DELAYS, 0x019007d0);
 
/* Set T3 to 35ms and Tx to 200ms */
I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
}
}
/drivers/video/drm/i915/intel_bios.h
80,7 → 80,7
#define BDB_EXT_MMIO_REGS 6
#define BDB_SWF_IO 7
#define BDB_SWF_MMIO 8
#define BDB_DOT_CLOCK_TABLE 9
#define BDB_PSR 9
#define BDB_MODE_REMOVAL_TABLE 10
#define BDB_CHILD_DEVICE_TABLE 11
#define BDB_DRIVER_FEATURES 12
203,9 → 203,11
#define DEVICE_PORT_DVOB 0x01
#define DEVICE_PORT_DVOC 0x02
 
/* We used to keep this struct but without any version control. We should avoid
/*
* We used to keep this struct but without any version control. We should avoid
* using it in the future, but it should be safe to keep using it in the old
* code. */
* code. Do not change; we rely on its size.
*/
struct old_child_dev_config {
u16 handle;
u16 device_type;
231,6 → 233,10
/* This one contains field offsets that are known to be common for all BDB
* versions. Notice that the meaning of the contents may still change,
* but at least the offsets are consistent. */
 
/* Definitions for flags_1 */
#define IBOOST_ENABLE (1<<3)
 
struct common_child_dev_config {
u16 handle;
u16 device_type;
239,8 → 245,13
u8 not_common2[2];
u8 ddc_pin;
u16 edid_ptr;
u8 obsolete;
u8 flags_1;
u8 not_common3[13];
u8 iboost_level;
} __packed;
 
 
/* This field changes depending on the BDB version, so the most reliable way to
* read it is by checking the BDB version and reading the raw pointer. */
union child_device_config {
277,9 → 288,9
* And the device num is related with the size of general definition
* block. It is obtained by using the following formula:
* number = (block_size - sizeof(bdb_general_definitions))/
* sizeof(child_device_config);
* defs->child_dev_size;
*/
union child_device_config devices[0];
uint8_t devices[0];
} __packed;
 
/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
554,9 → 565,29
/* ith bit indicates enabled/disabled for (i+1)th panel */
u16 edp_s3d_feature;
u16 edp_t3_optimization;
u64 edp_vswing_preemph; /* v173 */
} __packed;
 
void intel_setup_bios(struct drm_device *dev);
struct psr_table {
/* Feature bits */
u8 full_link:1;
u8 require_aux_to_wakeup:1;
u8 feature_bits_rsvd:6;
 
/* Wait times */
u8 idle_frames:4;
u8 lines_to_wait:3;
u8 wait_times_rsvd:1;
 
/* TP wake up time in multiples of 100 */
u16 tp1_wakeup_time;
u16 tp2_tp3_wakeup_time;
} __packed;
 
struct bdb_psr {
struct psr_table psr_table[16];
} __packed;
 
int intel_parse_bios(struct drm_device *dev);
 
/*
710,7 → 741,6
*/
#define DEVICE_TYPE_eDP_BITS \
(DEVICE_TYPE_INTERNAL_CONNECTOR | \
DEVICE_TYPE_NOT_HDMI_OUTPUT | \
DEVICE_TYPE_MIPI_OUTPUT | \
DEVICE_TYPE_COMPOSITE_OUTPUT | \
DEVICE_TYPE_DUAL_CHANNEL | \
718,7 → 748,6
DEVICE_TYPE_TMDS_DVI_SIGNALING | \
DEVICE_TYPE_VIDEO_SIGNALING | \
DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
DEVICE_TYPE_DIGITAL_OUTPUT | \
DEVICE_TYPE_ANALOG_OUTPUT)
 
/* define the DVO port for HDMI output type */
726,11 → 755,6
#define DVO_C 2
#define DVO_D 3
 
/* define the PORT for DP output type */
#define PORT_IDPB 7
#define PORT_IDPC 8
#define PORT_IDPD 9
 
/* Possible values for the "DVO Port" field for versions >= 155: */
#define DVO_PORT_HDMIA 0
#define DVO_PORT_HDMIB 1
743,6 → 767,8
#define DVO_PORT_DPC 8
#define DVO_PORT_DPD 9
#define DVO_PORT_DPA 10
#define DVO_PORT_DPE 11
#define DVO_PORT_HDMIE 12
#define DVO_PORT_MIPIA 21
#define DVO_PORT_MIPIB 22
#define DVO_PORT_MIPIC 23
757,6 → 783,13
#define MIPI_DSI_UNDEFINED_PANEL_ID 0
#define MIPI_DSI_GENERIC_PANEL_ID 1
 
/*
* PMIC vs SoC Backlight support specified in pwm_blc
* field in mipi_config block below.
*/
#define PPS_BLC_PMIC 0
#define PPS_BLC_SOC 1
 
struct mipi_config {
u16 panel_id;
 
798,7 → 831,8
#define DUAL_LINK_PIXEL_ALT 2
u16 dual_link:2;
u16 lane_cnt:2;
u16 rsvd3:12;
u16 pixel_overlap:3;
u16 rsvd3:9;
 
u16 rsvd4;
 
/drivers/video/drm/i915/intel_crt.c
28,6 → 28,7
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
110,12 → 111,12
}
 
static void intel_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
int dotclock;
 
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
 
dotclock = pipe_config->port_clock;
 
122,33 → 123,21
if (HAS_PCH_SPLIT(dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
 
pipe_config->adjusted_mode.crtc_clock = dotclock;
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
 
static void hsw_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
intel_ddi_get_config(encoder, pipe_config);
 
pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
DRM_MODE_FLAG_NHSYNC |
DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NVSYNC);
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
}
 
static void hsw_crt_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
I915_WRITE(SPLL_CTL,
SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
POSTING_READ(SPLL_CTL);
udelay(20);
}
 
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
157,7 → 146,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
u32 adpa;
 
if (INTEL_INFO(dev)->gen >= 5)
206,18 → 195,13
intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
}
 
static void pch_disable_crt(struct intel_encoder *encoder)
{
}
 
static void hsw_crt_post_disable(struct intel_encoder *encoder)
static void pch_post_disable_crt(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val;
 
DRM_DEBUG_KMS("Disabling SPLL\n");
val = I915_READ(SPLL_CTL);
WARN_ON(!(val & SPLL_PLL_ENABLE));
I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
POSTING_READ(SPLL_CTL);
intel_disable_crt(encoder);
}
 
static void intel_enable_crt(struct intel_encoder *encoder)
227,53 → 211,6
intel_crt_set_dpms(encoder, crt->connector->base.dpms);
}
 
/* Special dpms function to support cloning between dvo/sdvo/crt. */
static void intel_crt_dpms(struct drm_connector *connector, int mode)
{
struct drm_device *dev = connector->dev;
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
int old_dpms;
 
/* PCH platforms and VLV only support on/off. */
if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
 
if (mode == connector->dpms)
return;
 
old_dpms = connector->dpms;
connector->dpms = mode;
 
/* Only need to change hw state when actually enabled */
crtc = encoder->base.crtc;
if (!crtc) {
encoder->connectors_active = false;
return;
}
 
/* We need the pipe to run for anything but OFF. */
if (mode == DRM_MODE_DPMS_OFF)
encoder->connectors_active = false;
else
encoder->connectors_active = true;
 
/* We call connector dpms manually below in case pipe dpms doesn't
* change due to cloning. */
if (mode < old_dpms) {
/* From off to on, enable the pipe first. */
intel_crtc_update_dpms(crtc);
 
intel_crt_set_dpms(encoder, mode);
} else {
intel_crt_set_dpms(encoder, mode);
 
intel_crtc_update_dpms(crtc);
}
 
intel_modeset_check_state(connector->dev);
}
 
static enum drm_mode_status
intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
303,7 → 240,7
}
 
static bool intel_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
 
318,6 → 255,10
if (HAS_DDI(dev)) {
pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
pipe_config->port_clock = 135000 * 2;
 
pipe_config->dpll_hw_state.wrpll = 0;
pipe_config->dpll_hw_state.spll =
SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
}
 
return true;
414,7 → 355,7
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_en, orig, stat;
u32 stat;
bool ret = false;
int i, tries = 0;
 
433,12 → 374,12
tries = 2;
else
tries = 1;
hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
 
for (i = 0; i < tries ; i++) {
/* turn on the FORCE_DETECT */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
i915_hotplug_interrupt_update(dev_priv,
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
CRT_HOTPLUG_FORCE_DETECT) == 0,
453,8 → 394,7
/* clear the interrupt we just generated, if any */
I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
 
/* and put the bits back */
I915_WRITE(PORT_HOTPLUG_EN, orig);
i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
 
return ret;
}
689,7 → 629,7
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
if (I915_HAS_HOTPLUG(dev)) {
if (I915_HAS_HOTPLUG(dev) && !i915.load_detect_test) {
status = connector_status_disconnected;
goto out;
}
705,9 → 645,11
if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else if (INTEL_INFO(dev)->gen < 4)
status = intel_crt_load_detect(crt);
else
status = intel_crt_load_detect(crt);
intel_release_load_detect_pipe(connector, &tmp);
status = connector_status_unknown;
intel_release_load_detect_pipe(connector, &tmp, &ctx);
} else
status = connector_status_unknown;
 
744,7 → 686,7
goto out;
 
/* Try to probe digital port for output in DVI-I -> VGA mode. */
i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPB);
ret = intel_crt_ddc_get_modes(connector, i2c);
 
out:
787,11 → 729,14
 
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.reset = intel_crt_reset,
.dpms = intel_crt_dpms,
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy,
.set_property = intel_crt_set_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_get_property = intel_connector_atomic_get_property,
};
 
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
845,7 → 790,7
if (!crt)
return;
 
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(crt);
return;
882,7 → 827,12
crt->adpa_reg = ADPA;
 
crt->base.compute_config = intel_crt_compute_config;
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) {
crt->base.disable = pch_disable_crt;
crt->base.post_disable = pch_post_disable_crt;
} else {
crt->base.disable = intel_disable_crt;
}
crt->base.enable = intel_enable_crt;
if (I915_HAS_HOTPLUG(dev))
crt->base.hpd_pin = HPD_CRT;
889,8 → 839,6
if (HAS_DDI(dev)) {
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
crt->base.pre_enable = hsw_crt_pre_enable;
crt->base.post_disable = hsw_crt_post_disable;
} else {
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
919,7 → 867,7
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
 
dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config;
}
 
intel_crt_reset(connector);
/drivers/video/drm/i915/intel_csr.c
0,0 → 1,485
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include "i915_drv.h"
#include "i915_reg.h"
 
/**
* DOC: csr support for dmc
*
* Display Context Save and Restore (CSR) firmware support was added from gen9
* onwards to drive the newly added DMC (Display microcontroller) in the
* display engine, which saves and restores the display engine state when it
* enters a low-power state and comes back to normal.
*
* Firmware loading status will be one of the below states: FW_UNINITIALIZED,
* FW_LOADED, FW_FAILED.
*
* Once the firmware is written into the registers, the status moves from
* FW_UNINITIALIZED to FW_LOADED; on any erroneous condition the status moves
* to FW_FAILED.
*/
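/*
* In effect the loader is a small state machine. A sketch of the transitions
* described above, plus a typical check a caller could make before relying
* on DC states (intel_csr_load_status_get() is defined below):
*
*	FW_UNINITIALIZED --(firmware written to registers)--> FW_LOADED
*	FW_UNINITIALIZED --(request/parse error)------------> FW_FAILED
*
*	if (intel_csr_load_status_get(dev_priv) != FW_LOADED)
*		return;	// keep DC states off until the DMC is programmed
*/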
 
#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
 
MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT);
 
/*
* SKL CSR registers for DC5 and DC6
*/
#define CSR_PROGRAM(i) (0x80000 + (i) * 4)
#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0
#define CSR_HTP_ADDR_SKL 0x00500034
#define CSR_SSP_BASE 0x8F074
#define CSR_HTP_SKL 0x8F004
#define CSR_LAST_WRITE 0x8F034
#define CSR_LAST_WRITE_VALUE 0xc003b400
/* MMIO address range for CSR program (0x80000 - 0x82FFF) */
#define CSR_MAX_FW_SIZE 0x2FFF
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
#define CSR_MMIO_START_RANGE 0x80000
#define CSR_MMIO_END_RANGE 0x8FFFF
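/*
* Worked example of the layout above: CSR_PROGRAM(i) places payload dword i
* at 0x80000 + 4*i, so the program window 0x80000-0x82FFF holds 0x3000 bytes
* (0xC00 dwords). finish_csr_load() below compares the payload size in bytes
* against CSR_MAX_FW_SIZE (0x2FFF) to keep the firmware inside that window.
*/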
 
struct intel_css_header {
/* 0x09 for DMC */
uint32_t module_type;
 
/* Includes the DMC specific header in dwords */
uint32_t header_len;
 
/* value is always 0x10000 */
uint32_t header_ver;
 
/* Not used */
uint32_t module_id;
 
/* Not used */
uint32_t module_vendor;
 
/* in YYYYMMDD format */
uint32_t date;
 
/* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
uint32_t size;
 
/* Not used */
uint32_t key_size;
 
/* Not used */
uint32_t modulus_size;
 
/* Not used */
uint32_t exponent_size;
 
/* Not used */
uint32_t reserved1[12];
 
/* Major Minor */
uint32_t version;
 
/* Not used */
uint32_t reserved2[8];
 
/* Not used */
uint32_t kernel_header_info;
} __packed;
 
struct intel_fw_info {
uint16_t reserved1;
 
/* Stepping (A, B, C, ..., *). * is a wildcard */
char stepping;
 
/* Sub-stepping (0, 1, ..., *). * is a wildcard */
char substepping;
 
uint32_t offset;
uint32_t reserved2;
} __packed;
 
struct intel_package_header {
/* DMC container header length in dwords */
unsigned char header_len;
 
/* value is always 0x01 */
unsigned char header_ver;
 
unsigned char reserved[10];
 
/* Number of valid entries in the FWInfo array below */
uint32_t num_entries;
 
struct intel_fw_info fw_info[20];
} __packed;
 
struct intel_dmc_header {
/* value is always 0x40403E3E */
uint32_t signature;
 
/* DMC binary header length */
unsigned char header_len;
 
/* 0x01 */
unsigned char header_ver;
 
/* Reserved */
uint16_t dmcc_ver;
 
/* Major, Minor */
uint32_t project;
 
/* Firmware program size (excluding header) in dwords */
uint32_t fw_size;
 
/* Major Minor version */
uint32_t fw_version;
 
/* Number of valid MMIO cycles present. */
uint32_t mmio_count;
 
/* MMIO address */
uint32_t mmioaddr[8];
 
/* MMIO data */
uint32_t mmiodata[8];
 
/* FW filename */
unsigned char dfile[32];
 
uint32_t reserved1[2];
} __packed;
 
struct stepping_info {
char stepping;
char substepping;
};
 
static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
{'G', '0'}, {'H', '0'}, {'I', '0'}
};
 
static struct stepping_info bxt_stepping_info[] = {
{'A', '0'}, {'A', '1'}, {'A', '2'},
{'B', '0'}, {'B', '1'}, {'B', '2'}
};
 
static char intel_get_stepping(struct drm_device *dev)
{
if (IS_SKYLAKE(dev) && (dev->pdev->revision <
ARRAY_SIZE(skl_stepping_info)))
return skl_stepping_info[dev->pdev->revision].stepping;
else if (IS_BROXTON(dev) && (dev->pdev->revision <
ARRAY_SIZE(bxt_stepping_info)))
return bxt_stepping_info[dev->pdev->revision].stepping;
else
return -ENODATA;
}
 
static char intel_get_substepping(struct drm_device *dev)
{
if (IS_SKYLAKE(dev) && (dev->pdev->revision <
ARRAY_SIZE(skl_stepping_info)))
return skl_stepping_info[dev->pdev->revision].substepping;
else if (IS_BROXTON(dev) && (dev->pdev->revision <
ARRAY_SIZE(bxt_stepping_info)))
return bxt_stepping_info[dev->pdev->revision].substepping;
else
return -ENODATA;
}
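/*
* Example: on a SKL part with pdev->revision == 1 the helpers above return
* 'B' and '0' (skl_stepping_info[1]); finish_csr_load() below matches that
* pair against the fw_info[] entries of the package header, where '*' acts
* as a wildcard for the stepping or sub-stepping.
*/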
 
/**
* intel_csr_load_status_get() - get the firmware loading status.
* @dev_priv: i915 device.
*
* This function returns the current firmware loading status.
*
* Return: Firmware loading status.
*/
enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv)
{
enum csr_state state;
 
mutex_lock(&dev_priv->csr_lock);
state = dev_priv->csr.state;
mutex_unlock(&dev_priv->csr_lock);
 
return state;
}
 
/**
* intel_csr_load_status_set() - set the firmware loading status.
* @dev_priv: i915 device.
* @state: enumeration of firmware loading status.
*
* Set the firmware loading status.
*/
void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
enum csr_state state)
{
mutex_lock(&dev_priv->csr_lock);
dev_priv->csr.state = state;
mutex_unlock(&dev_priv->csr_lock);
}
 
/**
* intel_csr_load_program() - write the firmware from memory to the registers.
* @dev: drm device.
*
* CSR firmware is read from a .bin file and kept in internal memory once.
* Every time the display comes back from a low-power state this function is
* called to copy the firmware from internal memory to the registers.
*/
void intel_csr_load_program(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 *payload = dev_priv->csr.dmc_payload;
uint32_t i, fw_size;
 
if (!IS_GEN9(dev)) {
DRM_ERROR("No CSR support available for this platform\n");
return;
}
 
/*
* FIXME: Firmware gets lost on S3/S4, but not when entering system
* standby or suspend-to-idle (which is just like forced runtime pm).
* Unfortunately the ACPI subsystem doesn't yet give us a way to
* differentiate this, hence figure it out with this hack.
*/
if (I915_READ(CSR_PROGRAM(0)))
return;
 
mutex_lock(&dev_priv->csr_lock);
fw_size = dev_priv->csr.dmc_fw_size;
for (i = 0; i < fw_size; i++)
I915_WRITE(CSR_PROGRAM(i), payload[i]);
 
for (i = 0; i < dev_priv->csr.mmio_count; i++) {
I915_WRITE(dev_priv->csr.mmioaddr[i],
dev_priv->csr.mmiodata[i]);
}
 
dev_priv->csr.state = FW_LOADED;
mutex_unlock(&dev_priv->csr_lock);
}
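/*
* A typical call site, per the FIXME above: after resume from a power state
* that lost the firmware, re-program the DMC before DC states are used. A
* minimal sketch, assuming a resume path that holds the drm_device:
*
*	intel_csr_load_program(dev);	// no-op while CSR_PROGRAM(0) reads back non-zero
*/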
 
static void finish_csr_load(const struct firmware *fw, void *context)
{
struct drm_i915_private *dev_priv = context;
struct drm_device *dev = dev_priv->dev;
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header *dmc_header;
struct intel_csr *csr = &dev_priv->csr;
char stepping = intel_get_stepping(dev);
char substepping = intel_get_substepping(dev);
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
uint32_t i;
uint32_t *dmc_payload;
bool fw_loaded = false;
 
if (!fw) {
i915_firmware_load_error_print(csr->fw_path, 0);
goto out;
}
 
if ((stepping == -ENODATA) || (substepping == -ENODATA)) {
DRM_ERROR("Unknown stepping info, firmware loading failed\n");
goto out;
}
 
/* Extract CSS Header information */
css_header = (struct intel_css_header *)fw->data;
if (sizeof(struct intel_css_header) !=
(css_header->header_len * 4)) {
DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
(css_header->header_len * 4));
goto out;
}
readcount += sizeof(struct intel_css_header);
 
/* Extract Package Header information */
package_header = (struct intel_package_header *)
&fw->data[readcount];
if (sizeof(struct intel_package_header) !=
(package_header->header_len * 4)) {
DRM_ERROR("Firmware has wrong package header length %u bytes\n",
(package_header->header_len * 4));
goto out;
}
readcount += sizeof(struct intel_package_header);
 
/* Search for dmc_offset to find the firmware binary. */
for (i = 0; i < package_header->num_entries; i++) {
if (package_header->fw_info[i].substepping == '*' &&
stepping == package_header->fw_info[i].stepping) {
dmc_offset = package_header->fw_info[i].offset;
break;
} else if (stepping == package_header->fw_info[i].stepping &&
substepping == package_header->fw_info[i].substepping) {
dmc_offset = package_header->fw_info[i].offset;
break;
} else if (package_header->fw_info[i].stepping == '*' &&
package_header->fw_info[i].substepping == '*')
dmc_offset = package_header->fw_info[i].offset;
}
if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
DRM_ERROR("Firmware not supported for %c stepping\n", stepping);
goto out;
}
readcount += dmc_offset;
 
/* Extract dmc_header information. */
dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
(dmc_header->header_len));
goto out;
}
readcount += sizeof(struct intel_dmc_header);
 
/* Cache the dmc header info. */
if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
DRM_ERROR("Firmware has wrong mmio count %u\n",
dmc_header->mmio_count);
goto out;
}
csr->mmio_count = dmc_header->mmio_count;
for (i = 0; i < dmc_header->mmio_count; i++) {
if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
dmc_header->mmioaddr[i]);
goto out;
}
csr->mmioaddr[i] = dmc_header->mmioaddr[i];
csr->mmiodata[i] = dmc_header->mmiodata[i];
}
 
/* fw_size is in dwords, so multiply by 4 to convert to bytes. */
nbytes = dmc_header->fw_size * 4;
if (nbytes > CSR_MAX_FW_SIZE) {
DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
goto out;
}
csr->dmc_fw_size = dmc_header->fw_size;
 
csr->dmc_payload = kmalloc(nbytes, GFP_KERNEL);
if (!csr->dmc_payload) {
DRM_ERROR("Memory allocation failed for dmc payload\n");
goto out;
}
 
dmc_payload = csr->dmc_payload;
memcpy(dmc_payload, &fw->data[readcount], nbytes);
 
/* load csr program during system boot, as needed for DC states */
intel_csr_load_program(dev);
fw_loaded = true;
 
DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path);
out:
if (fw_loaded)
intel_runtime_pm_put(dev_priv);
else
intel_csr_load_status_set(dev_priv, FW_FAILED);
 
release_firmware(fw);
}
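/*
* The parsing above implies the following layout for the firmware blob,
* with byte offsets accumulated in readcount:
*
*	intel_css_header	at offset 0
*	intel_package_header	after the CSS header; its fw_info[] array
*				yields dmc_offset for the current stepping
*	intel_dmc_header	at css + package + dmc_offset; supplies the
*				mmioaddr/mmiodata pairs cached in csr
*	payload			fw_size dwords, copied into dmc_payload
*/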
 
/**
* intel_csr_ucode_init() - initialize the firmware loading.
* @dev: drm device.
*
* This function is called when the display driver loads, to read the
* firmware from a .bin file and copy it into internal memory.
*/
void intel_csr_ucode_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_csr *csr = &dev_priv->csr;
int ret;
 
if (!HAS_CSR(dev))
return;
 
if (IS_SKYLAKE(dev))
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
else {
DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
intel_csr_load_status_set(dev_priv, FW_FAILED);
return;
}
#if 0
/*
* Obtain a runtime pm reference, until CSR is loaded,
* to avoid entering runtime-suspend.
*/
intel_runtime_pm_get(dev_priv);
 
/* CSR supported for platform, load firmware */
ret = request_firmware_nowait(THIS_MODULE, true, csr->fw_path,
&dev_priv->dev->pdev->dev,
GFP_KERNEL, dev_priv,
finish_csr_load);
if (ret) {
i915_firmware_load_error_print(csr->fw_path, ret);
intel_csr_load_status_set(dev_priv, FW_FAILED);
}
#endif
}
 
/**
* intel_csr_ucode_fini() - unload the CSR firmware.
* @dev: drm device.
*
* Firmware unloading includes freeing the internal memory and resetting the
* firmware loading status.
*/
void intel_csr_ucode_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!HAS_CSR(dev))
return;
 
intel_csr_load_status_set(dev_priv, FW_FAILED);
kfree(dev_priv->csr.dmc_payload);
}
 
void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
WARN_ONCE(intel_csr_load_status_get(dev_priv) != FW_LOADED,
"CSR is not loaded.\n");
WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
"CSR program storage start is NULL\n");
WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
/drivers/video/drm/i915/intel_ddi.c
31,6 → 31,7
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
u32 trans2; /* vref sel, vswing */
u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
};
 
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
38,144 → 39,386
* automatically adapt to HDMI connections as well
*/
static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
{ 0x00FFFFFF, 0x0006000E },
{ 0x00D75FFF, 0x0005000A },
{ 0x00C30FFF, 0x00040006 },
{ 0x80AAAFFF, 0x000B0000 },
{ 0x00FFFFFF, 0x0005000A },
{ 0x00D75FFF, 0x000C0004 },
{ 0x80C30FFF, 0x000B0000 },
{ 0x00FFFFFF, 0x00040006 },
{ 0x80D75FFF, 0x000B0000 },
{ 0x00FFFFFF, 0x0006000E, 0x0 },
{ 0x00D75FFF, 0x0005000A, 0x0 },
{ 0x00C30FFF, 0x00040006, 0x0 },
{ 0x80AAAFFF, 0x000B0000, 0x0 },
{ 0x00FFFFFF, 0x0005000A, 0x0 },
{ 0x00D75FFF, 0x000C0004, 0x0 },
{ 0x80C30FFF, 0x000B0000, 0x0 },
{ 0x00FFFFFF, 0x00040006, 0x0 },
{ 0x80D75FFF, 0x000B0000, 0x0 },
};
 
static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
{ 0x00FFFFFF, 0x0007000E },
{ 0x00D75FFF, 0x000F000A },
{ 0x00C30FFF, 0x00060006 },
{ 0x00AAAFFF, 0x001E0000 },
{ 0x00FFFFFF, 0x000F000A },
{ 0x00D75FFF, 0x00160004 },
{ 0x00C30FFF, 0x001E0000 },
{ 0x00FFFFFF, 0x00060006 },
{ 0x00D75FFF, 0x001E0000 },
{ 0x00FFFFFF, 0x0007000E, 0x0 },
{ 0x00D75FFF, 0x000F000A, 0x0 },
{ 0x00C30FFF, 0x00060006, 0x0 },
{ 0x00AAAFFF, 0x001E0000, 0x0 },
{ 0x00FFFFFF, 0x000F000A, 0x0 },
{ 0x00D75FFF, 0x00160004, 0x0 },
{ 0x00C30FFF, 0x001E0000, 0x0 },
{ 0x00FFFFFF, 0x00060006, 0x0 },
{ 0x00D75FFF, 0x001E0000, 0x0 },
};
 
static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
/* Idx NT mV d T mV d db */
{ 0x00FFFFFF, 0x0006000E }, /* 0: 400 400 0 */
{ 0x00E79FFF, 0x000E000C }, /* 1: 400 500 2 */
{ 0x00D75FFF, 0x0005000A }, /* 2: 400 600 3.5 */
{ 0x00FFFFFF, 0x0005000A }, /* 3: 600 600 0 */
{ 0x00E79FFF, 0x001D0007 }, /* 4: 600 750 2 */
{ 0x00D75FFF, 0x000C0004 }, /* 5: 600 900 3.5 */
{ 0x00FFFFFF, 0x00040006 }, /* 6: 800 800 0 */
{ 0x80E79FFF, 0x00030002 }, /* 7: 800 1000 2 */
{ 0x00FFFFFF, 0x00140005 }, /* 8: 850 850 0 */
{ 0x00FFFFFF, 0x000C0004 }, /* 9: 900 900 0 */
{ 0x00FFFFFF, 0x001C0003 }, /* 10: 950 950 0 */
{ 0x80FFFFFF, 0x00030002 }, /* 11: 1000 1000 0 */
{ 0x00FFFFFF, 0x0006000E, 0x0 },/* 0: 400 400 0 */
{ 0x00E79FFF, 0x000E000C, 0x0 },/* 1: 400 500 2 */
{ 0x00D75FFF, 0x0005000A, 0x0 },/* 2: 400 600 3.5 */
{ 0x00FFFFFF, 0x0005000A, 0x0 },/* 3: 600 600 0 */
{ 0x00E79FFF, 0x001D0007, 0x0 },/* 4: 600 750 2 */
{ 0x00D75FFF, 0x000C0004, 0x0 },/* 5: 600 900 3.5 */
{ 0x00FFFFFF, 0x00040006, 0x0 },/* 6: 800 800 0 */
{ 0x80E79FFF, 0x00030002, 0x0 },/* 7: 800 1000 2 */
{ 0x00FFFFFF, 0x00140005, 0x0 },/* 8: 850 850 0 */
{ 0x00FFFFFF, 0x000C0004, 0x0 },/* 9: 900 900 0 */
{ 0x00FFFFFF, 0x001C0003, 0x0 },/* 10: 950 950 0 */
{ 0x80FFFFFF, 0x00030002, 0x0 },/* 11: 1000 1000 0 */
};
 
static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
{ 0x00FFFFFF, 0x00000012 },
{ 0x00EBAFFF, 0x00020011 },
{ 0x00C71FFF, 0x0006000F },
{ 0x00AAAFFF, 0x000E000A },
{ 0x00FFFFFF, 0x00020011 },
{ 0x00DB6FFF, 0x0005000F },
{ 0x00BEEFFF, 0x000A000C },
{ 0x00FFFFFF, 0x0005000F },
{ 0x00DB6FFF, 0x000A000C },
{ 0x00FFFFFF, 0x00000012, 0x0 },
{ 0x00EBAFFF, 0x00020011, 0x0 },
{ 0x00C71FFF, 0x0006000F, 0x0 },
{ 0x00AAAFFF, 0x000E000A, 0x0 },
{ 0x00FFFFFF, 0x00020011, 0x0 },
{ 0x00DB6FFF, 0x0005000F, 0x0 },
{ 0x00BEEFFF, 0x000A000C, 0x0 },
{ 0x00FFFFFF, 0x0005000F, 0x0 },
{ 0x00DB6FFF, 0x000A000C, 0x0 },
};
 
static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
{ 0x00FFFFFF, 0x0007000E },
{ 0x00D75FFF, 0x000E000A },
{ 0x00BEFFFF, 0x00140006 },
{ 0x80B2CFFF, 0x001B0002 },
{ 0x00FFFFFF, 0x000E000A },
{ 0x00DB6FFF, 0x00160005 },
{ 0x80C71FFF, 0x001A0002 },
{ 0x00F7DFFF, 0x00180004 },
{ 0x80D75FFF, 0x001B0002 },
{ 0x00FFFFFF, 0x0007000E, 0x0 },
{ 0x00D75FFF, 0x000E000A, 0x0 },
{ 0x00BEFFFF, 0x00140006, 0x0 },
{ 0x80B2CFFF, 0x001B0002, 0x0 },
{ 0x00FFFFFF, 0x000E000A, 0x0 },
{ 0x00DB6FFF, 0x00160005, 0x0 },
{ 0x80C71FFF, 0x001A0002, 0x0 },
{ 0x00F7DFFF, 0x00180004, 0x0 },
{ 0x80D75FFF, 0x001B0002, 0x0 },
};
 
static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
{ 0x00FFFFFF, 0x0001000E },
{ 0x00D75FFF, 0x0004000A },
{ 0x00C30FFF, 0x00070006 },
{ 0x00AAAFFF, 0x000C0000 },
{ 0x00FFFFFF, 0x0004000A },
{ 0x00D75FFF, 0x00090004 },
{ 0x00C30FFF, 0x000C0000 },
{ 0x00FFFFFF, 0x00070006 },
{ 0x00D75FFF, 0x000C0000 },
{ 0x00FFFFFF, 0x0001000E, 0x0 },
{ 0x00D75FFF, 0x0004000A, 0x0 },
{ 0x00C30FFF, 0x00070006, 0x0 },
{ 0x00AAAFFF, 0x000C0000, 0x0 },
{ 0x00FFFFFF, 0x0004000A, 0x0 },
{ 0x00D75FFF, 0x00090004, 0x0 },
{ 0x00C30FFF, 0x000C0000, 0x0 },
{ 0x00FFFFFF, 0x00070006, 0x0 },
{ 0x00D75FFF, 0x000C0000, 0x0 },
};
 
static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
/* Idx NT mV d T mV df db */
{ 0x00FFFFFF, 0x0007000E }, /* 0: 400 400 0 */
{ 0x00D75FFF, 0x000E000A }, /* 1: 400 600 3.5 */
{ 0x00BEFFFF, 0x00140006 }, /* 2: 400 800 6 */
{ 0x00FFFFFF, 0x0009000D }, /* 3: 450 450 0 */
{ 0x00FFFFFF, 0x000E000A }, /* 4: 600 600 0 */
{ 0x00D7FFFF, 0x00140006 }, /* 5: 600 800 2.5 */
{ 0x80CB2FFF, 0x001B0002 }, /* 6: 600 1000 4.5 */
{ 0x00FFFFFF, 0x00140006 }, /* 7: 800 800 0 */
{ 0x80E79FFF, 0x001B0002 }, /* 8: 800 1000 2 */
{ 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */
{ 0x00FFFFFF, 0x0007000E, 0x0 },/* 0: 400 400 0 */
{ 0x00D75FFF, 0x000E000A, 0x0 },/* 1: 400 600 3.5 */
{ 0x00BEFFFF, 0x00140006, 0x0 },/* 2: 400 800 6 */
{ 0x00FFFFFF, 0x0009000D, 0x0 },/* 3: 450 450 0 */
{ 0x00FFFFFF, 0x000E000A, 0x0 },/* 4: 600 600 0 */
{ 0x00D7FFFF, 0x00140006, 0x0 },/* 5: 600 800 2.5 */
{ 0x80CB2FFF, 0x001B0002, 0x0 },/* 6: 600 1000 4.5 */
{ 0x00FFFFFF, 0x00140006, 0x0 },/* 7: 800 800 0 */
{ 0x80E79FFF, 0x001B0002, 0x0 },/* 8: 800 1000 2 */
{ 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */
};
 
/* Skylake H and S */
static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
{ 0x00000018, 0x000000a0 },
{ 0x00004014, 0x00000098 },
{ 0x00006012, 0x00000088 },
{ 0x00008010, 0x00000080 },
{ 0x00000018, 0x00000098 },
{ 0x00004014, 0x00000088 },
{ 0x00006012, 0x00000080 },
{ 0x00000018, 0x00000088 },
{ 0x00004014, 0x00000080 },
{ 0x00002016, 0x000000A0, 0x0 },
{ 0x00005012, 0x0000009B, 0x0 },
{ 0x00007011, 0x00000088, 0x0 },
{ 0x00009010, 0x000000C7, 0x0 },
{ 0x00002016, 0x0000009B, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x000000C7, 0x0 },
{ 0x00002016, 0x000000DF, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
};
 
/* Skylake U */
static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
{ 0x0000201B, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x00000087, 0x0 },
{ 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x0000201B, 0x0000009D, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
{ 0x00007011, 0x000000C7, 0x0 },
{ 0x00002016, 0x00000088, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
};
 
/* Skylake Y */
static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
{ 0x00000018, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x00000087, 0x0 },
{ 0x80009010, 0x000000C7, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x00000018, 0x0000009D, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
{ 0x00007011, 0x000000C7, 0x0 },
{ 0x00000018, 0x00000088, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
};
 
/*
* Skylake H and S
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
{ 0x00000018, 0x000000A8, 0x0 },
{ 0x00004013, 0x000000A9, 0x0 },
{ 0x00007011, 0x000000A2, 0x0 },
{ 0x00009010, 0x0000009C, 0x0 },
{ 0x00000018, 0x000000A9, 0x0 },
{ 0x00006013, 0x000000A2, 0x0 },
{ 0x00007011, 0x000000A6, 0x0 },
{ 0x00000018, 0x000000AB, 0x0 },
{ 0x00007013, 0x0000009F, 0x0 },
{ 0x00000018, 0x000000DF, 0x0 },
};
 
/*
* Skylake U
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
{ 0x00000018, 0x000000A8, 0x0 },
{ 0x00004013, 0x000000A9, 0x0 },
{ 0x00007011, 0x000000A2, 0x0 },
{ 0x00009010, 0x0000009C, 0x0 },
{ 0x00000018, 0x000000A9, 0x0 },
{ 0x00006013, 0x000000A2, 0x0 },
{ 0x00007011, 0x000000A6, 0x0 },
{ 0x00002016, 0x000000AB, 0x0 },
{ 0x00005013, 0x0000009F, 0x0 },
{ 0x00000018, 0x000000DF, 0x0 },
};
 
/*
* Skylake Y
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
{ 0x00000018, 0x000000A8, 0x0 },
{ 0x00004013, 0x000000AB, 0x0 },
{ 0x00007011, 0x000000A4, 0x0 },
{ 0x00009010, 0x000000DF, 0x0 },
{ 0x00000018, 0x000000AA, 0x0 },
{ 0x00006013, 0x000000A4, 0x0 },
{ 0x00007011, 0x0000009D, 0x0 },
{ 0x00000018, 0x000000A0, 0x0 },
{ 0x00006012, 0x000000DF, 0x0 },
{ 0x00000018, 0x0000008A, 0x0 },
};
 
/* Skylake U, H and S */
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
/* Idx NT mV T mV db */
{ 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */
{ 0x00004014, 0x00000098 }, /* 1: 400 600 3.5 */
{ 0x00006012, 0x00000088 }, /* 2: 400 800 6 */
{ 0x00000018, 0x0000003c }, /* 3: 450 450 0 */
{ 0x00000018, 0x00000098 }, /* 4: 600 600 0 */
{ 0x00003015, 0x00000088 }, /* 5: 600 800 2.5 */
{ 0x00005013, 0x00000080 }, /* 6: 600 1000 4.5 */
{ 0x00000018, 0x00000088 }, /* 7: 800 800 0 */
{ 0x00000096, 0x00000080 }, /* 8: 800 1000 2 */
{ 0x00000018, 0x00000080 }, /* 9: 1200 1200 0 */
{ 0x00000018, 0x000000AC, 0x0 },
{ 0x00005012, 0x0000009D, 0x0 },
{ 0x00007011, 0x00000088, 0x0 },
{ 0x00000018, 0x000000A1, 0x0 },
{ 0x00000018, 0x00000098, 0x0 },
{ 0x00004013, 0x00000088, 0x0 },
{ 0x00006012, 0x00000087, 0x0 },
{ 0x00000018, 0x000000DF, 0x0 },
{ 0x00003015, 0x00000087, 0x0 }, /* Default */
{ 0x00003015, 0x000000C7, 0x0 },
{ 0x00000018, 0x000000C7, 0x0 },
};
 
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
/* Skylake Y */
static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000A1, 0x0 },
{ 0x00005012, 0x000000DF, 0x0 },
{ 0x00007011, 0x00000084, 0x0 },
{ 0x00000018, 0x000000A4, 0x0 },
{ 0x00000018, 0x0000009D, 0x0 },
{ 0x00004013, 0x00000080, 0x0 },
{ 0x00006013, 0x000000C7, 0x0 },
{ 0x00000018, 0x0000008A, 0x0 },
{ 0x00003015, 0x000000C7, 0x0 }, /* Default */
{ 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost level 0x7 */
{ 0x00000018, 0x000000C7, 0x0 },
};
 
struct bxt_ddi_buf_trans {
u32 margin; /* swing value */
u32 scale; /* scale value */
u32 enable; /* scale enable */
u32 deemphasis;
bool default_index; /* true if the entry represents default value */
};
 
static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
/* Idx NT mV diff db */
{ 52, 0x9A, 0, 128, true }, /* 0: 400 0 */
{ 78, 0x9A, 0, 85, false }, /* 1: 400 3.5 */
{ 104, 0x9A, 0, 64, false }, /* 2: 400 6 */
{ 154, 0x9A, 0, 43, false }, /* 3: 400 9.5 */
{ 77, 0x9A, 0, 128, false }, /* 4: 600 0 */
{ 116, 0x9A, 0, 85, false }, /* 5: 600 3.5 */
{ 154, 0x9A, 0, 64, false }, /* 6: 600 6 */
{ 102, 0x9A, 0, 128, false }, /* 7: 800 0 */
{ 154, 0x9A, 0, 85, false }, /* 8: 800 3.5 */
{ 154, 0x9A, 1, 128, false }, /* 9: 1200 0 */
};
 
static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = {
/* Idx NT mV diff db */
{ 26, 0, 0, 128, false }, /* 0: 200 0 */
{ 38, 0, 0, 112, false }, /* 1: 200 1.5 */
{ 48, 0, 0, 96, false }, /* 2: 200 4 */
{ 54, 0, 0, 69, false }, /* 3: 200 6 */
{ 32, 0, 0, 128, false }, /* 4: 250 0 */
{ 48, 0, 0, 104, false }, /* 5: 250 1.5 */
{ 54, 0, 0, 85, false }, /* 6: 250 4 */
{ 43, 0, 0, 128, false }, /* 7: 300 0 */
{ 54, 0, 0, 101, false }, /* 8: 300 1.5 */
{ 48, 0, 0, 128, false }, /* 9: 300 0 */
};
 
/* BSpec has 2 recommended values - entries 0 and 8.
* Using the entry with higher vswing.
*/
static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
/* Idx NT mV diff db */
{ 52, 0x9A, 0, 128, false }, /* 0: 400 0 */
{ 52, 0x9A, 0, 85, false }, /* 1: 400 3.5 */
{ 52, 0x9A, 0, 64, false }, /* 2: 400 6 */
{ 42, 0x9A, 0, 43, false }, /* 3: 400 9.5 */
{ 77, 0x9A, 0, 128, false }, /* 4: 600 0 */
{ 77, 0x9A, 0, 85, false }, /* 5: 600 3.5 */
{ 77, 0x9A, 0, 64, false }, /* 6: 600 6 */
{ 102, 0x9A, 0, 128, false }, /* 7: 800 0 */
{ 102, 0x9A, 0, 85, false }, /* 8: 800 3.5 */
{ 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */
};
 
static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
enum port port, int type);
 
static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
struct intel_digital_port **dig_port,
enum port *port)
{
struct drm_encoder *encoder = &intel_encoder->base;
int type = intel_encoder->type;
 
if (type == INTEL_OUTPUT_DP_MST) {
struct intel_digital_port *intel_dig_port = enc_to_mst(encoder)->primary;
return intel_dig_port->port;
} else if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
return intel_dig_port->port;
switch (intel_encoder->type) {
case INTEL_OUTPUT_DP_MST:
*dig_port = enc_to_mst(encoder)->primary;
*port = (*dig_port)->port;
break;
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_EDP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_UNKNOWN:
*dig_port = enc_to_dig_port(encoder);
*port = (*dig_port)->port;
break;
case INTEL_OUTPUT_ANALOG:
*dig_port = NULL;
*port = PORT_E;
break;
default:
WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
break;
}
}
 
} else if (type == INTEL_OUTPUT_ANALOG) {
return PORT_E;
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
{
struct intel_digital_port *dig_port;
enum port port;
 
ddi_get_encoder_port(intel_encoder, &dig_port, &port);
 
return port;
}
 
static bool
intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
{
return intel_dig_port->hdmi.hdmi_reg;
}
 
static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
int *n_entries)
{
const struct ddi_buf_trans *ddi_translations;
 
if (IS_SKL_ULX(dev)) {
ddi_translations = skl_y_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
} else if (IS_SKL_ULT(dev)) {
ddi_translations = skl_u_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
} else {
DRM_ERROR("Invalid DDI encoder type %d\n", type);
BUG();
ddi_translations = skl_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
}
 
return ddi_translations;
}
 
static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
int *n_entries)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct ddi_buf_trans *ddi_translations;
 
if (IS_SKL_ULX(dev)) {
if (dev_priv->edp_low_vswing) {
ddi_translations = skl_y_ddi_translations_edp;
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
} else {
ddi_translations = skl_y_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
}
} else if (IS_SKL_ULT(dev)) {
if (dev_priv->edp_low_vswing) {
ddi_translations = skl_u_ddi_translations_edp;
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
} else {
ddi_translations = skl_u_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
}
} else {
if (dev_priv->edp_low_vswing) {
ddi_translations = skl_ddi_translations_edp;
*n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
} else {
ddi_translations = skl_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
}
}
 
return ddi_translations;
}
 
static const struct ddi_buf_trans *
skl_get_buf_trans_hdmi(struct drm_device *dev,
int *n_entries)
{
const struct ddi_buf_trans *ddi_translations;
 
if (IS_SKL_ULX(dev)) {
ddi_translations = skl_y_ddi_translations_hdmi;
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
} else {
ddi_translations = skl_ddi_translations_hdmi;
*n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
}
 
return ddi_translations;
}
 
/*
* Starting with Haswell, DDI port buffers must be programmed with correct
* values in advance. The buffer values are different for FDI and DP modes,
183,11 → 426,13
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
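/*
* Concretely, each table entry above is a pair of dwords written to the
* per-port, per-entry DDI_BUF_TRANS registers. A minimal sketch of the
* programming loop used below (on SKL, iboost_bit is or'ed in when the VBT
* requests current boosting):
*
*	for (i = 0; i < size; i++) {
*		I915_WRITE(DDI_BUF_TRANS_LO(port, i),
*			   ddi_translations[i].trans1 | iboost_bit);
*		I915_WRITE(DDI_BUF_TRANS_HI(port, i),
*			   ddi_translations[i].trans2);
*	}
*/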
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
bool supports_hdmi)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
int i, n_hdmi_entries, hdmi_800mV_0dB;
u32 iboost_bit = 0;
int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
size;
int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
const struct ddi_buf_trans *ddi_translations_fdi;
const struct ddi_buf_trans *ddi_translations_dp;
195,27 → 440,44
const struct ddi_buf_trans *ddi_translations_hdmi;
const struct ddi_buf_trans *ddi_translations;
 
if (IS_SKYLAKE(dev)) {
if (IS_BROXTON(dev)) {
if (!supports_hdmi)
return;
 
/* Vswing programming for HDMI */
bxt_ddi_vswing_sequence(dev, hdmi_level, port,
INTEL_OUTPUT_HDMI);
return;
} else if (IS_SKYLAKE(dev)) {
ddi_translations_fdi = NULL;
ddi_translations_dp = skl_ddi_translations_dp;
ddi_translations_edp = skl_ddi_translations_dp;
ddi_translations_hdmi = skl_ddi_translations_hdmi;
n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
hdmi_800mV_0dB = 7;
ddi_translations_dp =
skl_get_buf_trans_dp(dev, &n_dp_entries);
ddi_translations_edp =
skl_get_buf_trans_edp(dev, &n_edp_entries);
ddi_translations_hdmi =
skl_get_buf_trans_hdmi(dev, &n_hdmi_entries);
hdmi_default_entry = 8;
/* If we're boosting the current, set bit 31 of trans1 */
if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
dev_priv->vbt.ddi_port_info[port].dp_boost_level)
iboost_bit = 1<<31;
} else if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp;
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_800mV_0dB = 7;
hdmi_default_entry = 7;
} else if (IS_HASWELL(dev)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp;
ddi_translations_hdmi = hsw_ddi_translations_hdmi;
n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
hdmi_800mV_0dB = 6;
hdmi_default_entry = 6;
} else {
WARN(1, "ddi translation table missing\n");
ddi_translations_edp = bdw_ddi_translations_dp;
222,23 → 484,30
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_800mV_0dB = 7;
hdmi_default_entry = 7;
}
 
switch (port) {
case PORT_A:
ddi_translations = ddi_translations_edp;
size = n_edp_entries;
break;
case PORT_B:
case PORT_C:
ddi_translations = ddi_translations_dp;
size = n_dp_entries;
break;
case PORT_D:
if (intel_dp_is_edp(dev, PORT_D))
if (intel_dp_is_edp(dev, PORT_D)) {
ddi_translations = ddi_translations_edp;
else
size = n_edp_entries;
} else {
ddi_translations = ddi_translations_dp;
size = n_dp_entries;
}
break;
case PORT_E:
if (ddi_translations_fdi)
245,29 → 514,32
ddi_translations = ddi_translations_fdi;
else
ddi_translations = ddi_translations_dp;
size = n_dp_entries;
break;
default:
BUG();
}
 
for (i = 0, reg = DDI_BUF_TRANS(port);
i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
I915_WRITE(reg, ddi_translations[i].trans1);
reg += 4;
I915_WRITE(reg, ddi_translations[i].trans2);
reg += 4;
for (i = 0; i < size; i++) {
I915_WRITE(DDI_BUF_TRANS_LO(port, i),
ddi_translations[i].trans1 | iboost_bit);
I915_WRITE(DDI_BUF_TRANS_HI(port, i),
ddi_translations[i].trans2);
}
 
if (!supports_hdmi)
return;
 
/* Choose a good default if VBT is badly populated */
if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
hdmi_level >= n_hdmi_entries)
hdmi_level = hdmi_800mV_0dB;
hdmi_level = hdmi_default_entry;
 
/* Entry 9 is for HDMI: */
I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1);
reg += 4;
I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans2);
reg += 4;
I915_WRITE(DDI_BUF_TRANS_LO(port, i),
ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
I915_WRITE(DDI_BUF_TRANS_HI(port, i),
ddi_translations_hdmi[hdmi_level].trans2);
}
 
/* Program DDI buffers translations for DP. By default, program ports A-D in DP
275,14 → 547,31
*/
void intel_prepare_ddi(struct drm_device *dev)
{
int port;
struct intel_encoder *intel_encoder;
bool visited[I915_MAX_PORTS] = { 0, };
 
if (!HAS_DDI(dev))
return;
 
for (port = PORT_A; port <= PORT_E; port++)
intel_prepare_ddi_buffers(dev, port);
for_each_intel_encoder(dev, intel_encoder) {
struct intel_digital_port *intel_dig_port;
enum port port;
bool supports_hdmi;
 
if (intel_encoder->type == INTEL_OUTPUT_DSI)
continue;
 
ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port);
if (visited[port])
continue;
 
supports_hdmi = intel_dig_port &&
intel_dig_port_supports_hdmi(intel_dig_port);
 
intel_prepare_ddi_buffers(dev, port, supports_hdmi);
visited[port] = true;
}
}
 
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port)
290,7 → 579,7
uint32_t reg = DDI_BUF_CTL(port);
int i;
 
for (i = 0; i < 8; i++) {
for (i = 0; i < 16; i++) {
udelay(1);
if (I915_READ(reg) & DDI_BUF_IS_IDLE)
return;
321,7 → 610,7
*
* WaFDIAutoLinkSetTimingOverrride:hsw
*/
I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
I915_WRITE(FDI_RX_MISC(PIPE_A), FDI_RX_PWRDN_LANE1_VAL(2) |
FDI_RX_PWRDN_LANE0_VAL(2) |
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
 
328,18 → 617,18
/* Enable the PCH Receiver FDI PLL */
rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE |
FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
POSTING_READ(FDI_RX_CTL(PIPE_A));
udelay(220);
 
/* Switch from Rawclk to PCDclk */
rx_ctl_val |= FDI_PCDCLK;
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
 
/* Configure Port Clock Select */
I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config.ddi_pll_sel);
WARN_ON(intel_crtc->config.ddi_pll_sel != PORT_CLK_SEL_SPLL);
I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config->ddi_pll_sel);
WARN_ON(intel_crtc->config->ddi_pll_sel != PORT_CLK_SEL_SPLL);
 
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
357,7 → 646,7
* port reversal bit */
I915_WRITE(DDI_BUF_CTL(PORT_E),
DDI_BUF_CTL_ENABLE |
((intel_crtc->config.fdi_lanes - 1) << 1) |
((intel_crtc->config->fdi_lanes - 1) << 1) |
DDI_BUF_TRANS_SELECT(i / 2));
POSTING_READ(DDI_BUF_CTL(PORT_E));
 
364,21 → 653,21
udelay(600);
 
/* Program PCH FDI Receiver TU */
I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
I915_WRITE(FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
 
/* Enable PCH FDI Receiver with auto-training */
rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
POSTING_READ(FDI_RX_CTL(PIPE_A));
 
/* Wait for FDI receiver lane calibration */
udelay(30);
 
/* Unset FDI_RX_MISC pwrdn lanes */
temp = I915_READ(_FDI_RXA_MISC);
temp = I915_READ(FDI_RX_MISC(PIPE_A));
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
I915_WRITE(_FDI_RXA_MISC, temp);
POSTING_READ(_FDI_RXA_MISC);
I915_WRITE(FDI_RX_MISC(PIPE_A), temp);
POSTING_READ(FDI_RX_MISC(PIPE_A));
 
/* Wait for FDI auto training time */
udelay(5);
412,15 → 701,15
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
 
rx_ctl_val &= ~FDI_RX_ENABLE;
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
POSTING_READ(FDI_RX_CTL(PIPE_A));
 
/* Reset FDI_RX_MISC pwrdn lanes */
temp = I915_READ(_FDI_RXA_MISC);
temp = I915_READ(FDI_RX_MISC(PIPE_A));
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
I915_WRITE(_FDI_RXA_MISC, temp);
POSTING_READ(_FDI_RXA_MISC);
I915_WRITE(FDI_RX_MISC(PIPE_A), temp);
POSTING_READ(FDI_RX_MISC(PIPE_A));
}
 
DRM_ERROR("FDI link training failed!\n");
435,7 → 724,6
intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
 
}
 
static struct intel_encoder *
459,19 → 747,26
return ret;
}
 
static struct intel_encoder *
intel_ddi_get_crtc_new_encoder(struct intel_crtc *crtc)
struct intel_encoder *
intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *intel_encoder, *ret = NULL;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_encoder *ret = NULL;
struct drm_atomic_state *state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int num_encoders = 0;
int i;
 
for_each_intel_encoder(dev, intel_encoder) {
if (intel_encoder->new_crtc == crtc) {
ret = intel_encoder;
state = crtc_state->base.state;
 
for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->base.crtc)
continue;
 
ret = to_intel_encoder(connector_state->best_encoder);
num_encoders++;
}
}
 
WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
pipe_name(crtc->pipe));
499,11 → 794,11
(void) (&__a == &__b); \
__a > __b ? (__a - __b) : (__b - __a); })
 
struct wrpll_rnp {
struct hsw_wrpll_rnp {
unsigned p, n2, r2;
};
 
static unsigned wrpll_get_budget_for_freq(int clock)
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
unsigned budget;
 
577,9 → 872,9
return budget;
}
 
static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
unsigned r2, unsigned n2, unsigned p,
struct wrpll_rnp *best)
struct hsw_wrpll_rnp *best)
{
uint64_t a, b, c, d, diff, diff_best;
 
636,8 → 931,7
/* Otherwise a < c && b >= d, do nothing */
}
 
static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
int reg)
static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
{
int refclk = LC_FREQ;
int n, p, r;
677,8 → 971,8
uint32_t cfgcr1_val, cfgcr2_val;
uint32_t p0, p1, p2, dco_freq;
 
cfgcr1_reg = GET_CFG_CR1_REG(dpll);
cfgcr2_reg = GET_CFG_CR2_REG(dpll);
cfgcr1_reg = DPLL_CFGCR1(dpll);
cfgcr2_reg = DPLL_CFGCR2(dpll);
 
cfgcr1_val = I915_READ(cfgcr1_reg);
cfgcr2_val = I915_READ(cfgcr2_reg);
730,9 → 1024,29
return dco_freq / (p0 * p1 * p2 * 5);
}
 
static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
{
int dotclock;
 
if (pipe_config->has_pch_encoder)
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->fdi_m_n);
else if (pipe_config->has_dp_encoder)
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
dotclock = pipe_config->port_clock * 2 / 3;
else
dotclock = pipe_config->port_clock;
 
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
 
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
 
static void skl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int link_clock = 0;
745,17 → 1059,26
if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
link_clock = skl_calc_wrpll_link(dev_priv, dpll);
} else {
link_clock = dpll_ctl1 & DPLL_CRTL1_LINK_RATE_MASK(dpll);
link_clock >>= DPLL_CRTL1_LINK_RATE_SHIFT(dpll);
link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(dpll);
link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(dpll);
 
switch (link_clock) {
case DPLL_CRTL1_LINK_RATE_810:
case DPLL_CTRL1_LINK_RATE_810:
link_clock = 81000;
break;
case DPLL_CRTL1_LINK_RATE_1350:
case DPLL_CTRL1_LINK_RATE_1080:
link_clock = 108000;
break;
case DPLL_CTRL1_LINK_RATE_1350:
link_clock = 135000;
break;
case DPLL_CRTL1_LINK_RATE_2700:
case DPLL_CTRL1_LINK_RATE_1620:
link_clock = 162000;
break;
case DPLL_CTRL1_LINK_RATE_2160:
link_clock = 216000;
break;
case DPLL_CTRL1_LINK_RATE_2700:
link_clock = 270000;
break;
default:
767,16 → 1090,11
 
pipe_config->port_clock = link_clock;
 
if (pipe_config->has_dp_encoder)
pipe_config->adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
else
pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
ddi_dotclock_get(pipe_config);
}
 
static void hsw_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int link_clock = 0;
794,10 → 1112,10
link_clock = 270000;
break;
case PORT_CLK_SEL_WRPLL1:
link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
break;
case PORT_CLK_SEL_WRPLL2:
link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
break;
case PORT_CLK_SEL_SPLL:
pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
819,22 → 1137,57
 
pipe_config->port_clock = link_clock * 2;
 
if (pipe_config->has_pch_encoder)
pipe_config->adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->fdi_m_n);
else if (pipe_config->has_dp_encoder)
pipe_config->adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
else
pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
ddi_dotclock_get(pipe_config);
}
 
static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
enum intel_dpll_id dpll)
{
struct intel_shared_dpll *pll;
struct intel_dpll_hw_state *state;
intel_clock_t clock;
 
/* For DDI ports we always use a shared PLL. */
if (WARN_ON(dpll == DPLL_ID_PRIVATE))
return 0;
 
pll = &dev_priv->shared_dplls[dpll];
state = &pll->config.hw_state;
 
clock.m1 = 2;
clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
if (state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
clock.m2 |= state->pll2 & PORT_PLL_M2_FRAC_MASK;
clock.n = (state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
clock.p1 = (state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
clock.p2 = (state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
 
return chv_calc_dpll_params(100000, &clock);
}
 
static void bxt_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
enum port port = intel_ddi_get_encoder_port(encoder);
uint32_t dpll = port;
 
pipe_config->port_clock = bxt_calc_pll_link(dev_priv, dpll);
 
ddi_dotclock_get(pipe_config);
}
 
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
 
if (INTEL_INFO(dev)->gen <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
else if (IS_SKYLAKE(dev))
skl_ddi_clock_get(encoder, pipe_config);
else if (IS_BROXTON(dev))
bxt_ddi_clock_get(encoder, pipe_config);
}
 
static void
843,12 → 1196,12
{
uint64_t freq2k;
unsigned p, n2, r2;
struct wrpll_rnp best = { 0, 0, 0 };
struct hsw_wrpll_rnp best = { 0, 0, 0 };
unsigned budget;
 
freq2k = clock / 100;
 
budget = wrpll_get_budget_for_freq(clock);
budget = hsw_wrpll_get_budget_for_freq(clock);
 
/* Special case handling for 540 pixel clock: bypass WR PLL entirely
* and directly pass the LC PLL to it. */
892,7 → 1245,7
n2++) {
 
for (p = P_MIN; p <= P_MAX; p += P_INC)
wrpll_update_rnp(freq2k, budget,
hsw_wrpll_update_rnp(freq2k, budget,
r2, n2, p, &best);
}
}
904,9 → 1257,11
 
static bool
hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_encoder *intel_encoder,
int clock)
struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder)
{
int clock = crtc_state->port_clock;
 
if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
struct intel_shared_dpll *pll;
uint32_t val;
918,9 → 1273,12
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
 
intel_crtc->new_config->dpll_hw_state.wrpll = val;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
 
pll = intel_get_shared_dpll(intel_crtc);
crtc_state->dpll_hw_state.wrpll = val;
 
pll = intel_get_shared_dpll(intel_crtc, crtc_state);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
927,174 → 1285,278
return false;
}
 
intel_crtc->new_config->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
} else if (crtc_state->ddi_pll_sel == PORT_CLK_SEL_SPLL) {
struct drm_atomic_state *state = crtc_state->base.state;
struct intel_shared_dpll_config *spll =
&intel_atomic_get_shared_dpll_state(state)[DPLL_ID_SPLL];
 
if (spll->crtc_mask &&
WARN_ON(spll->hw_state.spll != crtc_state->dpll_hw_state.spll))
return false;
 
crtc_state->shared_dpll = DPLL_ID_SPLL;
spll->hw_state.spll = crtc_state->dpll_hw_state.spll;
spll->crtc_mask |= 1 << intel_crtc->pipe;
}
 
return true;
}
 
struct skl_wrpll_params {
uint32_t dco_fraction;
uint32_t dco_integer;
uint32_t qdiv_ratio;
uint32_t qdiv_mode;
uint32_t kdiv;
uint32_t pdiv;
uint32_t central_freq;
struct skl_wrpll_context {
uint64_t min_deviation; /* current minimal deviation */
uint64_t central_freq; /* chosen central freq */
uint64_t dco_freq; /* chosen dco freq */
unsigned int p; /* chosen divider */
};
 
static void
skl_ddi_calculate_wrpll(int clock /* in Hz */,
struct skl_wrpll_params *wrpll_params)
static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
{
uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
uint64_t dco_central_freq[3] = {8400000000ULL,
9000000000ULL,
9600000000ULL};
uint32_t min_dco_deviation = 400;
uint32_t min_dco_index = 3;
uint32_t P0[4] = {1, 2, 3, 7};
uint32_t P2[4] = {1, 2, 3, 5};
bool found = false;
uint32_t candidate_p = 0;
uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0};
uint32_t candidate_p2[3] = {0};
uint32_t dco_central_freq_deviation[3];
uint32_t i, P1, k, dco_count;
bool retry_with_odd = false;
uint64_t dco_freq;
memset(ctx, 0, sizeof(*ctx));
 
/* Determine P0, P1 or P2 */
for (dco_count = 0; dco_count < 3; dco_count++) {
found = false;
candidate_p =
div64_u64(dco_central_freq[dco_count], afe_clock);
if (retry_with_odd == false)
candidate_p = (candidate_p % 2 == 0 ?
candidate_p : candidate_p + 1);
ctx->min_deviation = U64_MAX;
}
 
for (P1 = 1; P1 < candidate_p; P1++) {
for (i = 0; i < 4; i++) {
if (!(P0[i] != 1 || P1 == 1))
continue;
/* DCO freq must be within +1%/-6% of the DCO central freq */
#define SKL_DCO_MAX_PDEVIATION 100
#define SKL_DCO_MAX_NDEVIATION 600
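/*
* Worked example of the units: skl_wrpll_try_divider() below computes
* deviation = 10000 * |dco_freq - central_freq| / central_freq, i.e. in
* steps of 0.01%. With central_freq = 9000 MHz and dco_freq = 8910 MHz the
* deviation is 10000 * 90 / 9000 = 100, a -1% deviation that is well inside
* the 600 (-6%) limit; +1% (100) is the tighter bound on the positive side.
*/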
 
for (k = 0; k < 4; k++) {
if (P1 != 1 && P2[k] != 2)
continue;
static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
uint64_t central_freq,
uint64_t dco_freq,
unsigned int divider)
{
uint64_t deviation;
 
if (candidate_p == P0[i] * P1 * P2[k]) {
/* Found possible P0, P1, P2 */
found = true;
candidate_p0[dco_count] = P0[i];
candidate_p1[dco_count] = P1;
candidate_p2[dco_count] = P2[k];
goto found;
}
deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
central_freq);
 
/* positive deviation */
if (dco_freq >= central_freq) {
if (deviation < SKL_DCO_MAX_PDEVIATION &&
deviation < ctx->min_deviation) {
ctx->min_deviation = deviation;
ctx->central_freq = central_freq;
ctx->dco_freq = dco_freq;
ctx->p = divider;
}
/* negative deviation */
} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
deviation < ctx->min_deviation) {
ctx->min_deviation = deviation;
ctx->central_freq = central_freq;
ctx->dco_freq = dco_freq;
ctx->p = divider;
}
}
 
found:
if (found) {
dco_central_freq_deviation[dco_count] =
div64_u64(10000 *
abs_diff((candidate_p * afe_clock),
dco_central_freq[dco_count]),
dco_central_freq[dco_count]);
static void skl_wrpll_get_multipliers(unsigned int p,
unsigned int *p0 /* out */,
unsigned int *p1 /* out */,
unsigned int *p2 /* out */)
{
/* even dividers */
if (p % 2 == 0) {
unsigned int half = p / 2;
 
if (dco_central_freq_deviation[dco_count] <
min_dco_deviation) {
min_dco_deviation =
dco_central_freq_deviation[dco_count];
min_dco_index = dco_count;
if (half == 1 || half == 2 || half == 3 || half == 5) {
*p0 = 2;
*p1 = 1;
*p2 = half;
} else if (half % 2 == 0) {
*p0 = 2;
*p1 = half / 2;
*p2 = 2;
} else if (half % 3 == 0) {
*p0 = 3;
*p1 = half / 3;
*p2 = 2;
} else if (half % 7 == 0) {
*p0 = 7;
*p1 = half / 7;
*p2 = 2;
}
} else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
*p0 = 3;
*p1 = 1;
*p2 = p / 3;
} else if (p == 5 || p == 7) {
*p0 = p;
*p1 = 1;
*p2 = 1;
} else if (p == 15) {
*p0 = 3;
*p1 = 1;
*p2 = 5;
} else if (p == 21) {
*p0 = 7;
*p1 = 1;
*p2 = 3;
} else if (p == 35) {
*p0 = 7;
*p1 = 1;
*p2 = 5;
}
 
if (min_dco_index > 2 && dco_count == 2) {
retry_with_odd = true;
dco_count = 0;
}
}
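/*
* Example decompositions from the helper above: p = 20 is even with
* half = 10, and half % 2 == 0, so p0/p1/p2 = 2/5/2 (2 * 5 * 2 == 20); the
* odd divider p = 21 maps directly to 7/1/3. Every divider in the
* even_dividers[] and odd_dividers[] lists below factors this way.
*/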
 
if (min_dco_index > 2) {
WARN(1, "No valid values found for the given pixel clock\n");
} else {
wrpll_params->central_freq = dco_central_freq[min_dco_index];
struct skl_wrpll_params {
uint32_t dco_fraction;
uint32_t dco_integer;
uint32_t qdiv_ratio;
uint32_t qdiv_mode;
uint32_t kdiv;
uint32_t pdiv;
uint32_t central_freq;
};
 
switch (dco_central_freq[min_dco_index]) {
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
uint64_t afe_clock,
uint64_t central_freq,
uint32_t p0, uint32_t p1, uint32_t p2)
{
uint64_t dco_freq;
 
switch (central_freq) {
case 9600000000ULL:
wrpll_params->central_freq = 0;
params->central_freq = 0;
break;
case 9000000000ULL:
wrpll_params->central_freq = 1;
params->central_freq = 1;
break;
case 8400000000ULL:
wrpll_params->central_freq = 3;
params->central_freq = 3;
}
 
switch (candidate_p0[min_dco_index]) {
switch (p0) {
case 1:
wrpll_params->pdiv = 0;
params->pdiv = 0;
break;
case 2:
wrpll_params->pdiv = 1;
params->pdiv = 1;
break;
case 3:
wrpll_params->pdiv = 2;
params->pdiv = 2;
break;
case 7:
wrpll_params->pdiv = 4;
params->pdiv = 4;
break;
default:
WARN(1, "Incorrect PDiv\n");
}
 
switch (candidate_p2[min_dco_index]) {
switch (p2) {
case 5:
wrpll_params->kdiv = 0;
params->kdiv = 0;
break;
case 2:
wrpll_params->kdiv = 1;
params->kdiv = 1;
break;
case 3:
wrpll_params->kdiv = 2;
params->kdiv = 2;
break;
case 1:
wrpll_params->kdiv = 3;
params->kdiv = 3;
break;
default:
WARN(1, "Incorrect KDiv\n");
}
 
wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
wrpll_params->qdiv_mode =
(wrpll_params->qdiv_ratio == 1) ? 0 : 1;
params->qdiv_ratio = p1;
params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
 
dco_freq = candidate_p0[min_dco_index] *
candidate_p1[min_dco_index] *
candidate_p2[min_dco_index] * afe_clock;
dco_freq = p0 * p1 * p2 * afe_clock;
 
/*
* Intermediate values are in Hz.
* Divide by MHz to match BSpec
*/
wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
wrpll_params->dco_fraction =
div_u64(((div_u64(dco_freq, 24) -
wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
params->dco_fraction =
div_u64((div_u64(dco_freq, 24) -
params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
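The DCO frequency is encoded as a multiple of 24 MHz: an integer part plus a
fraction in 1/0x8000 steps. A minimal standalone sketch of the encoding,
using a hypothetical 8910 MHz DCO (values in Hz):

#include <stdint.h>

static void dco_encode_example(void)
{
	uint64_t dco_freq = 8910000000ULL;	/* hypothetical DCO rate */
	uint64_t mhz = 1000000ULL;
	/* 8910 / 24 = 371.25, so the integer part is 371... */
	uint32_t dco_integer = dco_freq / (24 * mhz);
	/* ...and the 0.25 remainder encodes as 0.25 * 0x8000 = 0x2000 */
	uint32_t dco_fraction =
		(dco_freq / 24 - dco_integer * mhz) * 0x8000 / mhz;
	(void)dco_integer;
	(void)dco_fraction;
}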
 
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
struct skl_wrpll_params *wrpll_params)
{
uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
uint64_t dco_central_freq[3] = {8400000000ULL,
9000000000ULL,
9600000000ULL};
static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
24, 28, 30, 32, 36, 40, 42, 44,
48, 52, 54, 56, 60, 64, 66, 68,
70, 72, 76, 78, 80, 84, 88, 90,
92, 96, 98 };
static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
static const struct {
const int *list;
int n_dividers;
} dividers[] = {
{ even_dividers, ARRAY_SIZE(even_dividers) },
{ odd_dividers, ARRAY_SIZE(odd_dividers) },
};
struct skl_wrpll_context ctx;
unsigned int dco, d, i;
unsigned int p0, p1, p2;
 
skl_wrpll_context_init(&ctx);
 
for (d = 0; d < ARRAY_SIZE(dividers); d++) {
for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
for (i = 0; i < dividers[d].n_dividers; i++) {
unsigned int p = dividers[d].list[i];
uint64_t dco_freq = p * afe_clock;
 
skl_wrpll_try_divider(&ctx,
dco_central_freq[dco],
dco_freq,
p);
/*
* Skip the remaining dividers if we're sure to
* have found the definitive divider; we can't
* improve on a deviation of 0.
*/
if (ctx.min_deviation == 0)
goto skip_remaining_dividers;
}
}
 
skip_remaining_dividers:
/*
* If a solution is found with an even divider, prefer
* this one.
*/
if (d == 0 && ctx.p)
break;
}
 
if (!ctx.p) {
DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
return false;
}
 
/*
* gcc incorrectly concludes that these can be used without being
* initialized. To be fair, it's hard to guess.
*/
p0 = p1 = p2 = 0;
skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
p0, p1, p2);
 
return true;
}
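As a worked example with illustrative numbers (not taken from the source):
for a 148.5 MHz HDMI pixel clock, afe_clock is 742.5 MHz and the even-divider
pass settles on p = 12, giving an 8910 MHz DCO against the 9000 MHz central
frequency. Assuming the usual +1%/-6% limits (SKL_DCO_MAX_PDEVIATION = 100
and SKL_DCO_MAX_NDEVIATION = 600, in units of 0.01%), the deviation works out
to 100 on the negative side and the candidate is accepted:

#include <stdint.h>

static void skl_divider_example(void)
{
	uint64_t afe_clock = 148500000ULL * 5;	/* 742.5 MHz */
	uint64_t dco_freq = 12 * afe_clock;	/* 8910 MHz */
	uint64_t central = 9000000000ULL;
	/* 10000 * 90 MHz / 9000 MHz = 100, i.e. -1%: within the -6% limit */
	uint64_t deviation = 10000 * (central - dco_freq) / central;
	(void)deviation;
}

skl_wrpll_get_multipliers() then factors p = 12 through the even path
(half = 6) into p0/p1/p2 = 2/3/2.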
 
static bool
skl_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_encoder *intel_encoder,
int clock)
struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder)
{
struct intel_shared_dpll *pll;
uint32_t ctrl1, cfgcr1, cfgcr2;
int clock = crtc_state->port_clock;
 
/*
* See comment in intel_dpll_hw_state to understand why we always use 0
1108,7 → 1570,8
 
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
 
skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params);
if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
return false;
 
cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1120,18 → 1583,15
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
struct drm_encoder *encoder = &intel_encoder->base;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
switch (intel_dp->link_bw) {
case DP_LINK_BW_1_62:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, 0);
switch (crtc_state->port_clock / 2) {
case 81000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
break;
case DP_LINK_BW_2_7:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 0);
case 135000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
break;
case DP_LINK_BW_5_4:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, 0);
case 270000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
break;
}
 
1139,11 → 1599,14
} else /* eDP */
return true;
 
intel_crtc->new_config->dpll_hw_state.ctrl1 = ctrl1;
intel_crtc->new_config->dpll_hw_state.cfgcr1 = cfgcr1;
intel_crtc->new_config->dpll_hw_state.cfgcr2 = cfgcr2;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
 
pll = intel_get_shared_dpll(intel_crtc);
crtc_state->dpll_hw_state.ctrl1 = ctrl1;
crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
 
pll = intel_get_shared_dpll(intel_crtc, crtc_state);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
1151,12 → 1614,159
}
 
/* shared DPLL id 0 is DPLL 1 */
intel_crtc->new_config->ddi_pll_sel = pll->id + 1;
crtc_state->ddi_pll_sel = pll->id + 1;
 
return true;
}
 
/* bxt clock parameters */
struct bxt_clk_div {
int clock;
uint32_t p1;
uint32_t p2;
uint32_t m2_int;
uint32_t m2_frac;
bool m2_frac_en;
uint32_t n;
};
 
/* pre-calculated values for DP linkrates */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
{162000, 4, 2, 32, 1677722, 1, 1},
{270000, 4, 1, 27, 0, 0, 1},
{540000, 2, 1, 27, 0, 0, 1},
{216000, 3, 2, 32, 1677722, 1, 1},
{243000, 4, 1, 24, 1258291, 1, 1},
{324000, 4, 1, 32, 1677722, 1, 1},
{432000, 3, 1, 32, 1677722, 1, 1}
};
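These entries can be cross-checked against the PLL relation used below
(a hedged consistency check, assuming Broxton's 100 MHz non-SSC reference
and m1 == 2 as asserted further down): ref * m1 * (m2_int + m2_frac / 2^22)
/ n must equal clock * 5 * p1 * p2, all in kHz. For the 162000 entry:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* 162000 kHz entry: p1 = 4, p2 = 2, m2 = 32 + 1677722 / 2^22, n = 1 */
	uint64_t ref = 100000, m1 = 2, n = 1;	/* kHz; assumed 100 MHz ref */
	uint64_t m2_fixed = (32ULL << 22) + 1677722;
	uint64_t vco = ref * m1 * m2_fixed / (n << 22);
	assert(vco == 162000ULL * 5 * 4 * 2);	/* 6480000 kHz */
	return 0;
}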
 
static bool
bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder)
{
struct intel_shared_dpll *pll;
struct bxt_clk_div clk_div = {0};
int vco = 0;
uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
uint32_t lanestagger;
int clock = crtc_state->port_clock;
 
if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
intel_clock_t best_clock;
 
/* Calculate HDMI div */
/*
* FIXME: tie the following calculation into
* i9xx_crtc_compute_clock
*/
if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
clock, pipe_name(intel_crtc->pipe));
return false;
}
 
clk_div.p1 = best_clock.p1;
clk_div.p2 = best_clock.p2;
WARN_ON(best_clock.m1 != 2);
clk_div.n = best_clock.n;
clk_div.m2_int = best_clock.m2 >> 22;
clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1);
clk_div.m2_frac_en = clk_div.m2_frac != 0;
 
vco = best_clock.vco;
} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
intel_encoder->type == INTEL_OUTPUT_EDP) {
int i;
 
clk_div = bxt_dp_clk_val[0];
for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
if (bxt_dp_clk_val[i].clock == clock) {
clk_div = bxt_dp_clk_val[i];
break;
}
}
vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
}
 
if (vco >= 6200000 && vco <= 6700000) {
prop_coef = 4;
int_coef = 9;
gain_ctl = 3;
targ_cnt = 8;
} else if ((vco > 5400000 && vco < 6200000) ||
(vco >= 4800000 && vco < 5400000)) {
prop_coef = 5;
int_coef = 11;
gain_ctl = 3;
targ_cnt = 9;
} else if (vco == 5400000) {
prop_coef = 3;
int_coef = 8;
gain_ctl = 1;
targ_cnt = 9;
} else {
DRM_ERROR("Invalid VCO\n");
return false;
}
 
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
 
if (clock > 270000)
lanestagger = 0x18;
else if (clock > 135000)
lanestagger = 0x0d;
else if (clock > 67000)
lanestagger = 0x07;
else if (clock > 33000)
lanestagger = 0x04;
else
lanestagger = 0x02;
 
crtc_state->dpll_hw_state.ebb0 =
PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2);
crtc_state->dpll_hw_state.pll0 = clk_div.m2_int;
crtc_state->dpll_hw_state.pll1 = PORT_PLL_N(clk_div.n);
crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac;
 
if (clk_div.m2_frac_en)
crtc_state->dpll_hw_state.pll3 =
PORT_PLL_M2_FRAC_ENABLE;
 
crtc_state->dpll_hw_state.pll6 =
prop_coef | PORT_PLL_INT_COEFF(int_coef);
crtc_state->dpll_hw_state.pll6 |=
PORT_PLL_GAIN_CTL(gain_ctl);
 
crtc_state->dpll_hw_state.pll8 = targ_cnt;
 
crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
 
crtc_state->dpll_hw_state.pll10 =
PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
| PORT_PLL_DCO_AMP_OVR_EN_H;
 
crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
 
crtc_state->dpll_hw_state.pcsdw12 =
LANESTAGGER_STRAP_OVRD | lanestagger;
 
pll = intel_get_shared_dpll(intel_crtc, crtc_state);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
return false;
}
 
/* shared DPLL id 0 is DPLL A */
crtc_state->ddi_pll_sel = pll->id;
 
return true;
}
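The m2_int/m2_frac split above is plain 22.22 fixed point. A minimal
illustration with a hypothetical value:

	uint32_t m2 = (27u << 22) + 1677722;		/* ~27.4 in 22.22 fixed point */
	uint32_t m2_int = m2 >> 22;			/* 27 */
	uint32_t m2_frac = m2 & ((1u << 22) - 1);	/* 1677722 */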
 
/*
* Tries to find a *shared* PLL for the CRTC and store it in
* intel_crtc->ddi_pll_sel.
*
1163,17 → 1773,22
* For private DPLLs, compute_config() should do the selection for us. This
* function should be folded into compute_config() eventually.
*/
bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = intel_crtc->base.dev;
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(intel_crtc);
int clock = intel_crtc->new_config->port_clock;
intel_ddi_get_crtc_new_encoder(crtc_state);
 
if (IS_SKYLAKE(dev))
return skl_ddi_pll_select(intel_crtc, intel_encoder, clock);
return skl_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
else if (IS_BROXTON(dev))
return bxt_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
else
return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
return hsw_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
}
 
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
1181,13 → 1796,13
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
int type = intel_encoder->type;
uint32_t temp;
 
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
temp = TRANS_MSA_SYNC_CLK;
switch (intel_crtc->config.pipe_bpp) {
switch (intel_crtc->config->pipe_bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
break;
1212,7 → 1827,7
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
uint32_t temp;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (state == true)
1230,7 → 1845,7
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t temp;
1239,7 → 1854,7
temp = TRANS_DDI_FUNC_ENABLE;
temp |= TRANS_DDI_SELECT_PORT(port);
 
switch (intel_crtc->config.pipe_bpp) {
switch (intel_crtc->config->pipe_bpp) {
case 18:
temp |= TRANS_DDI_BPC_6;
break;
1256,9 → 1871,9
BUG();
}
 
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DDI_PVSYNC;
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DDI_PHSYNC;
 
if (cpu_transcoder == TRANSCODER_EDP) {
1269,8 → 1884,8
* using motion blur mitigation (which we don't
* support). */
if (IS_HASWELL(dev) &&
(intel_crtc->config.pch_pfit.enabled ||
intel_crtc->config.pch_pfit.force_thru))
(intel_crtc->config->pch_pfit.enabled ||
intel_crtc->config->pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
1288,7 → 1903,7
}
 
if (type == INTEL_OUTPUT_HDMI) {
if (intel_crtc->config.has_hdmi_sink)
if (intel_crtc->config->has_hdmi_sink)
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
temp |= TRANS_DDI_MODE_SELECT_DVI;
1295,7 → 1910,7
 
} else if (type == INTEL_OUTPUT_ANALOG) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
temp |= (intel_crtc->config.fdi_lanes - 1) << 1;
temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
 
} else if (type == INTEL_OUTPUT_DISPLAYPORT ||
type == INTEL_OUTPUT_EDP) {
1306,7 → 1921,7
} else
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
 
temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
} else if (type == INTEL_OUTPUT_DP_MST) {
struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;
 
1315,7 → 1930,7
} else
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
 
temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
} else {
WARN(1, "Invalid encoder type %d for pipe %c\n",
intel_encoder->type, pipe_name(pipe));
1442,10 → 2057,11
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
1455,7 → 2071,7
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
1462,6 → 2078,199
TRANS_CLK_SEL_DISABLED);
}
 
static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
enum port port, int type)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct ddi_buf_trans *ddi_translations;
uint8_t iboost;
uint8_t dp_iboost, hdmi_iboost;
int n_entries;
u32 reg;
 
/* VBT may override standard boost values */
dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
 
if (type == INTEL_OUTPUT_DISPLAYPORT) {
if (dp_iboost) {
iboost = dp_iboost;
} else {
ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
iboost = ddi_translations[port].i_boost;
}
} else if (type == INTEL_OUTPUT_EDP) {
if (dp_iboost) {
iboost = dp_iboost;
} else {
ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
iboost = ddi_translations[port].i_boost;
}
} else if (type == INTEL_OUTPUT_HDMI) {
if (hdmi_iboost) {
iboost = hdmi_iboost;
} else {
ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
iboost = ddi_translations[port].i_boost;
}
} else {
return;
}
 
/* Make sure that the requested I_boost is valid */
if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
DRM_ERROR("Invalid I_boost value %u\n", iboost);
return;
}
 
reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
reg &= ~BALANCE_LEG_MASK(port);
reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
 
if (iboost)
reg |= iboost << BALANCE_LEG_SHIFT(port);
else
reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
 
I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
}
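The accepted I_boost override values (0x1, 0x3, 0x7, plus 0 for "no
override") are exactly the values of the form 2^k - 1. An equivalent
predicate, offered only as a sketch (iboost_valid is not a driver function):

#include <stdbool.h>
#include <stdint.h>

static bool iboost_valid(uint8_t iboost)
{
	/* 0 disables the override; 0x1/0x3/0x7 are the legal boost values */
	return iboost <= 0x7 && (iboost & (iboost + 1)) == 0;
}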
 
static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
enum port port, int type)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct bxt_ddi_buf_trans *ddi_translations;
u32 n_entries, i;
uint32_t val;
 
if (type == INTEL_OUTPUT_EDP && dev_priv->edp_low_vswing) {
n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
ddi_translations = bxt_ddi_translations_edp;
} else if (type == INTEL_OUTPUT_DISPLAYPORT
|| type == INTEL_OUTPUT_EDP) {
n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
ddi_translations = bxt_ddi_translations_dp;
} else if (type == INTEL_OUTPUT_HDMI) {
n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
ddi_translations = bxt_ddi_translations_hdmi;
} else {
DRM_DEBUG_KMS("Vswing programming not done for encoder %d\n",
type);
return;
}
 
/* Check if default value has to be used */
if (level >= n_entries ||
(type == INTEL_OUTPUT_HDMI && level == HDMI_LEVEL_SHIFT_UNKNOWN)) {
for (i = 0; i < n_entries; i++) {
if (ddi_translations[i].default_index) {
level = i;
break;
}
}
}
 
/*
* While we write to the group register to program all lanes at once,
* we can only read back individual lane registers; we pick lanes 0/1 for that.
*/
val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
 
val = I915_READ(BXT_PORT_TX_DW2_LN0(port));
val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
val |= ddi_translations[level].margin << MARGIN_000_SHIFT |
ddi_translations[level].scale << UNIQ_TRANS_SCALE_SHIFT;
I915_WRITE(BXT_PORT_TX_DW2_GRP(port), val);
 
val = I915_READ(BXT_PORT_TX_DW3_LN0(port));
val &= ~SCALE_DCOMP_METHOD;
if (ddi_translations[level].enable)
val |= SCALE_DCOMP_METHOD;
 
if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
 
I915_WRITE(BXT_PORT_TX_DW3_GRP(port), val);
 
val = I915_READ(BXT_PORT_TX_DW4_LN0(port));
val &= ~DE_EMPHASIS;
val |= ddi_translations[level].deemphasis << DEEMPH_SHIFT;
I915_WRITE(BXT_PORT_TX_DW4_GRP(port), val);
 
val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
}
 
static uint32_t translate_signal_level(int signal_levels)
{
uint32_t level;
 
switch (signal_levels) {
default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
signal_levels);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
level = 0;
break;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
level = 1;
break;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
level = 2;
break;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
level = 3;
break;
 
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
level = 4;
break;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
level = 5;
break;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
level = 6;
break;
 
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
level = 7;
break;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
level = 8;
break;
 
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
level = 9;
break;
}
 
return level;
}
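The switch above walks the voltage-swing/pre-emphasis grid row by row: swing
levels 0-3 allow 4, 3, 2 and 1 pre-emphasis settings respectively, giving row
offsets 0, 4, 7 and 9. A closed-form equivalent, purely as an illustration
(signal_level_sketch is hypothetical and takes the numeric levels, not the
register encodings):

#include <stdint.h>

static uint32_t signal_level_sketch(uint32_t vswing, uint32_t preemph)
{
	return vswing * (9 - vswing) / 2 + preemph;	/* offsets 0, 4, 7, 9 */
}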
 
uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
{
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = dport->base.base.dev;
struct intel_encoder *encoder = &dport->base;
uint8_t train_set = intel_dp->train_set[0];
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
enum port port = dport->port;
uint32_t level;
 
level = translate_signal_level(signal_levels);
 
if (IS_SKYLAKE(dev))
skl_ddi_set_iboost(dev, level, port, encoder->type);
else if (IS_BROXTON(dev))
bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
 
return DDI_BUF_TRANS_SELECT(level);
}
 
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
1470,6 → 2279,7
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
int hdmi_level;
 
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1477,7 → 2287,7
}
 
if (IS_SKYLAKE(dev)) {
uint32_t dpll = crtc->config.ddi_pll_sel;
uint32_t dpll = crtc->config->ddi_pll_sel;
uint32_t val;
 
/*
1491,8 → 2301,8
 
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
DPLL_CTRL1_SSC(dpll) |
DPLL_CRTL1_LINK_RATE_MASK(dpll));
val |= crtc->config.dpll_hw_state.ctrl1 << (dpll * 6);
DPLL_CTRL1_LINK_RATE_MASK(dpll));
val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6);
 
I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1);
1508,27 → 2318,34
 
I915_WRITE(DPLL_CTRL2, val);
 
} else {
WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
} else if (INTEL_INFO(dev)->gen < 9) {
WARN_ON(crtc->config->ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), crtc->config->ddi_pll_sel);
}
 
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
intel_dp_set_link_params(intel_dp, crtc->config);
 
intel_ddi_init_dp_buf_reg(intel_encoder);
 
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
intel_dp_stop_link_train(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 
if (IS_BROXTON(dev)) {
hdmi_level = dev_priv->vbt.
ddi_port_info[port].hdmi_level_shift;
bxt_ddi_vswing_sequence(dev, hdmi_level, port,
INTEL_OUTPUT_HDMI);
}
intel_hdmi->set_infoframes(encoder,
crtc->config.has_hdmi_sink,
&crtc->config.adjusted_mode);
crtc->config->has_hdmi_sink,
&crtc->config->base.adjusted_mode);
}
}
 
1567,7 → 2384,7
if (IS_SKYLAKE(dev))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
else
else if (INTEL_INFO(dev)->gen < 9)
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
}
 
1600,9 → 2417,10
 
intel_edp_backlight_on(intel_dp);
intel_psr_enable(intel_dp);
intel_edp_drrs_enable(intel_dp);
}
 
if (intel_crtc->config.has_audio) {
if (intel_crtc->config->has_audio) {
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
intel_audio_codec_enable(intel_encoder);
}
1617,7 → 2435,7
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (intel_crtc->config.has_audio) {
if (intel_crtc->config->has_audio) {
intel_audio_codec_disable(intel_encoder);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
1625,119 → 2443,29
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
intel_edp_drrs_disable(intel_dp);
intel_psr_disable(intel_dp);
intel_edp_backlight_off(intel_dp);
}
}
 
static int skl_get_cdclk_freq(struct drm_i915_private *dev_priv)
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
uint32_t cdctl = I915_READ(CDCLK_CTL);
uint32_t linkrate;
 
if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
WARN(1, "LCPLL1 not enabled\n");
return 24000; /* 24MHz is the cd freq with NSSC ref */
I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
POSTING_READ(WRPLL_CTL(pll->id));
udelay(20);
}
 
if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
return 540000;
 
linkrate = (I915_READ(DPLL_CTRL1) &
DPLL_CRTL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
 
if (linkrate == DPLL_CRTL1_LINK_RATE_2160 ||
linkrate == DPLL_CRTL1_LINK_RATE_1080) {
/* vco 8640 */
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
return 432000;
case CDCLK_FREQ_337_308:
return 308570;
case CDCLK_FREQ_675_617:
return 617140;
default:
WARN(1, "Unknown cd freq selection\n");
}
} else {
/* vco 8100 */
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
return 450000;
case CDCLK_FREQ_337_308:
return 337500;
case CDCLK_FREQ_675_617:
return 675000;
default:
WARN(1, "Unknown cd freq selection\n");
}
}
 
/* error case, do as if DPLL0 isn't enabled */
return 24000;
}
 
static int bdw_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
if (lcpll & LCPLL_CD_SOURCE_FCLK)
return 800000;
else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
return 450000;
else if (freq == LCPLL_CLK_FREQ_450)
return 450000;
else if (freq == LCPLL_CLK_FREQ_54O_BDW)
return 540000;
else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
return 337500;
else
return 675000;
}
 
static int hsw_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
if (lcpll & LCPLL_CD_SOURCE_FCLK)
return 800000;
else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
return 450000;
else if (freq == LCPLL_CLK_FREQ_450)
return 450000;
else if (IS_HSW_ULT(dev))
return 337500;
else
return 540000;
}
 
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
if (IS_SKYLAKE(dev))
return skl_get_cdclk_freq(dev_priv);
 
if (IS_BROADWELL(dev))
return bdw_get_cdclk_freq(dev_priv);
 
/* Haswell */
return hsw_get_cdclk_freq(dev_priv);
}
 
static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
POSTING_READ(WRPLL_CTL(pll->id));
I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
POSTING_READ(SPLL_CTL);
udelay(20);
}
 
static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
uint32_t val;
1747,7 → 2475,17
POSTING_READ(WRPLL_CTL(pll->id));
}
 
static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
uint32_t val;
 
val = I915_READ(SPLL_CTL);
I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
POSTING_READ(SPLL_CTL);
}
 
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
1762,9 → 2500,26
return val & WRPLL_PLL_ENABLE;
}
 
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
uint32_t val;
 
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
 
val = I915_READ(SPLL_CTL);
hw_state->spll = val;
 
return val & SPLL_PLL_ENABLE;
}
 
 
static const char * const hsw_ddi_pll_names[] = {
"WRPLL 1",
"WRPLL 2",
"SPLL"
};
 
static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
1771,16 → 2526,24
{
int i;
 
dev_priv->num_shared_dpll = 2;
dev_priv->num_shared_dpll = 3;
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
for (i = 0; i < 2; i++) {
dev_priv->shared_dplls[i].id = i;
dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
dev_priv->shared_dplls[i].disable = hsw_ddi_wrpll_disable;
dev_priv->shared_dplls[i].enable = hsw_ddi_wrpll_enable;
dev_priv->shared_dplls[i].get_hw_state =
hsw_ddi_pll_get_hw_state;
hsw_ddi_wrpll_get_hw_state;
}
 
/* SPLL is special, but needs to be initialized anyway. */
dev_priv->shared_dplls[i].id = i;
dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
dev_priv->shared_dplls[i].disable = hsw_ddi_spll_disable;
dev_priv->shared_dplls[i].enable = hsw_ddi_spll_enable;
dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_spll_get_hw_state;
 
}
 
static const char * const skl_ddi_pll_names[] = {
1798,20 → 2561,20
{
/* DPLL 1 */
.ctl = LCPLL2_CTL,
.cfgcr1 = DPLL1_CFGCR1,
.cfgcr2 = DPLL1_CFGCR2,
.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
},
{
/* DPLL 2 */
.ctl = WRPLL_CTL1,
.cfgcr1 = DPLL2_CFGCR1,
.cfgcr2 = DPLL2_CFGCR2,
.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
},
{
/* DPLL 3 */
.ctl = WRPLL_CTL2,
.cfgcr1 = DPLL3_CFGCR1,
.cfgcr2 = DPLL3_CFGCR2,
.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
},
};
 
1828,7 → 2591,7
val = I915_READ(DPLL_CTRL1);
 
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
DPLL_CRTL1_LINK_RATE_MASK(dpll));
DPLL_CTRL1_LINK_RATE_MASK(dpll));
val |= pll->config.hw_state.ctrl1 << (dpll * 6);
 
I915_WRITE(DPLL_CTRL1, val);
1904,6 → 2667,326
}
}
 
static void broxton_phy_init(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
enum port port;
uint32_t val;
 
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
val |= GT_DISPLAY_POWER_ON(phy);
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
 
/* Considering 10ms timeout until BSpec is updated */
if (wait_for(I915_READ(BXT_PORT_CL1CM_DW0(phy)) & PHY_POWER_GOOD, 10))
DRM_ERROR("timeout during PHY%d power on\n", phy);
 
for (port = (phy == DPIO_PHY0 ? PORT_B : PORT_A);
port <= (phy == DPIO_PHY0 ? PORT_C : PORT_A); port++) {
int lane;
 
for (lane = 0; lane < 4; lane++) {
val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
/*
* Note that on CHV this flag is called UPAR, but has
* the same function.
*/
val &= ~LATENCY_OPTIM;
if (lane != 1)
val |= LATENCY_OPTIM;
 
I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
}
}
 
/* Program PLL Rcomp code offset */
val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
val &= ~IREF0RC_OFFSET_MASK;
val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
 
val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
val &= ~IREF1RC_OFFSET_MASK;
val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
 
/* Program power gating */
val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
SUS_CLK_CONFIG;
I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
 
if (phy == DPIO_PHY0) {
val = I915_READ(BXT_PORT_CL2CM_DW6_BC);
val |= DW6_OLDO_DYN_PWR_DOWN_EN;
I915_WRITE(BXT_PORT_CL2CM_DW6_BC, val);
}
 
val = I915_READ(BXT_PORT_CL1CM_DW30(phy));
val &= ~OCL2_LDOFUSE_PWR_DIS;
/*
* On PHY1 disable power on the second channel, since no port is
* connected there. On PHY0 both channels have a port, so leave it
* enabled.
* TODO: port C is only connected on BXT-P, so on BXT0/1 we should
* power down the second channel on PHY0 as well.
*/
if (phy == DPIO_PHY1)
val |= OCL2_LDOFUSE_PWR_DIS;
I915_WRITE(BXT_PORT_CL1CM_DW30(phy), val);
 
if (phy == DPIO_PHY0) {
uint32_t grc_code;
/*
* PHY0 isn't connected to an RCOMP resistor so copy over
* the corresponding calibrated value from PHY1, and disable
* the automatic calibration on PHY0.
*/
if (wait_for(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE,
10))
DRM_ERROR("timeout waiting for PHY1 GRC\n");
 
val = I915_READ(BXT_PORT_REF_DW6(DPIO_PHY1));
val = (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
grc_code = val << GRC_CODE_FAST_SHIFT |
val << GRC_CODE_SLOW_SHIFT |
val;
I915_WRITE(BXT_PORT_REF_DW6(DPIO_PHY0), grc_code);
 
val = I915_READ(BXT_PORT_REF_DW8(DPIO_PHY0));
val |= GRC_DIS | GRC_RDY_OVRD;
I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
}
 
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val |= COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
}
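The grc_code assembly above replicates PHY1's calibrated Rcomp code into the
fast, slow and nominal fields of the PHY0 register. A minimal sketch of the
packing, assuming the fast and slow fields sit at bit offsets 16 and 8 with
the nominal code in bits 7:0:

#include <stdint.h>

static uint32_t pack_grc_code(uint32_t code)
{
	return code << 16 | code << 8 | code;	/* fast | slow | nominal */
}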
 
void broxton_ddi_phy_init(struct drm_device *dev)
{
/* Enable PHY1 first since it provides Rcomp for PHY0 */
broxton_phy_init(dev->dev_private, DPIO_PHY1);
broxton_phy_init(dev->dev_private, DPIO_PHY0);
}
 
static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
uint32_t val;
 
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val &= ~COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
}
 
void broxton_ddi_phy_uninit(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
broxton_phy_uninit(dev_priv, DPIO_PHY1);
broxton_phy_uninit(dev_priv, DPIO_PHY0);
 
/* FIXME: do this in broxton_phy_uninit per phy */
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, 0);
}
 
static const char * const bxt_ddi_pll_names[] = {
"PORT PLL A",
"PORT PLL B",
"PORT PLL C",
};
 
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
uint32_t temp;
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
 
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
temp &= ~PORT_PLL_REF_SEL;
/* Non-SSC reference */
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
 
/* Disable 10 bit clock */
temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
 
/* Write P1 & P2 */
temp = I915_READ(BXT_PORT_PLL_EBB_0(port));
temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
temp |= pll->config.hw_state.ebb0;
I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);
 
/* Write M2 integer */
temp = I915_READ(BXT_PORT_PLL(port, 0));
temp &= ~PORT_PLL_M2_MASK;
temp |= pll->config.hw_state.pll0;
I915_WRITE(BXT_PORT_PLL(port, 0), temp);
 
/* Write N */
temp = I915_READ(BXT_PORT_PLL(port, 1));
temp &= ~PORT_PLL_N_MASK;
temp |= pll->config.hw_state.pll1;
I915_WRITE(BXT_PORT_PLL(port, 1), temp);
 
/* Write M2 fraction */
temp = I915_READ(BXT_PORT_PLL(port, 2));
temp &= ~PORT_PLL_M2_FRAC_MASK;
temp |= pll->config.hw_state.pll2;
I915_WRITE(BXT_PORT_PLL(port, 2), temp);
 
/* Write M2 fraction enable */
temp = I915_READ(BXT_PORT_PLL(port, 3));
temp &= ~PORT_PLL_M2_FRAC_ENABLE;
temp |= pll->config.hw_state.pll3;
I915_WRITE(BXT_PORT_PLL(port, 3), temp);
 
/* Write coeff */
temp = I915_READ(BXT_PORT_PLL(port, 6));
temp &= ~PORT_PLL_PROP_COEFF_MASK;
temp &= ~PORT_PLL_INT_COEFF_MASK;
temp &= ~PORT_PLL_GAIN_CTL_MASK;
temp |= pll->config.hw_state.pll6;
I915_WRITE(BXT_PORT_PLL(port, 6), temp);
 
/* Write calibration val */
temp = I915_READ(BXT_PORT_PLL(port, 8));
temp &= ~PORT_PLL_TARGET_CNT_MASK;
temp |= pll->config.hw_state.pll8;
I915_WRITE(BXT_PORT_PLL(port, 8), temp);
 
temp = I915_READ(BXT_PORT_PLL(port, 9));
temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
temp |= pll->config.hw_state.pll9;
I915_WRITE(BXT_PORT_PLL(port, 9), temp);
 
temp = I915_READ(BXT_PORT_PLL(port, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
temp &= ~PORT_PLL_DCO_AMP_MASK;
temp |= pll->config.hw_state.pll10;
I915_WRITE(BXT_PORT_PLL(port, 10), temp);
 
/* Recalibrate with new settings */
temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
temp |= PORT_PLL_RECALIBRATE;
I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
temp |= pll->config.hw_state.ebb4;
I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
 
/* Enable PLL */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
temp |= PORT_PLL_ENABLE;
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
POSTING_READ(BXT_PORT_PLL_ENABLE(port));
 
if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
PORT_PLL_LOCK), 200))
DRM_ERROR("PLL %d not locked\n", port);
 
/*
* While we write to the group register to program all lanes at once,
* we can only read back individual lane registers; we pick lanes 0/1 for that.
*/
temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
temp &= ~LANE_STAGGER_MASK;
temp &= ~LANESTAGGER_STRAP_OVRD;
temp |= pll->config.hw_state.pcsdw12;
I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp);
}
 
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
uint32_t temp;
 
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
temp &= ~PORT_PLL_ENABLE;
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
POSTING_READ(BXT_PORT_PLL_ENABLE(port));
}
 
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
uint32_t val;
 
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
 
val = I915_READ(BXT_PORT_PLL_ENABLE(port));
if (!(val & PORT_PLL_ENABLE))
return false;
 
hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
 
hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
 
hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
hw_state->pll0 &= PORT_PLL_M2_MASK;
 
hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
hw_state->pll1 &= PORT_PLL_N_MASK;
 
hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
 
hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
 
hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
PORT_PLL_INT_COEFF_MASK |
PORT_PLL_GAIN_CTL_MASK;
 
hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
 
hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
 
hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
PORT_PLL_DCO_AMP_MASK;
 
/*
* While we write to the group register to program all lanes at once,
* we can only read back individual lane registers. We configure all lanes
* the same way, so just read out lanes 0/1 here and note if lanes 2/3 differ.
*/
hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
hw_state->pcsdw12,
I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
 
return true;
}
 
static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
{
int i;
 
dev_priv->num_shared_dpll = 3;
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
dev_priv->shared_dplls[i].id = i;
dev_priv->shared_dplls[i].name = bxt_ddi_pll_names[i];
dev_priv->shared_dplls[i].disable = bxt_ddi_pll_disable;
dev_priv->shared_dplls[i].enable = bxt_ddi_pll_enable;
dev_priv->shared_dplls[i].get_hw_state =
bxt_ddi_pll_get_hw_state;
}
}
 
void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
1911,15 → 2994,23
 
if (IS_SKYLAKE(dev))
skl_shared_dplls_init(dev_priv);
else if (IS_BROXTON(dev))
bxt_shared_dplls_init(dev_priv);
else
hsw_shared_dplls_init(dev_priv);
 
DRM_DEBUG_KMS("CDCLK running at %dKHz\n",
intel_ddi_get_cdclk_freq(dev_priv));
if (IS_SKYLAKE(dev)) {
int cdclk_freq;
 
if (IS_SKYLAKE(dev)) {
cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
dev_priv->skl_boot_cdclk = cdclk_freq;
if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
DRM_ERROR("LCPLL1 is disabled\n");
else
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
} else if (IS_BROXTON(dev)) {
broxton_init_cdclk(dev);
broxton_ddi_phy_init(dev);
} else {
/*
* The LCPLL register should be turned on by the BIOS. For now
1989,47 → 3080,32
 
intel_ddi_post_disable(intel_encoder);
 
val = I915_READ(_FDI_RXA_CTL);
val = I915_READ(FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_ENABLE;
I915_WRITE(_FDI_RXA_CTL, val);
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
 
val = I915_READ(_FDI_RXA_MISC);
val = I915_READ(FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
I915_WRITE(_FDI_RXA_MISC, val);
I915_WRITE(FDI_RX_MISC(PIPE_A), val);
 
val = I915_READ(_FDI_RXA_CTL);
val = I915_READ(FDI_RX_CTL(PIPE_A));
val &= ~FDI_PCDCLK;
I915_WRITE(_FDI_RXA_CTL, val);
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
 
val = I915_READ(_FDI_RXA_CTL);
val = I915_READ(FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_PLL_ENABLE;
I915_WRITE(_FDI_RXA_CTL, val);
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
}
 
static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base);
int type = intel_dig_port->base.type;
 
if (type != INTEL_OUTPUT_DISPLAYPORT &&
type != INTEL_OUTPUT_EDP &&
type != INTEL_OUTPUT_UNKNOWN) {
return;
}
 
intel_dp_hot_plug(intel_encoder);
}
 
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_hdmi *intel_hdmi;
u32 temp, flags = 0;
struct drm_device *dev = dev_priv->dev;
 
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
2041,7 → 3117,7
else
flags |= DRM_MODE_FLAG_NVSYNC;
 
pipe_config->adjusted_mode.flags |= flags;
pipe_config->base.adjusted_mode.flags |= flags;
 
switch (temp & TRANS_DDI_BPC_MASK) {
case TRANS_DDI_BPC_6:
2074,6 → 3150,8
case TRANS_DDI_MODE_SELECT_DP_SST:
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->has_dp_encoder = true;
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
break;
default:
2106,10 → 3184,7
dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
}
 
if (INTEL_INFO(dev)->gen <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
else
skl_ddi_clock_get(encoder, pipe_config);
intel_ddi_clock_get(encoder, pipe_config);
}
 
static void intel_ddi_destroy(struct drm_encoder *encoder)
2119,7 → 3194,7
}
 
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
int type = encoder->type;
int port = intel_ddi_get_encoder_port(encoder);
2145,7 → 3220,7
struct intel_connector *connector;
enum port port = intel_dig_port->port;
 
connector = kzalloc(sizeof(*connector), GFP_KERNEL);
connector = intel_connector_alloc();
if (!connector)
return NULL;
 
2164,7 → 3239,7
struct intel_connector *connector;
enum port port = intel_dig_port->port;
 
connector = kzalloc(sizeof(*connector), GFP_KERNEL);
connector = intel_connector_alloc();
if (!connector)
return NULL;
 
2186,10 → 3261,9
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
if (!init_dp && !init_hdmi) {
DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, assuming it is\n",
DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
port_name(port));
init_hdmi = true;
init_dp = true;
return;
}
 
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
2218,7 → 3292,6
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_ddi_hot_plug;
 
if (init_dp) {
if (!intel_ddi_init_dp_connector(intel_dig_port))
2225,7 → 3298,15
goto err;
 
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hpd_irq_port[port] = intel_dig_port;
/*
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)
&& port == PORT_B)
dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
else
dev_priv->hotplug.irq_port[port] = intel_dig_port;
}
 
/* In theory we don't need the encoder->type check, but leave it just in
/drivers/video/drm/i915/intel_display.c
37,6 → 37,8
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
43,29 → 45,37
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
 
/* Primary plane formats supported by all gen */
#define COMMON_PRIMARY_FORMATS \
DRM_FORMAT_C8, \
DRM_FORMAT_RGB565, \
DRM_FORMAT_XRGB8888, \
DRM_FORMAT_ARGB8888
 
/* Primary plane formats for gen <= 3 */
static const uint32_t intel_primary_formats_gen2[] = {
COMMON_PRIMARY_FORMATS,
static const uint32_t i8xx_primary_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_XRGB8888,
};
 
/* Primary plane formats for gen >= 4 */
static const uint32_t intel_primary_formats_gen4[] = {
COMMON_PRIMARY_FORMATS, \
static const uint32_t i965_primary_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
};
 
static const uint32_t skl_primary_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_ARGB2101010,
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
};
 
/* Cursor formats */
76,12 → 86,10
void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
struct intel_crtc_state *pipe_config);
 
static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
95,18 → 103,21
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_config *pipe_config);
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_config *pipe_config);
const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
int num_connectors);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary(struct drm_crtc *crtc);
 
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
{
if (!connector->mst_port)
return connector->encoder;
else
return &connector->mst_port->mst_encoders[pipe]->base;
}
 
typedef struct {
int min, max;
} intel_range_t;
122,6 → 133,42
intel_p2_t p2;
};
 
/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
 
/* Obtain SKU information */
mutex_lock(&dev_priv->sb_lock);
hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
CCK_FUSE_HPLL_FREQ_MASK;
mutex_unlock(&dev_priv->sb_lock);
 
return vco_freq[hpll_freq] * 1000;
}
 
static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
const char *name, u32 reg)
{
u32 val;
int divider;
 
if (dev_priv->hpll_freq == 0)
dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
 
mutex_lock(&dev_priv->sb_lock);
val = vlv_cck_read(dev_priv, reg);
mutex_unlock(&dev_priv->sb_lock);
 
divider = val & CCK_FREQUENCY_VALUES;
 
WARN((val & CCK_FREQUENCY_STATUS) !=
(divider << CCK_FREQUENCY_STATUS_SHIFT),
"%s change in progress\n", name);
 
return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
}
 
int
intel_pch_rawclk(struct drm_device *dev)
{
132,6 → 179,50
return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}
 
/* hrawclock is 1/4 the FSB frequency */
int intel_hrawclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t clkcfg;
 
/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
if (IS_VALLEYVIEW(dev))
return 200;
 
clkcfg = I915_READ(CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
return 100;
case CLKCFG_FSB_533:
return 133;
case CLKCFG_FSB_667:
return 166;
case CLKCFG_FSB_800:
return 200;
case CLKCFG_FSB_1067:
return 266;
case CLKCFG_FSB_1333:
return 333;
/* these two are just a guess; one of them might be right */
case CLKCFG_FSB_1600:
case CLKCFG_FSB_1600_ALT:
return 400;
default:
return 133;
}
}
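Since hrawclk is a quarter of the FSB frequency, the table above is just the
FSB rate divided by four and truncated: 533/4 = 133, 1067/4 = 266 and so on.
A one-line sketch of the same relation (hrawclk_from_fsb is a hypothetical
helper):

static int hrawclk_from_fsb(int fsb_mhz)
{
	return fsb_mhz / 4;	/* 533 -> 133, 800 -> 200, 1067 -> 266 */
}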
 
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
if (!IS_VALLEYVIEW(dev_priv))
return;
 
dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
CCK_CZ_CLOCK_CONTROL);
 
DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}
 
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
387,7 → 478,7
* them would make no difference.
*/
.dot = { .min = 25000 * 5, .max = 540000 * 5},
.vco = { .min = 4860000, .max = 6700000 },
.vco = { .min = 4800000, .max = 6480000 },
.n = { .min = 1, .max = 1 },
.m1 = { .min = 2, .max = 2 },
.m2 = { .min = 24 << 22, .max = 175 << 22 },
395,14 → 486,22
.p2 = { .p2_slow = 1, .p2_fast = 14 },
};
 
static void vlv_clock(int refclk, intel_clock_t *clock)
static const intel_limit_t intel_limits_bxt = {
/* FIXME: find real dot limits */
.dot = { .min = 0, .max = INT_MAX },
.vco = { .min = 4800000, .max = 6700000 },
.n = { .min = 1, .max = 1 },
.m1 = { .min = 2, .max = 2 },
/* FIXME: find real m2 limits */
.m2 = { .min = 2 << 22, .max = 255 << 22 },
.p1 = { .min = 2, .max = 4 },
.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
 
static bool
needs_modeset(struct drm_crtc_state *state)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n == 0 || clock->p == 0))
return;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
return drm_atomic_crtc_needs_modeset(state);
}
 
/**
426,25 → 525,38
* intel_pipe_has_type() but looking at encoder->new_crtc instead of
* encoder->crtc.
*/
static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
int type)
{
struct drm_device *dev = crtc->base.dev;
struct drm_atomic_state *state = crtc_state->base.state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
int i, num_connectors = 0;
 
for_each_intel_encoder(dev, encoder)
if (encoder->new_crtc == crtc && encoder->type == type)
for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->base.crtc)
continue;
 
num_connectors++;
 
encoder = to_intel_encoder(connector_state->best_encoder);
if (encoder->type == type)
return true;
}
 
WARN_ON(num_connectors == 0);
 
return false;
}
 
static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
int refclk)
static const intel_limit_t *
intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
{
struct drm_device *dev = crtc->base.dev;
struct drm_device *dev = crtc_state->base.crtc->dev;
const intel_limit_t *limit;
 
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev)) {
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
462,20 → 574,21
return limit;
}
 
static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
static const intel_limit_t *
intel_g4x_limit(struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_device *dev = crtc_state->base.crtc->dev;
const intel_limit_t *limit;
 
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
limit = &intel_limits_g4x_hdmi;
} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
} else /* The option is for other outputs */
limit = &intel_limits_i9xx_sdvo;
483,17 → 596,20
return limit;
}
 
static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
static const intel_limit_t *
intel_limit(struct intel_crtc_state *crtc_state, int refclk)
{
struct drm_device *dev = crtc->base.dev;
struct drm_device *dev = crtc_state->base.crtc->dev;
const intel_limit_t *limit;
 
if (HAS_PCH_SPLIT(dev))
limit = intel_ironlake_limit(crtc, refclk);
if (IS_BROXTON(dev))
limit = &intel_limits_bxt;
else if (HAS_PCH_SPLIT(dev))
limit = intel_ironlake_limit(crtc_state, refclk);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
limit = intel_g4x_limit(crtc_state);
} else if (IS_PINEVIEW(dev)) {
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
502,14 → 618,14
} else if (IS_VALLEYVIEW(dev)) {
limit = &intel_limits_vlv;
} else if (!IS_GEN2(dev)) {
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
else
limit = &intel_limits_i9xx_sdvo;
} else {
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
limit = &intel_limits_i8xx_dvo;
else
limit = &intel_limits_i8xx_dac;
517,15 → 633,25
return limit;
}
 
/*
* Platform-specific helpers to calculate the port PLL loopback (clock.m)
* and post-divider (clock.p) values, and the pre-divided (clock.vco) and
* post-divided fast (clock.dot) clock rates. This fast dot clock is fed to
* the port's IO logic. The helpers return the rate of the clock that is fed
* to the display engine's pipe, which can be the fast dot clock rate above
* or a divided-down version of it.
*/
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n == 0 || clock->p == 0))
return;
return 0;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 
return clock->dot;
}
 
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
533,25 → 659,41
return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
 
static void i9xx_clock(int refclk, intel_clock_t *clock)
static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
{
clock->m = i9xx_dpll_compute_m(clock);
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
return;
return 0;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 
return clock->dot;
}
 
static void chv_clock(int refclk, intel_clock_t *clock)
static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n == 0 || clock->p == 0))
return;
return 0;
clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 
return clock->dot / 5;
}
 
int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n == 0 || clock->p == 0))
return 0;
clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
clock->n << 22);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 
return clock->dot / 5;
}
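A worked example for the CHV variant with illustrative values: m2 is kept in
22.22 fixed point, and the returned pipe clock is dot / 5 because the fast
clock feeding the port IO runs at five times the pipe rate. With a 100 MHz
reference, m1 = 2, m2 = 27.0, n = 1 and p1/p2 = 4/1 (the DP HBR case):

#include <stdint.h>

static void chv_clock_example(void)
{
	uint64_t refclk = 100000;			/* kHz */
	uint64_t m = 2 * (27ULL << 22);			/* m1 * m2, 22.22 fixed point */
	uint64_t vco = refclk * m / (1ULL << 22);	/* 5400000 kHz */
	uint64_t dot = vco / (4 * 1);			/* 1350000 kHz fast clock */
	uint64_t pipe = dot / 5;			/* 270000 kHz */
	(void)pipe;
}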
 
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
573,11 → 715,11
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid("m1 out of range\n");
 
if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
if (clock->m1 <= clock->m2)
INTELPllInvalid("m1 <= m2\n");
 
if (!IS_VALLEYVIEW(dev)) {
if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
INTELPllInvalid("p out of range\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
595,16 → 737,14
return true;
}
 
static bool
i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
static int
i9xx_select_p2_div(const intel_limit_t *limit,
const struct intel_crtc_state *crtc_state,
int target)
{
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
struct drm_device *dev = crtc_state->base.crtc->dev;
 
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
611,18 → 751,31
* single/dual channel state, if we even can.
*/
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
return limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
return limit->p2.p2_slow;
} else {
if (target < limit->p2.dot_limit)
clock.p2 = limit->p2.p2_slow;
return limit->p2.p2_slow;
else
clock.p2 = limit->p2.p2_fast;
return limit->p2.p2_fast;
}
}
 
static bool
i9xx_find_best_dpll(const intel_limit_t *limit,
struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
intel_clock_t clock;
int err = target;
 
memset(best_clock, 0, sizeof(*best_clock));
 
clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
 
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
clock.m1++) {
for (clock.m2 = limit->m2.min;
635,7 → 788,7
clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
 
i9xx_clock(refclk, &clock);
i9xx_calc_dpll_params(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
657,33 → 810,19
}
 
static bool
pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
pnv_find_best_dpll(const intel_limit_t *limit,
struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->base.dev;
struct drm_device *dev = crtc_state->base.crtc->dev;
intel_clock_t clock;
int err = target;
 
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
* single/dual channel state, if we even can.
*/
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
} else {
if (target < limit->p2.dot_limit)
clock.p2 = limit->p2.p2_slow;
else
clock.p2 = limit->p2.p2_fast;
}
 
memset(best_clock, 0, sizeof(*best_clock));
 
clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
 
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
clock.m1++) {
for (clock.m2 = limit->m2.min;
694,7 → 833,7
clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
 
pineview_clock(refclk, &clock);
pnv_calc_dpll_params(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
716,31 → 855,22
}
 
static bool
g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
g4x_find_best_dpll(const intel_limit_t *limit,
struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->base.dev;
struct drm_device *dev = crtc_state->base.crtc->dev;
intel_clock_t clock;
int max_n;
bool found;
bool found = false;
/* approximately equals target * 0.00585 */
int err_most = (target >> 8) + (target >> 9);
found = false;
 
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
} else {
if (target < limit->p2.dot_limit)
clock.p2 = limit->p2.p2_slow;
else
clock.p2 = limit->p2.p2_fast;
}
memset(best_clock, 0, sizeof(*best_clock));
 
memset(best_clock, 0, sizeof(*best_clock));
clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
 
max_n = limit->n.max;
/* based on hardware requirement, prefer smaller n to precision */
for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
753,7 → 883,7
clock.p1 >= limit->p1.min; clock.p1--) {
int this_err;
 
i9xx_clock(refclk, &clock);
i9xx_calc_dpll_params(refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
772,11 → 902,53
return found;
}
 
/*
* Check whether the calculated PLL configuration is better than the best
* configuration and error found so far. The calculated error is returned in @error_ppm.
*/
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
const intel_clock_t *calculated_clock,
const intel_clock_t *best_clock,
unsigned int best_error_ppm,
unsigned int *error_ppm)
{
/*
* For CHV ignore the error and consider only the P value.
* Prefer a bigger P value based on HW requirements.
*/
if (IS_CHERRYVIEW(dev)) {
*error_ppm = 0;
 
return calculated_clock->p > best_clock->p;
}
 
if (WARN_ON_ONCE(!target_freq))
return false;
 
*error_ppm = div_u64(1000000ULL *
abs(target_freq - calculated_clock->dot),
target_freq);
/*
* Prefer a better P value over a better (smaller) error if the error
* is small. Ensure this preference for future configurations too by
* setting the error to 0.
*/
if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
*error_ppm = 0;
 
return true;
}
 
return *error_ppm + 10 < best_error_ppm;
}
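The error metric here is the deviation of the achieved dot clock from the target in parts per million, and a candidate only replaces the current best if it wins by more than 10 ppm (or is within 100 ppm and offers a larger P). A small standalone sketch of that comparison, with made-up numbers:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* ppm deviation of the achieved clock from the target, as computed above. */
static unsigned int clock_error_ppm(int target, int achieved)
{
	return (unsigned int)(1000000ULL * abs(target - achieved) / target);
}

int main(void)
{
	int target = 162000;		/* kHz, illustrative */
	unsigned int best_ppm = 200;	/* error of the best clock so far */
	unsigned int ppm = clock_error_ppm(target, 161987);

	printf("error = %u ppm, replaces best (%u ppm): %s\n",
	       ppm, best_ppm, ppm + 10 < best_ppm ? "yes" : "no");
	return 0;
}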
 
static bool
vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
vlv_find_best_dpll(const intel_limit_t *limit,
struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
unsigned int bestppm = 1000000;
796,50 → 968,49
clock.p = clock.p1 * clock.p2;
/* based on hardware requirement, prefer bigger m1,m2 values */
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
unsigned int ppm, diff;
unsigned int ppm;
 
clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
refclk * clock.m1);
 
vlv_clock(refclk, &clock);
vlv_calc_dpll_params(refclk, &clock);
 
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
 
diff = abs(clock.dot - target);
ppm = div_u64(1000000ULL * diff, target);
if (!vlv_PLL_is_optimal(dev, target,
&clock,
best_clock,
bestppm, &ppm))
continue;
 
if (ppm < 100 && clock.p > best_clock->p) {
bestppm = 0;
*best_clock = clock;
found = true;
}
 
if (bestppm >= 10 && ppm < bestppm - 10) {
bestppm = ppm;
*best_clock = clock;
found = true;
}
}
}
}
}
 
return found;
}
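Instead of iterating over m2, the loop above solves dot = refclk * m1 * m2 / (n * p) for m2 directly at each (n, p1, p2, m1) point and then validates the rounded result. A sketch of that inversion, with illustrative values:

#include <stdint.h>
#include <stdio.h>

static int64_t div_round_closest(int64_t a, int64_t b)
{
	return (a + b / 2) / b;
}

int main(void)
{
	/* Illustrative values, not taken from the VLV limit table. */
	int64_t target = 148500, refclk = 100000;	/* kHz */
	int64_t n = 2, m1 = 3, p = 6;			/* p = p1 * p2 */

	/* Invert dot = refclk * m1 * m2 / (n * p) for m2, rounding. */
	int64_t m2 = div_round_closest(target * p * n, refclk * m1);
	int64_t dot = div_round_closest(refclk * m1 * m2, n * p);

	printf("m2 = %lld gives dot = %lld kHz (target %lld kHz)\n",
	       (long long)m2, (long long)dot, (long long)target);
	return 0;
}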
 
static bool
chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
chv_find_best_dpll(const intel_limit_t *limit,
struct intel_crtc_state *crtc_state,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
unsigned int best_error_ppm;
intel_clock_t clock;
uint64_t m2;
int found = false;
 
memset(best_clock, 0, sizeof(*best_clock));
best_error_ppm = 1000000;
 
/*
* Based on hardware doc, n is always set to 1, and m1 always
853,6 → 1024,7
for (clock.p2 = limit->p2.p2_fast;
clock.p2 >= limit->p2.p2_slow;
clock.p2 -= clock.p2 > 10 ? 2 : 1) {
unsigned int error_ppm;
 
clock.p = clock.p1 * clock.p2;
 
864,23 → 1036,33
 
clock.m2 = m2;
 
chv_clock(refclk, &clock);
chv_calc_dpll_params(refclk, &clock);
 
if (!intel_PLL_is_valid(dev, limit, &clock))
continue;
 
/* based on hardware requirement, prefer bigger p
*/
if (clock.p > best_clock->p) {
if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
best_error_ppm, &error_ppm))
continue;
 
*best_clock = clock;
best_error_ppm = error_ppm;
found = true;
}
}
}
 
return found;
}
 
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
intel_clock_t *best_clock)
{
int refclk = i9xx_get_refclk(crtc_state, 0);
 
return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
target_clock, refclk, NULL, best_clock);
}
 
bool intel_crtc_active(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
893,9 → 1075,13
*
* We can ditch the crtc->primary->fb check as soon as we can
* properly reconstruct framebuffers.
*
* FIXME: The intel_crtc->active here should be switched to
* crtc->state->active once we have proper CRTC states wired up
* for atomic.
*/
return intel_crtc->active && crtc->primary->fb &&
intel_crtc->config.adjusted_mode.crtc_clock;
return intel_crtc->active && crtc->primary->state->fb &&
intel_crtc->config->base.adjusted_mode.crtc_clock;
}
 
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
904,7 → 1090,7
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
return intel_crtc->config.cpu_transcoder;
return intel_crtc->config->cpu_transcoder;
}
 
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
920,7 → 1106,7
line_mask = DSL_LINEMASK_GEN3;
 
line1 = I915_READ(reg) & line_mask;
mdelay(5);
msleep(5);
line2 = I915_READ(reg) & line_mask;
 
return line1 == line2;
946,7 → 1132,7
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
 
if (INTEL_INFO(dev)->gen >= 4) {
963,51 → 1149,6
}
}
 
/*
* ibx_digital_port_connected - is the specified port connected?
* @dev_priv: i915 private structure
* @port: the port to test
*
* Returns true if @port is connected, false otherwise.
*/
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
u32 bit;
 
if (HAS_PCH_IBX(dev_priv->dev)) {
switch (port->port) {
case PORT_B:
bit = SDE_PORTB_HOTPLUG;
break;
case PORT_C:
bit = SDE_PORTC_HOTPLUG;
break;
case PORT_D:
bit = SDE_PORTD_HOTPLUG;
break;
default:
return true;
}
} else {
switch (port->port) {
case PORT_B:
bit = SDE_PORTB_HOTPLUG_CPT;
break;
case PORT_C:
bit = SDE_PORTC_HOTPLUG_CPT;
break;
case PORT_D:
bit = SDE_PORTD_HOTPLUG_CPT;
break;
default:
return true;
}
}
 
return I915_READ(SDEISR) & bit;
}
 
static const char *state_string(bool enabled)
{
return enabled ? "on" : "off";
1017,14 → 1158,12
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
int reg;
u32 val;
bool cur_state;
 
reg = DPLL(pipe);
val = I915_READ(reg);
val = I915_READ(DPLL(pipe));
cur_state = !!(val & DPLL_VCO_ENABLE);
WARN(cur_state != state,
I915_STATE_WARN(cur_state != state,
"PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
1035,12 → 1174,12
u32 val;
bool cur_state;
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
 
cur_state = val & DSI_PLL_VCO_EN;
WARN(cur_state != state,
I915_STATE_WARN(cur_state != state,
"DSI PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
1052,10 → 1191,10
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
if (crtc->config.shared_dpll < 0)
if (crtc->config->shared_dpll < 0)
return NULL;
 
return &dev_priv->shared_dplls[crtc->config.shared_dpll];
return &dev_priv->shared_dplls[crtc->config->shared_dpll];
}
 
/* For ILK+ */
1071,7 → 1210,7
return;
 
cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
WARN(cur_state != state,
I915_STATE_WARN(cur_state != state,
"%s assertion failure (expected %s, current %s)\n",
pll->name, state_string(state), state_string(cur_state));
}
1079,8 → 1218,6
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
int reg;
u32 val;
bool cur_state;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
1087,15 → 1224,13
 
if (HAS_DDI(dev_priv->dev)) {
/* DDI does not have a specific FDI_TX register */
reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
val = I915_READ(reg);
u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
} else {
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
u32 val = I915_READ(FDI_TX_CTL(pipe));
cur_state = !!(val & FDI_TX_ENABLE);
}
WARN(cur_state != state,
I915_STATE_WARN(cur_state != state,
"FDI TX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
1105,14 → 1240,12
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
int reg;
u32 val;
bool cur_state;
 
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
val = I915_READ(FDI_RX_CTL(pipe));
cur_state = !!(val & FDI_RX_ENABLE);
WARN(cur_state != state,
I915_STATE_WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
1122,7 → 1255,6
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
 
/* ILK FDI PLL is always enabled */
1133,22 → 1265,19
if (HAS_DDI(dev_priv->dev))
return;
 
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
val = I915_READ(FDI_TX_CTL(pipe));
I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
 
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
int reg;
u32 val;
bool cur_state;
 
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
val = I915_READ(FDI_RX_CTL(pipe));
cur_state = !!(val & FDI_RX_PLL_ENABLE);
WARN(cur_state != state,
I915_STATE_WARN(cur_state != state,
"FDI RX PLL assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
1190,7 → 1319,7
((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
locked = false;
 
WARN(panel_pipe == pipe && locked,
I915_STATE_WARN(panel_pipe == pipe && locked,
"panel assertion failure, pipe %c regs locked\n",
pipe_name(pipe));
}
1202,11 → 1331,11
bool cur_state;
 
if (IS_845G(dev) || IS_I865G(dev))
cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
else
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
 
WARN(cur_state != state,
I915_STATE_WARN(cur_state != state,
"cursor on pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
}
1216,8 → 1345,6
void assert_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
int reg;
u32 val;
bool cur_state;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
1231,12 → 1358,11
POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
cur_state = false;
} else {
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
u32 val = I915_READ(PIPECONF(cpu_transcoder));
cur_state = !!(val & PIPECONF_ENABLE);
}
 
WARN(cur_state != state,
I915_STATE_WARN(cur_state != state,
"pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
}
1244,14 → 1370,12
static void assert_plane(struct drm_i915_private *dev_priv,
enum plane plane, bool state)
{
int reg;
u32 val;
bool cur_state;
 
reg = DSPCNTR(plane);
val = I915_READ(reg);
val = I915_READ(DSPCNTR(plane));
cur_state = !!(val & DISPLAY_PLANE_ENABLE);
WARN(cur_state != state,
I915_STATE_WARN(cur_state != state,
"plane %c assertion failure (expected %s, current %s)\n",
plane_name(plane), state_string(state), state_string(cur_state));
}
1263,15 → 1387,12
enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
int reg, i;
u32 val;
int cur_pipe;
int i;
 
/* Primary planes are fixed to pipes on gen4+ */
if (INTEL_INFO(dev)->gen >= 4) {
reg = DSPCNTR(pipe);
val = I915_READ(reg);
WARN(val & DISPLAY_PLANE_ENABLE,
u32 val = I915_READ(DSPCNTR(pipe));
I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
"plane %c assertion failure, should be disabled but not\n",
plane_name(pipe));
return;
1279,11 → 1400,10
 
/* Need to check both planes against the pipe */
for_each_pipe(dev_priv, i) {
reg = DSPCNTR(i);
val = I915_READ(reg);
cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
u32 val = I915_READ(DSPCNTR(i));
enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
DISPPLANE_SEL_PIPE_SHIFT;
WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
"plane %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(i), pipe_name(pipe));
}
1293,34 → 1413,30
enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
int reg, sprite;
u32 val;
int sprite;
 
if (INTEL_INFO(dev)->gen >= 9) {
for_each_sprite(pipe, sprite) {
val = I915_READ(PLANE_CTL(pipe, sprite));
WARN(val & PLANE_CTL_ENABLE,
for_each_sprite(dev_priv, pipe, sprite) {
u32 val = I915_READ(PLANE_CTL(pipe, sprite));
I915_STATE_WARN(val & PLANE_CTL_ENABLE,
"plane %d assertion failure, should be off on pipe %c but is still active\n",
sprite, pipe_name(pipe));
}
} else if (IS_VALLEYVIEW(dev)) {
for_each_sprite(pipe, sprite) {
reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
WARN(val & SP_ENABLE,
for_each_sprite(dev_priv, pipe, sprite) {
u32 val = I915_READ(SPCNTR(pipe, sprite));
I915_STATE_WARN(val & SP_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
sprite_name(pipe, sprite), pipe_name(pipe));
}
} else if (INTEL_INFO(dev)->gen >= 7) {
reg = SPRCTL(pipe);
val = I915_READ(reg);
WARN(val & SPRITE_ENABLE,
u32 val = I915_READ(SPRCTL(pipe));
I915_STATE_WARN(val & SPRITE_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
} else if (INTEL_INFO(dev)->gen >= 5) {
reg = DVSCNTR(pipe);
val = I915_READ(reg);
WARN(val & DVS_ENABLE,
u32 val = I915_READ(DVSCNTR(pipe));
I915_STATE_WARN(val & DVS_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
}
1328,7 → 1444,7
 
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
if (WARN_ON(drm_crtc_vblank_get(crtc) == 0))
if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
drm_crtc_vblank_put(crtc);
}
 
1337,25 → 1453,23
u32 val;
bool enabled;
 
WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
 
val = I915_READ(PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
DREF_SUPERSPREAD_SOURCE_MASK));
WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
 
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
bool enabled;
 
reg = PCH_TRANSCONF(pipe);
val = I915_READ(reg);
val = I915_READ(PCH_TRANSCONF(pipe));
enabled = !!(val & TRANS_ENABLE);
WARN(enabled,
I915_STATE_WARN(enabled,
"transcoder assertion failed, should be off on pipe %c but is still active\n",
pipe_name(pipe));
}
1435,11 → 1549,11
enum pipe pipe, int reg, u32 port_sel)
{
u32 val = I915_READ(reg);
WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
 
WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
&& (val & DP_PIPEB_SELECT),
"IBX PCH dp port still using transcoder B\n");
}
1448,11 → 1562,11
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
 
WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
&& (val & SDVO_PIPE_B_SELECT),
"IBX PCH hdmi port still using transcoder B\n");
}
1460,7 → 1574,6
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
 
assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1467,15 → 1580,13
assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
 
reg = PCH_ADPA;
val = I915_READ(reg);
WARN(adpa_pipe_enabled(dev_priv, pipe, val),
val = I915_READ(PCH_ADPA);
I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
"PCH VGA enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
 
reg = PCH_LVDS;
val = I915_READ(reg);
WARN(lvds_pipe_enabled(dev_priv, pipe, val),
val = I915_READ(PCH_LVDS);
I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
 
1484,28 → 1595,8
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
 
static void intel_init_dpio(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!IS_VALLEYVIEW(dev))
return;
 
/*
* IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
* CHV x1 PHY (DP/HDMI D)
* IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
*/
if (IS_CHERRYVIEW(dev)) {
DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
} else {
DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
}
}
 
static void vlv_enable_pll(struct intel_crtc *crtc,
const struct intel_crtc_config *pipe_config)
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1544,7 → 1635,7
}
 
static void chv_enable_pll(struct intel_crtc *crtc,
const struct intel_crtc_config *pipe_config)
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1556,7 → 1647,7
 
BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
 
/* Enable back the 10bit clock to display controller */
tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1563,6 → 1654,8
tmp |= DPIO_DCLKP_EN;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
 
mutex_unlock(&dev_priv->sb_lock);
 
/*
* Need to wait > 100ns between dclkp clock enable bit and PLL enable.
*/
1578,8 → 1671,6
/* not sure when this should be written */
I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
POSTING_READ(DPLL_MD(pipe));
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
static int intel_num_dvo_pipes(struct drm_device *dev)
1588,7 → 1679,7
int count = 0;
 
for_each_intel_crtc(dev, crtc)
count += crtc->active &&
count += crtc->base.state->active &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
 
return count;
1599,7 → 1690,7
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = DPLL(crtc->pipe);
u32 dpll = crtc->config.dpll_hw_state.dpll;
u32 dpll = crtc->config->dpll_hw_state.dpll;
 
assert_pipe_disabled(dev_priv, crtc->pipe);
 
1623,6 → 1714,15
I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
}
 
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
I915_WRITE(reg, 0);
 
I915_WRITE(reg, dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(reg);
udelay(150);
1629,7 → 1729,7
 
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DPLL_MD(crtc->pipe),
crtc->config.dpll_hw_state.dpll_md);
crtc->config->dpll_hw_state.dpll_md);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
1669,7 → 1769,7
/* Disable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
intel_num_dvo_pipes(dev) == 1) {
!intel_num_dvo_pipes(dev)) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
I915_WRITE(DPLL(PIPE_A),
1684,13 → 1784,13
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
 
I915_WRITE(DPLL(pipe), 0);
I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
POSTING_READ(DPLL(pipe));
}
 
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
u32 val = 0;
u32 val;
 
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
1699,8 → 1799,9
* Leave integrated clock source and reference clock enabled for pipe B.
* The latter is needed for VGA hotplug / manual detection.
*/
val = DPLL_VGA_MODE_DIS;
if (pipe == PIPE_B)
val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
 
1715,13 → 1816,14
assert_pipe_disabled(dev_priv, pipe);
 
/* Set PLL en = 0 */
val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
val = DPLL_SSC_REF_CLK_CHV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
 
/* Disable 10bit clock to display controller */
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1728,22 → 1830,12
val &= ~DPIO_DCLKP_EN;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
 
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
} else {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
mutex_unlock(&dev_priv->sb_lock);
}
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport)
struct intel_digital_port *dport,
unsigned int expected_mask)
{
u32 port_mask;
int dpll_reg;
1756,6 → 1848,7
case PORT_C:
port_mask = DPLL_PORTC_READY_MASK;
dpll_reg = DPLL(0);
expected_mask <<= 4;
break;
case PORT_D:
port_mask = DPLL_PORTD_READY_MASK;
1765,9 → 1858,9
BUG();
}
 
if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
port_name(dport->port), I915_READ(dpll_reg));
if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
 
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1834,13 → 1927,15
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
/* PCH only available on ILK+ */
BUG_ON(INTEL_INFO(dev)->gen < 5);
if (WARN_ON(pll == NULL))
if (INTEL_INFO(dev)->gen < 5)
return;
 
if (WARN_ON(pll->config.crtc_mask == 0))
if (pll == NULL)
return;
 
if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
return;
 
DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
pll->name, pll->active, pll->on,
crtc->base.base.id);
1896,10 → 1991,14
 
if (HAS_PCH_IBX(dev_priv->dev)) {
/*
* make the BPC in transcoder be consistent with
* that in pipeconf reg.
* Make the BPC in transcoder be consistent with
* that in pipeconf reg. For HDMI we must use 8bpc
* here for both 8bpc and 12bpc.
*/
val &= ~PIPECONF_BPC_MASK;
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
val |= PIPECONF_8BPC;
else
val |= pipeconf_val & PIPECONF_BPC_MASK;
}
 
1931,9 → 2030,9
assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
 
/* Workaround: set timing override bit. */
val = I915_READ(_TRANSA_CHICKEN2);
val = I915_READ(TRANS_CHICKEN2(PIPE_A));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
I915_WRITE(_TRANSA_CHICKEN2, val);
I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
 
val = TRANS_ENABLE;
pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1991,9 → 2090,9
DRM_ERROR("Failed to disable PCH transcoder\n");
 
/* Workaround: clear timing override bit. */
val = I915_READ(_TRANSA_CHICKEN2);
val = I915_READ(TRANS_CHICKEN2(PIPE_A));
val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
I915_WRITE(_TRANSA_CHICKEN2, val);
I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
 
/**
2014,6 → 2113,8
int reg;
u32 val;
 
DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
 
assert_planes_disabled(dev_priv, pipe);
assert_cursor_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
2028,13 → 2129,13
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
* need the check.
*/
if (!HAS_PCH_SPLIT(dev_priv->dev))
if (HAS_GMCH_DISPLAY(dev_priv->dev))
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
else {
if (crtc->config.has_pch_encoder) {
if (crtc->config->has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
assert_fdi_tx_pll_enabled(dev_priv,
2068,11 → 2169,13
static void intel_disable_pipe(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
int reg;
u32 val;
 
DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
 
/*
* Make sure planes won't keep trying to pump pixels to us,
* or we might hang the display.
2090,7 → 2193,7
* Double wide has implications for planes
* so best keep it disabled when not needed.
*/
if (crtc->config.double_wide)
if (crtc->config->double_wide)
val &= ~PIPECONF_DOUBLE_WIDE;
 
/* Don't disable pipe or pipe PLLs if needed */
2103,121 → 2206,148
intel_wait_for_pipe_off(crtc);
}
 
/*
* Plane regs are double buffered, going from enabled->disabled needs a
* trigger in order to latch. The display address reg provides this.
*/
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
enum plane plane)
static bool need_vtd_wa(struct drm_device *dev)
{
struct drm_device *dev = dev_priv->dev;
u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
 
I915_WRITE(reg, I915_READ(reg));
POSTING_READ(reg);
#ifdef CONFIG_INTEL_IOMMU
if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
return true;
#endif
return false;
}
 
/**
* intel_enable_primary_hw_plane - enable the primary plane on a given pipe
* @plane: plane to be enabled
* @crtc: crtc for the plane
*
* Enable @plane on @crtc, making sure that the pipe is running first.
*/
static void intel_enable_primary_hw_plane(struct drm_plane *plane,
struct drm_crtc *crtc)
unsigned int
intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
uint64_t fb_format_modifier, unsigned int plane)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned int tile_height;
uint32_t pixel_bytes;
 
/* If the pipe isn't enabled, we can't pump pixels and may hang */
assert_pipe_enabled(dev_priv, intel_crtc->pipe);
switch (fb_format_modifier) {
case DRM_FORMAT_MOD_NONE:
tile_height = 1;
break;
case I915_FORMAT_MOD_X_TILED:
tile_height = IS_GEN2(dev) ? 16 : 8;
break;
case I915_FORMAT_MOD_Y_TILED:
tile_height = 32;
break;
case I915_FORMAT_MOD_Yf_TILED:
pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
switch (pixel_bytes) {
default:
case 1:
tile_height = 64;
break;
case 2:
case 4:
tile_height = 32;
break;
case 8:
tile_height = 16;
break;
case 16:
WARN_ONCE(1,
"128-bit pixels are not supported for display!");
tile_height = 16;
break;
}
break;
default:
MISSING_CASE(fb_format_modifier);
tile_height = 1;
break;
}
 
if (intel_crtc->primary_enabled)
return;
return tile_height;
}
 
intel_crtc->primary_enabled = true;
 
dev_priv->display.update_primary_plane(crtc, plane->fb,
crtc->x, crtc->y);
 
/*
* BDW signals flip done immediately if the plane
* is disabled, even if the plane enable is already
* armed to occur at the next vblank :(
*/
if (IS_BROADWELL(dev))
intel_wait_for_vblank(dev, intel_crtc->pipe);
unsigned int
intel_fb_align_height(struct drm_device *dev, unsigned int height,
uint32_t pixel_format, uint64_t fb_format_modifier)
{
return ALIGN(height, intel_tile_height(dev, pixel_format,
fb_format_modifier, 0));
}
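intel_tile_height() reports how many scanlines one tile spans (1 for linear, 8 or 16 for X-tiled, 32 for Y-tiled, and a bpp-dependent value for Yf), and intel_fb_align_height() rounds a framebuffer height up to a whole number of tile rows. A minimal sketch of that rounding, with an assumed Y-tiled buffer:

#include <stdio.h>

/* Round x up to a multiple of a (a must be a power of two). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Illustrative: a 1080-line Y-tiled fb, 32 scanlines per tile. */
	unsigned int height = 1080, tile_height = 32;

	printf("aligned height = %u lines\n", ALIGN(height, tile_height));
	return 0;
}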
 
/**
* intel_disable_primary_hw_plane - disable the primary hardware plane
* @plane: plane to be disabled
* @crtc: crtc for the plane
*
* Disable @plane on @crtc, making sure that the pipe is running first.
*/
static void intel_disable_primary_hw_plane(struct drm_plane *plane,
struct drm_crtc *crtc)
static int
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_rotation_info *info = &view->rotation_info;
unsigned int tile_height, tile_pitch;
 
assert_pipe_enabled(dev_priv, intel_crtc->pipe);
*view = i915_ggtt_view_normal;
 
if (!intel_crtc->primary_enabled)
return;
if (!plane_state)
return 0;
 
intel_crtc->primary_enabled = false;
if (!intel_rotation_90_or_270(plane_state->rotation))
return 0;
 
dev_priv->display.update_primary_plane(crtc, plane->fb,
crtc->x, crtc->y);
*view = i915_ggtt_view_rotated;
 
info->height = fb->height;
info->pixel_format = fb->pixel_format;
info->pitch = fb->pitches[0];
info->uv_offset = fb->offsets[1];
info->fb_modifier = fb->modifier[0];
 
tile_height = intel_tile_height(fb->dev, fb->pixel_format,
fb->modifier[0], 0);
tile_pitch = PAGE_SIZE / tile_height;
info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
info->size = info->width_pages * info->height_pages * PAGE_SIZE;
 
if (info->pixel_format == DRM_FORMAT_NV12) {
tile_height = intel_tile_height(fb->dev, fb->pixel_format,
fb->modifier[0], 1);
tile_pitch = PAGE_SIZE / tile_height;
info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
tile_height);
info->size_uv = info->width_pages_uv * info->height_pages_uv *
PAGE_SIZE;
}
 
static bool need_vtd_wa(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
return true;
#endif
return false;
return 0;
}
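For a 90/270-degree view the rotation info is sized in whole GTT pages: a 4 KiB tile that spans tile_height scanlines is PAGE_SIZE / tile_height bytes wide, so the view needs DIV_ROUND_UP(pitch, tile_pitch) pages per row and DIV_ROUND_UP(height, tile_height) rows of pages. A worked sketch with an assumed Y-tiled 1920x1080 XRGB framebuffer:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed Y-tiled 1920x1080 XRGB fb: 7680-byte pitch, 32-line tiles. */
	unsigned int pitch = 7680, height = 1080, tile_height = 32;
	unsigned int tile_pitch = PAGE_SIZE / tile_height;	/* 128 bytes */

	unsigned int width_pages = DIV_ROUND_UP(pitch, tile_pitch);
	unsigned int height_pages = DIV_ROUND_UP(height, tile_height);

	printf("rotated view: %u x %u pages = %u bytes\n",
	       width_pages, height_pages,
	       width_pages * height_pages * PAGE_SIZE);
	return 0;
}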
 
static int intel_align_height(struct drm_device *dev, int height, bool tiled)
static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
{
int tile_height;
 
tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
return ALIGN(height, tile_height);
if (INTEL_INFO(dev_priv)->gen >= 9)
return 256 * 1024;
else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
IS_VALLEYVIEW(dev_priv))
return 128 * 1024;
else if (INTEL_INFO(dev_priv)->gen >= 4)
return 4 * 1024;
else
return 0;
}
 
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
struct intel_engine_cs *pipelined)
const struct drm_plane_state *plane_state,
struct intel_engine_cs *pipelined,
struct drm_i915_gem_request **pipelined_request)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
u32 alignment;
int ret;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
switch (obj->tiling_mode) {
case I915_TILING_NONE:
if (INTEL_INFO(dev)->gen >= 9)
alignment = 256 * 1024;
else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
alignment = 128 * 1024;
else if (INTEL_INFO(dev)->gen >= 4)
alignment = 4 * 1024;
else
alignment = 64 * 1024;
switch (fb->modifier[0]) {
case DRM_FORMAT_MOD_NONE:
alignment = intel_linear_alignment(dev_priv);
break;
case I915_TILING_X:
case I915_FORMAT_MOD_X_TILED:
if (INTEL_INFO(dev)->gen >= 9)
alignment = 256 * 1024;
else {
2225,13 → 2355,22
alignment = 0;
}
break;
case I915_TILING_Y:
WARN(1, "Y tiled bo slipped through, driver bug!\n");
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
"Y tiling bo slipped through, driver bug!\n"))
return -EINVAL;
alignment = 1 * 1024 * 1024;
break;
default:
BUG();
MISSING_CASE(fb->modifier[0]);
return -EINVAL;
}
 
ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
if (ret)
return ret;
 
/* Note that the w/a also requires 64 PTE of padding following the
* bo. We currently fill all unused PTE with the shadow page and so
* we should always have valid PTE following the scanout preventing
2250,7 → 2389,8
intel_runtime_pm_get(dev_priv);
 
dev_priv->mm.interruptible = false;
ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
pipelined_request, &view);
if (ret)
goto err_interruptible;
 
2259,11 → 2399,24
* framebuffer compression. For simplicity, we always install
* a fence as the cost is not that onerous.
*/
if (view.type == I915_GGTT_VIEW_NORMAL) {
ret = i915_gem_object_get_fence(obj);
if (ret)
if (ret == -EDEADLK) {
/*
* -EDEADLK means there are no free fences
* and no pending flips.
*
* This is propagated to atomic, but it uses
* -EDEADLK to force a locking recovery, so
* change the returned error to -EBUSY.
*/
ret = -EBUSY;
goto err_unpin;
} else if (ret)
goto err_unpin;
 
i915_gem_object_pin_fence(obj);
}
 
dev_priv->mm.interruptible = true;
intel_runtime_pm_put(dev_priv);
2270,7 → 2423,7
return 0;
 
err_unpin:
i915_gem_object_unpin_from_display_plane(obj);
i915_gem_object_unpin_from_display_plane(obj, &view);
err_interruptible:
dev_priv->mm.interruptible = true;
intel_runtime_pm_put(dev_priv);
2277,17 → 2430,28
return ret;
}
 
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
int ret;
 
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
 
ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
WARN_ONCE(ret, "Couldn't get view from plane state!");
 
if (view.type == I915_GGTT_VIEW_NORMAL)
i915_gem_object_unpin_fence(obj);
// i915_gem_object_unpin_from_display_plane(obj);
 
i915_gem_object_unpin_from_display_plane(obj, &view);
}
 
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
* is assumed to be a power-of-two. */
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
int *x, int *y,
unsigned int tiling_mode,
unsigned int cpp,
unsigned int pitch)
2303,16 → 2467,17
 
return tile_rows * pitch * 8 + tiles * 4096;
} else {
unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
unsigned int offset;
 
offset = *y * pitch + *x * cpp;
*y = 0;
*x = (offset & 4095) / cpp;
return offset & -4096;
*y = (offset & alignment) / pitch;
*x = ((offset & alignment) - *y * pitch) / cpp;
return offset & ~alignment;
}
}
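In the linear branch the function splits the byte offset y * pitch + x * cpp into an alignment-sized base, which the display base register can take, plus a small x/y remainder. A worked sketch of that split, with an assumed 4 KiB alignment:

#include <stdio.h>

int main(void)
{
	/* Assumed linear fb: 32bpp (cpp = 4), 8192-byte pitch, 4 KiB alignment. */
	unsigned int x = 100, y = 50, cpp = 4, pitch = 8192;
	unsigned int alignment = 4096 - 1;

	unsigned int offset = y * pitch + x * cpp;	/* 410000 */
	unsigned int base = offset & ~alignment;	/* 409600 */

	/* Fold the sub-page remainder back into x/y, as the code above does. */
	y = (offset & alignment) / pitch;
	x = ((offset & alignment) - y * pitch) / cpp;

	printf("base = %u, adjusted x = %u, y = %u\n", base, x, y);
	return 0;
}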
 
int intel_format_to_fourcc(int format)
static int i9xx_format_to_fourcc(int format)
{
switch (format) {
case DISPPLANE_8BPP:
2333,47 → 2498,83
}
}
 
static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
struct intel_plane_config *plane_config)
static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
switch (format) {
case PLANE_CTL_FORMAT_RGB_565:
return DRM_FORMAT_RGB565;
default:
case PLANE_CTL_FORMAT_XRGB_8888:
if (rgb_order) {
if (alpha)
return DRM_FORMAT_ABGR8888;
else
return DRM_FORMAT_XBGR8888;
} else {
if (alpha)
return DRM_FORMAT_ARGB8888;
else
return DRM_FORMAT_XRGB8888;
}
case PLANE_CTL_FORMAT_XRGB_2101010:
if (rgb_order)
return DRM_FORMAT_XBGR2101010;
else
return DRM_FORMAT_XRGB2101010;
}
}
 
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = NULL;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
u32 base = plane_config->base;
struct drm_framebuffer *fb = &plane_config->fb->base;
u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
u32 size_aligned = round_up(plane_config->base + plane_config->size,
PAGE_SIZE);
 
size_aligned -= base_aligned;
 
if (plane_config->size == 0)
return false;
 
obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
plane_config->size);
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
return false;
 
obj = i915_gem_object_create_stolen_for_preallocated(dev,
base_aligned,
base_aligned,
size_aligned);
if (!obj)
return false;
 
obj->map_and_fenceable = true;
main_fb_obj = obj;
obj->tiling_mode = plane_config->tiling;
if (obj->tiling_mode == I915_TILING_X)
obj->stride = fb->pitches[0];
 
if (plane_config->tiled) {
obj->tiling_mode = I915_TILING_X;
obj->stride = crtc->base.primary->fb->pitches[0];
}
mode_cmd.pixel_format = fb->pixel_format;
mode_cmd.width = fb->width;
mode_cmd.height = fb->height;
mode_cmd.pitches[0] = fb->pitches[0];
mode_cmd.modifier[0] = fb->modifier[0];
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
 
mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
mode_cmd.width = crtc->base.primary->fb->width;
mode_cmd.height = crtc->base.primary->fb->height;
mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
 
mutex_lock(&dev->struct_mutex);
 
if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
&mode_cmd, obj)) {
DRM_DEBUG_KMS("intel fb init failed\n");
goto out_unref_obj;
}
 
obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
mutex_unlock(&dev->struct_mutex);
 
DRM_DEBUG_KMS("plane fb obj %p\n", obj);
DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
return true;
 
out_unref_obj:
2382,23 → 2583,44
return false;
}
 
static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
struct intel_plane_config *plane_config)
/* Update plane->state->fb to match plane->fb after driver-internal updates */
static void
update_state_fb(struct drm_plane *plane)
{
if (plane->fb == plane->state->fb)
return;
 
if (plane->state->fb)
drm_framebuffer_unreference(plane->state->fb);
plane->state->fb = plane->fb;
if (plane->state->fb)
drm_framebuffer_reference(plane->state->fb);
}
 
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct intel_crtc *i;
struct drm_i915_gem_object *obj;
struct drm_plane *primary = intel_crtc->base.primary;
struct drm_plane_state *plane_state = primary->state;
struct drm_crtc_state *crtc_state = intel_crtc->base.state;
struct intel_plane *intel_plane = to_intel_plane(primary);
struct drm_framebuffer *fb;
 
if (!intel_crtc->base.primary->fb)
if (!plane_config->fb)
return;
 
if (intel_alloc_plane_obj(intel_crtc, plane_config))
return;
if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
fb = &plane_config->fb->base;
goto valid_fb;
}
 
kfree(intel_crtc->base.primary->fb);
intel_crtc->base.primary->fb = NULL;
kfree(plane_config->fb);
 
/*
* Failed to alloc the obj, check to see if we should share
2413,21 → 2635,52
if (!i->active)
continue;
 
obj = intel_fb_obj(c->primary->fb);
if (obj == NULL)
fb = c->primary->fb;
if (!fb)
continue;
 
obj = intel_fb_obj(fb);
if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
drm_framebuffer_reference(fb);
goto valid_fb;
}
}
 
/*
* We've failed to reconstruct the BIOS FB. Current display state
* indicates that the primary plane is visible, but has a NULL FB,
* which will lead to problems later if we don't fix it up. The
* simplest solution is to just disable the primary plane now and
* pretend the BIOS never had it enabled.
*/
to_intel_plane_state(plane_state)->visible = false;
crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
intel_pre_disable_primary(&intel_crtc->base);
intel_plane->disable_plane(primary, &intel_crtc->base);
 
return;
 
valid_fb:
plane_state->src_x = 0;
plane_state->src_y = 0;
plane_state->src_w = fb->width << 16;
plane_state->src_h = fb->height << 16;
 
plane_state->crtc_x = 0;
plane_state->crtc_y = 0;
plane_state->crtc_w = fb->width;
plane_state->crtc_h = fb->height;
 
obj = intel_fb_obj(fb);
if (obj->tiling_mode != I915_TILING_NONE)
dev_priv->preserve_bios_swizzle = true;
 
drm_framebuffer_reference(c->primary->fb);
intel_crtc->base.primary->fb = c->primary->fb;
obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
break;
drm_framebuffer_reference(fb);
primary->fb = primary->state->fb = fb;
primary->crtc = primary->state->crtc = &intel_crtc->base;
intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
}
}
 
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
2436,6 → 2689,8
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *primary = crtc->primary;
bool visible = to_intel_plane_state(primary->state)->visible;
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long linear_offset;
2443,7 → 2698,7
u32 reg = DSPCNTR(plane);
int pixel_size;
 
if (!intel_crtc->primary_enabled) {
if (!visible || !fb) {
I915_WRITE(reg, 0);
if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(DSPSURF(plane), 0);
2471,13 → 2726,13
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
((intel_crtc->config.pipe_src_h - 1) << 16) |
(intel_crtc->config.pipe_src_w - 1));
((intel_crtc->config->pipe_src_h - 1) << 16) |
(intel_crtc->config->pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
I915_WRITE(PRIMSIZE(plane),
((intel_crtc->config.pipe_src_h - 1) << 16) |
(intel_crtc->config.pipe_src_w - 1));
((intel_crtc->config->pipe_src_h - 1) << 16) |
(intel_crtc->config->pipe_src_w - 1));
I915_WRITE(PRIMPOS(plane), 0);
I915_WRITE(PRIMCNSTALPHA(plane), 0);
}
2487,7 → 2742,6
dspcntr |= DISPPLANE_8BPP;
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
dspcntr |= DISPPLANE_BGRX555;
break;
case DRM_FORMAT_RGB565:
2494,19 → 2748,15
dspcntr |= DISPPLANE_BGRX565;
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
dspcntr |= DISPPLANE_BGRX888;
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
dspcntr |= DISPPLANE_RGBX888;
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
dspcntr |= DISPPLANE_BGRX101010;
break;
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
dspcntr |= DISPPLANE_RGBX101010;
break;
default:
2524,7 → 2774,8
 
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
intel_gen4_compute_page_offset(dev_priv,
&x, &y, obj->tiling_mode,
pixel_size,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
2532,24 → 2783,24
intel_crtc->dspaddr_offset = linear_offset;
}
 
if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
 
x += (intel_crtc->config.pipe_src_w - 1);
y += (intel_crtc->config.pipe_src_h - 1);
x += (intel_crtc->config->pipe_src_w - 1);
y += (intel_crtc->config->pipe_src_h - 1);
 
/* Find the last pixel of the last line of the display
data and add it to linear_offset */
linear_offset +=
(intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
(intel_crtc->config.pipe_src_w - 1) * pixel_size;
(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
(intel_crtc->config->pipe_src_w - 1) * pixel_size;
}
 
intel_crtc->adjusted_x = x;
intel_crtc->adjusted_y = y;
 
I915_WRITE(reg, dspcntr);
 
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPSURF(plane),
2568,6 → 2819,8
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *primary = crtc->primary;
bool visible = to_intel_plane_state(primary->state)->visible;
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long linear_offset;
2575,7 → 2828,7
u32 reg = DSPCNTR(plane);
int pixel_size;
 
if (!intel_crtc->primary_enabled) {
if (!visible || !fb) {
I915_WRITE(reg, 0);
I915_WRITE(DSPSURF(plane), 0);
POSTING_READ(reg);
2603,19 → 2856,15
dspcntr |= DISPPLANE_BGRX565;
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
dspcntr |= DISPPLANE_BGRX888;
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
dspcntr |= DISPPLANE_RGBX888;
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
dspcntr |= DISPPLANE_BGRX101010;
break;
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
dspcntr |= DISPPLANE_RGBX101010;
break;
default:
2630,30 → 2879,31
 
linear_offset = y * fb->pitches[0] + x * pixel_size;
intel_crtc->dspaddr_offset =
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
intel_gen4_compute_page_offset(dev_priv,
&x, &y, obj->tiling_mode,
pixel_size,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
 
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
x += (intel_crtc->config.pipe_src_w - 1);
y += (intel_crtc->config.pipe_src_h - 1);
x += (intel_crtc->config->pipe_src_w - 1);
y += (intel_crtc->config->pipe_src_h - 1);
 
/* Find the last pixel of the last line of the display
data and add it to linear_offset */
linear_offset +=
(intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
(intel_crtc->config.pipe_src_w - 1) * pixel_size;
(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
(intel_crtc->config->pipe_src_w - 1) * pixel_size;
}
}
 
intel_crtc->adjusted_x = x;
intel_crtc->adjusted_y = y;
 
I915_WRITE(reg, dspcntr);
 
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_WRITE(DSPSURF(plane),
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2666,6 → 2916,174
POSTING_READ(reg);
}
 
u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
uint32_t pixel_format)
{
u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
 
/*
* The stride is either expressed as a multiple of 64 bytes
* chunks for linear buffers or in number of tiles for tiled
* buffers.
*/
switch (fb_modifier) {
case DRM_FORMAT_MOD_NONE:
return 64;
case I915_FORMAT_MOD_X_TILED:
if (INTEL_INFO(dev)->gen == 2)
return 128;
return 512;
case I915_FORMAT_MOD_Y_TILED:
/* No need to check for old gens and Y tiling since this is
* about the display engine and those will be blocked before
* we get here.
*/
return 128;
case I915_FORMAT_MOD_Yf_TILED:
if (bits_per_pixel == 8)
return 64;
else
return 128;
default:
MISSING_CASE(fb_modifier);
return 64;
}
}
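The PLANE_STRIDE register therefore takes the pitch divided by this unit: 64-byte chunks for linear scanout, one tile width otherwise; skylake_update_primary_plane() below does exactly that division. A small sketch with an assumed 7680-byte pitch:

#include <stdio.h>

int main(void)
{
	/* Assumed pitch of a 1920-wide 32bpp fb. */
	unsigned int pitch = 7680;

	printf("linear:  stride field = %u (64-byte units)\n", pitch / 64);
	printf("X-tiled: stride field = %u (512-byte tiles)\n", pitch / 512);
	printf("Y-tiled: stride field = %u (128-byte tiles)\n", pitch / 128);
	return 0;
}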
 
unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
struct drm_i915_gem_object *obj,
unsigned int plane)
{
const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
struct i915_vma *vma;
unsigned char *offset;
 
if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
view = &i915_ggtt_view_rotated;
 
vma = i915_gem_obj_to_ggtt_view(obj, view);
if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
view->type))
return -1;
 
offset = (unsigned char *)vma->node.start;
 
if (plane == 1) {
offset += vma->ggtt_view.rotation_info.uv_start_page *
PAGE_SIZE;
}
 
return (unsigned long)offset;
}
 
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
}
 
/*
* This function detaches (i.e. unbinds) unused scalers in hardware
*/
static void skl_detach_scalers(struct intel_crtc *intel_crtc)
{
struct intel_crtc_scaler_state *scaler_state;
int i;
 
scaler_state = &intel_crtc->config->scaler_state;
 
/* loop through and disable scalers that aren't in use */
for (i = 0; i < intel_crtc->num_scalers; i++) {
if (!scaler_state->scalers[i].in_use)
skl_detach_scaler(intel_crtc, i);
}
}
 
u32 skl_plane_ctl_format(uint32_t pixel_format)
{
switch (pixel_format) {
case DRM_FORMAT_C8:
return PLANE_CTL_FORMAT_INDEXED;
case DRM_FORMAT_RGB565:
return PLANE_CTL_FORMAT_RGB_565;
case DRM_FORMAT_XBGR8888:
return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
case DRM_FORMAT_XRGB8888:
return PLANE_CTL_FORMAT_XRGB_8888;
/*
* XXX: For ARGB/ABGR formats we default to expecting scanout buffers
* to be already pre-multiplied. We need to add a knob (or a different
* DRM_FORMAT) for user-space to configure that.
*/
case DRM_FORMAT_ABGR8888:
return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
PLANE_CTL_ALPHA_SW_PREMULTIPLY;
case DRM_FORMAT_ARGB8888:
return PLANE_CTL_FORMAT_XRGB_8888 |
PLANE_CTL_ALPHA_SW_PREMULTIPLY;
case DRM_FORMAT_XRGB2101010:
return PLANE_CTL_FORMAT_XRGB_2101010;
case DRM_FORMAT_XBGR2101010:
return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
case DRM_FORMAT_YUYV:
return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
case DRM_FORMAT_YVYU:
return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
case DRM_FORMAT_UYVY:
return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
case DRM_FORMAT_VYUY:
return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
default:
MISSING_CASE(pixel_format);
}
 
return 0;
}
 
u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
switch (fb_modifier) {
case DRM_FORMAT_MOD_NONE:
break;
case I915_FORMAT_MOD_X_TILED:
return PLANE_CTL_TILED_X;
case I915_FORMAT_MOD_Y_TILED:
return PLANE_CTL_TILED_Y;
case I915_FORMAT_MOD_Yf_TILED:
return PLANE_CTL_TILED_YF;
default:
MISSING_CASE(fb_modifier);
}
 
return 0;
}
 
u32 skl_plane_ctl_rotation(unsigned int rotation)
{
switch (rotation) {
case BIT(DRM_ROTATE_0):
break;
/*
* DRM_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
* while i915 HW rotation is clockwise; that's why 90 and 270 are swapped here.
*/
case BIT(DRM_ROTATE_90):
return PLANE_CTL_ROTATE_270;
case BIT(DRM_ROTATE_180):
return PLANE_CTL_ROTATE_180;
case BIT(DRM_ROTATE_270):
return PLANE_CTL_ROTATE_90;
default:
MISSING_CASE(rotation);
}
 
return 0;
}
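Since DRM rotation values are counter-clockwise and the hardware rotates clockwise, 90 and 270 trade places. A tiny sketch of the mapping (strings stand in for the PLANE_CTL bits):

#include <stdio.h>

/* DRM angle (counter-clockwise) -> hardware rotation (clockwise). */
static const char *skl_hw_rotation(int drm_degrees)
{
	switch (drm_degrees) {
	case 90:	return "PLANE_CTL_ROTATE_270";
	case 180:	return "PLANE_CTL_ROTATE_180";
	case 270:	return "PLANE_CTL_ROTATE_90";
	default:	return "no rotation";
	}
}

int main(void)
{
	int angles[] = { 0, 90, 180, 270 };
	for (int i = 0; i < 4; i++)
		printf("DRM %3d -> %s\n", angles[i], skl_hw_rotation(angles[i]));
	return 0;
}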
 
static void skylake_update_primary_plane(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
2673,12 → 3091,24
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb;
struct drm_plane *plane = crtc->primary;
bool visible = to_intel_plane_state(plane->state)->visible;
struct drm_i915_gem_object *obj;
int pipe = intel_crtc->pipe;
u32 plane_ctl, stride;
u32 plane_ctl, stride_div, stride;
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
int x_offset, y_offset;
unsigned long surf_addr;
struct intel_crtc_state *crtc_state = intel_crtc->config;
struct intel_plane_state *plane_state;
int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
int scaler_id = -1;
 
if (!intel_crtc->primary_enabled) {
plane_state = to_intel_plane_state(plane->state);
 
if (!visible || !fb) {
I915_WRITE(PLANE_CTL(pipe, 0), 0);
I915_WRITE(PLANE_SURF(pipe, 0), 0);
POSTING_READ(PLANE_CTL(pipe, 0));
2689,66 → 3119,73
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE;
 
switch (fb->pixel_format) {
case DRM_FORMAT_RGB565:
plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
break;
case DRM_FORMAT_XRGB8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
break;
case DRM_FORMAT_XBGR8888:
plane_ctl |= PLANE_CTL_ORDER_RGBX;
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
break;
case DRM_FORMAT_XRGB2101010:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
break;
case DRM_FORMAT_XBGR2101010:
plane_ctl |= PLANE_CTL_ORDER_RGBX;
plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
break;
default:
BUG();
}
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
 
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
rotation = plane->state->rotation;
plane_ctl |= skl_plane_ctl_rotation(rotation);
 
/*
* The stride is either expressed as a multiple of 64 bytes chunks for
* linear buffers or in number of tiles for tiled buffers.
*/
switch (obj->tiling_mode) {
case I915_TILING_NONE:
stride = fb->pitches[0] >> 6;
break;
case I915_TILING_X:
plane_ctl |= PLANE_CTL_TILED_X;
stride = fb->pitches[0] >> 9;
break;
default:
BUG();
obj = intel_fb_obj(fb);
stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
fb->pixel_format);
surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
 
WARN_ON(drm_rect_width(&plane_state->src) == 0);
 
scaler_id = plane_state->scaler_id;
src_x = plane_state->src.x1 >> 16;
src_y = plane_state->src.y1 >> 16;
src_w = drm_rect_width(&plane_state->src) >> 16;
src_h = drm_rect_height(&plane_state->src) >> 16;
dst_x = plane_state->dst.x1;
dst_y = plane_state->dst.y1;
dst_w = drm_rect_width(&plane_state->dst);
dst_h = drm_rect_height(&plane_state->dst);
 
WARN_ON(x != src_x || y != src_y);
 
if (intel_rotation_90_or_270(rotation)) {
/* stride = Surface height in tiles */
tile_height = intel_tile_height(dev, fb->pixel_format,
fb->modifier[0], 0);
stride = DIV_ROUND_UP(fb->height, tile_height);
x_offset = stride * tile_height - y - src_h;
y_offset = x;
plane_size = (src_w - 1) << 16 | (src_h - 1);
} else {
stride = fb->pitches[0] / stride_div;
x_offset = x;
y_offset = y;
plane_size = (src_h - 1) << 16 | (src_w - 1);
}
plane_offset = y_offset << 16 | x_offset;
 
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
intel_crtc->adjusted_x = x_offset;
intel_crtc->adjusted_y = y_offset;
 
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
 
DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
i915_gem_obj_ggtt_offset(obj),
x, y, fb->width, fb->height,
fb->pitches[0]);
if (scaler_id >= 0) {
uint32_t ps_ctrl = 0;
 
WARN_ON(!dst_w || !dst_h);
ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
crtc_state->scaler_state.scalers[scaler_id].mode;
I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
I915_WRITE(PLANE_POS(pipe, 0), 0);
I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
I915_WRITE(PLANE_SIZE(pipe, 0),
(intel_crtc->config.pipe_src_h - 1) << 16 |
(intel_crtc->config.pipe_src_w - 1));
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
} else {
I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
}
 
I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
 
POSTING_READ(PLANE_SURF(pipe, 0));
}
 
2760,8 → 3197,8
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
if (dev_priv->fbc.disable_fbc)
dev_priv->fbc.disable_fbc(dev_priv);
 
dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
2784,32 → 3221,25
 
static void intel_update_primary_planes(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
 
for_each_crtc(dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *plane = to_intel_plane(crtc->primary);
struct intel_plane_state *plane_state;
 
drm_modeset_lock(&crtc->mutex, NULL);
/*
* FIXME: Once we have proper support for primary planes (and
* disabling them without disabling the entire crtc) allow again
* a NULL crtc->primary->fb.
*/
if (intel_crtc->active && crtc->primary->fb)
dev_priv->display.update_primary_plane(crtc,
crtc->primary->fb,
crtc->x,
crtc->y);
drm_modeset_unlock(&crtc->mutex);
drm_modeset_lock_crtc(crtc, &plane->base);
 
plane_state = to_intel_plane_state(plane->base.state);
 
if (plane_state->base.fb)
plane->commit_plane(&plane->base, plane_state);
 
drm_modeset_unlock_crtc(crtc);
}
}
 
void intel_prepare_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
 
/* no reset support for gen2 */
if (IS_GEN2(dev))
return;
2819,16 → 3249,12
return;
 
drm_modeset_lock_all(dev);
 
/*
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
*/
for_each_intel_crtc(dev, crtc) {
if (crtc->active)
dev_priv->display.crtc_disable(&crtc->base);
intel_display_suspend(dev);
}
}
 
void intel_finish_reset(struct drm_device *dev)
{
2852,6 → 3278,9
* so update the base address of all primary
* planes to the last fb to make sure we're
* showing the correct fb after a reset.
*
* FIXME: Atomic will make this obsolete since we won't schedule
* CS-based flips (which might get lost in gpu resets) any more.
*/
intel_update_primary_planes(dev);
return;
2871,7 → 3300,7
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
 
intel_modeset_setup_hw_state(dev, true);
intel_display_resume(dev);
 
intel_hpd_init(dev_priv);
 
2878,11 → 3307,11
drm_modeset_unlock_all(dev);
}
 
static int
static void
intel_finish_fb(struct drm_framebuffer *old_fb)
{
struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
bool was_interruptible = dev_priv->mm.interruptible;
int ret;
 
2889,16 → 3318,19
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer.
* framebuffer. Note that we rely on userspace rendering
* into the buffer attached to the pipe they are waiting
* on. If not, userspace generates a GPU hang with IPEHR
* pointing to the MI_WAIT_FOR_EVENT.
*
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
dev_priv->mm.interruptible = false;
ret = i915_gem_object_finish_gpu(obj);
ret = i915_gem_object_wait_rendering(obj, true);
dev_priv->mm.interruptible = was_interruptible;
 
return ret;
WARN_ON(ret);
}
 
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2920,15 → 3352,24
}
#endif
 
static void intel_update_pipe_size(struct intel_crtc *crtc)
static void intel_update_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const struct drm_display_mode *adjusted_mode;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
 
if (!i915.fastboot)
return;
/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
crtc->base.mode = crtc->base.state->mode;
 
DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
 
if (HAS_DDI(dev))
intel_set_pipe_csc(&crtc->base);
 
/*
* Update pipe size and adjust fitter if needed: the reason for this is
* that in compute_mode_changes we check the native mode (not the pfit
2936,90 → 3377,26
* fastboot case, we'll flip, but if we don't update the pipesrc and
* pfit state, we'll end up with a big fb scanned out into the wrong
* sized surface.
*
* To fix this properly, we need to hoist the checks up into
* compute_mode_changes (or above), check the actual pfit state and
* whether the platform allows pfit disable with pipe active, and only
* then update the pipesrc and pfit state, even on the flip path.
*/
 
adjusted_mode = &crtc->config.adjusted_mode;
 
I915_WRITE(PIPESRC(crtc->pipe),
((adjusted_mode->crtc_hdisplay - 1) << 16) |
(adjusted_mode->crtc_vdisplay - 1));
if (!crtc->config.pch_pfit.enabled &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
I915_WRITE(PF_CTL(crtc->pipe), 0);
I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
}
crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
}
((pipe_config->pipe_src_w - 1) << 16) |
(pipe_config->pipe_src_h - 1));
 
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
int ret;
/* on skylake this is done by detaching scalers */
if (INTEL_INFO(dev)->gen >= 9) {
skl_detach_scalers(crtc);
 
 
/* no fb bound */
if (!fb) {
DRM_ERROR("No FB bound\n");
return 0;
if (pipe_config->pch_pfit.enabled)
skylake_pfit_enable(crtc);
} else if (HAS_PCH_SPLIT(dev)) {
if (pipe_config->pch_pfit.enabled)
ironlake_pfit_enable(crtc);
else if (old_crtc_state->pch_pfit.enabled)
ironlake_pfit_disable(crtc, true);
}
 
if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
plane_name(intel_crtc->plane),
INTEL_INFO(dev)->num_pipes);
return -EINVAL;
}
 
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
if (ret == 0)
i915_gem_track_fb(old_obj, intel_fb_obj(fb),
INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret != 0) {
DRM_ERROR("pin & fence failed\n");
return ret;
}
 
dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
if (intel_crtc->active)
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
 
crtc->primary->fb = fb;
crtc->x = x;
crtc->y = y;
 
if (old_fb) {
if (intel_crtc->active && old_fb != fb)
intel_wait_for_vblank(dev, intel_crtc->pipe);
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(old_obj);
mutex_unlock(&dev->struct_mutex);
}
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
return 0;
}
 
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3061,38 → 3438,6
FDI_FE_ERRC_ENABLE);
}
 
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
{
return crtc->base.enabled && crtc->active &&
crtc->config.has_pch_encoder;
}
 
static void ivb_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *pipe_B_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
struct intel_crtc *pipe_C_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
uint32_t temp;
 
/*
* When everything is off disable fdi C so that we could enable fdi B
* with all lanes. Note that we don't care about enabled pipes without
* an enabled pch encoder.
*/
if (!pipe_has_enabled_pch(pipe_B_crtc) &&
!pipe_has_enabled_pch(pipe_C_crtc)) {
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
 
temp = I915_READ(SOUTH_CHICKEN1);
temp &= ~FDI_BC_BIFURCATION_SELECT;
DRM_DEBUG_KMS("disabling fdi C rx\n");
I915_WRITE(SOUTH_CHICKEN1, temp);
}
}
 
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
3119,7 → 3464,7
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
3217,7 → 3562,7
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3368,7 → 3713,7
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[j/2];
3456,7 → 3801,7
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
 
3619,11 → 3964,11
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
 
/* It is necessary to ungate the pixclk gate prior to programming
* the divisors, and gate it back when it is done.
3700,7 → 4045,7
 
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
 
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
}
 
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3708,7 → 4053,7
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
 
I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
I915_READ(HTOTAL(cpu_transcoder)));
3727,20 → 4072,23
I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
 
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t temp;
 
temp = I915_READ(SOUTH_CHICKEN1);
if (temp & FDI_BC_BIFURCATION_SELECT)
if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
return;
 
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
 
temp &= ~FDI_BC_BIFURCATION_SELECT;
if (enable)
temp |= FDI_BC_BIFURCATION_SELECT;
DRM_DEBUG_KMS("enabling fdi C rx\n");
 
DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
I915_WRITE(SOUTH_CHICKEN1, temp);
POSTING_READ(SOUTH_CHICKEN1);
}
3748,20 → 4096,19
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
switch (intel_crtc->pipe) {
case PIPE_A:
break;
case PIPE_B:
if (intel_crtc->config.fdi_lanes > 2)
WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
if (intel_crtc->config->fdi_lanes > 2)
cpt_set_fdi_bc_bifurcation(dev, false);
else
cpt_enable_fdi_bc_bifurcation(dev);
cpt_set_fdi_bc_bifurcation(dev, true);
 
break;
case PIPE_C:
cpt_enable_fdi_bc_bifurcation(dev);
cpt_set_fdi_bc_bifurcation(dev, true);
 
break;
default:
3806,7 → 4153,7
temp = I915_READ(PCH_DPLL_SEL);
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
temp |= sel;
else
temp &= ~sel;
3829,7 → 4176,7
intel_fdi_normal_train(crtc);
 
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) && intel_crtc->config.has_dp_encoder) {
if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
3836,8 → 4183,7
temp &= ~(TRANS_DP_PORT_SEL_MASK |
TRANS_DP_SYNC_MASK |
TRANS_DP_BPC_MASK);
temp |= (TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_ENH_FRAMING);
temp |= TRANS_DP_OUTPUT_ENABLE;
temp |= bpc << 9; /* same format but at 11:9 */
 
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3870,7 → 4216,7
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 
assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
 
3882,33 → 4228,17
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
 
void intel_put_shared_dpll(struct intel_crtc *crtc)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
if (pll == NULL)
return;
 
if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
WARN(1, "bad %s crtc mask\n", pll->name);
return;
}
 
pll->config.crtc_mask &= ~(1 << crtc->pipe);
if (pll->config.crtc_mask == 0) {
WARN_ON(pll->on);
WARN_ON(pll->active);
}
 
crtc->config.shared_dpll = DPLL_ID_PRIVATE;
}
 
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll;
struct intel_shared_dpll_config *shared_dpll;
enum intel_dpll_id i;
int max = dev_priv->num_shared_dpll;
 
shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
 
if (HAS_PCH_IBX(dev_priv->dev)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = (enum intel_dpll_id) crtc->pipe;
3917,24 → 4247,46
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
crtc->base.base.id, pll->name);
 
WARN_ON(pll->new_config->crtc_mask);
WARN_ON(shared_dpll[i].crtc_mask);
 
goto found;
}
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
if (IS_BROXTON(dev_priv->dev)) {
/* PLL is attached to port in bxt */
struct intel_encoder *encoder;
struct intel_digital_port *intel_dig_port;
 
encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
if (WARN_ON(!encoder))
return NULL;
 
intel_dig_port = enc_to_dig_port(&encoder->base);
/* 1:1 mapping between ports and PLLs */
i = (enum intel_dpll_id)intel_dig_port->port;
pll = &dev_priv->shared_dplls[i];
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
crtc->base.base.id, pll->name);
WARN_ON(shared_dpll[i].crtc_mask);
 
goto found;
} else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
/* Do not consider SPLL */
max = 2;
 
for (i = 0; i < max; i++) {
pll = &dev_priv->shared_dplls[i];
 
/* Only want to check enabled timings first */
if (pll->new_config->crtc_mask == 0)
if (shared_dpll[i].crtc_mask == 0)
continue;
 
if (memcmp(&crtc->new_config->dpll_hw_state,
&pll->new_config->hw_state,
sizeof(pll->new_config->hw_state)) == 0) {
if (memcmp(&crtc_state->dpll_hw_state,
&shared_dpll[i].hw_state,
sizeof(crtc_state->dpll_hw_state)) == 0) {
DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
crtc->base.base.id, pll->name,
pll->new_config->crtc_mask,
shared_dpll[i].crtc_mask,
pll->active);
goto found;
}
3943,7 → 4295,7
/* Ok no matching timings, maybe there's a free one? */
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
if (pll->new_config->crtc_mask == 0) {
if (shared_dpll[i].crtc_mask == 0) {
DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
crtc->base.base.id, pll->name);
goto found;
3953,99 → 4305,209
return NULL;
 
found:
if (pll->new_config->crtc_mask == 0)
pll->new_config->hw_state = crtc->new_config->dpll_hw_state;
if (shared_dpll[i].crtc_mask == 0)
shared_dpll[i].hw_state =
crtc_state->dpll_hw_state;
 
crtc->new_config->shared_dpll = i;
crtc_state->shared_dpll = i;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
 
pll->new_config->crtc_mask |= 1 << crtc->pipe;
shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
 
return pll;
}
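/*
 * Sharing rule in brief (informal summary): two CRTCs whose
 * crtc_state->dpll_hw_state bytes compare equal (the memcmp above)
 * end up with the same shared_dpll id, each setting its pipe's bit
 * in that PLL's crtc_mask.
 */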
 
/**
* intel_shared_dpll_start_config - start a new PLL staged config
* @dev_priv: DRM device
* @clear_pipes: mask of pipes that will have their PLLs freed
*
* Starts a new PLL staged config, copying the current config but
* releasing the references of pipes specified in clear_pipes.
*/
static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
unsigned clear_pipes)
static void intel_shared_dpll_commit(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_shared_dpll_config *shared_dpll;
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
 
if (!to_intel_atomic_state(state)->dpll_set)
return;
 
shared_dpll = to_intel_atomic_state(state)->shared_dpll;
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
pll->config = shared_dpll[i];
}
}
 
pll->new_config = kmemdup(&pll->config, sizeof pll->config,
GFP_KERNEL);
if (!pll->new_config)
goto cleanup;
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int dslreg = PIPEDSL(pipe);
u32 temp;
 
pll->new_config->crtc_mask &= ~clear_pipes;
temp = I915_READ(dslreg);
udelay(500);
if (wait_for(I915_READ(dslreg) != temp, 5)) {
if (wait_for(I915_READ(dslreg) != temp, 5))
DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
}
}
 
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned scaler_user, int *scaler_id, unsigned int rotation,
int src_w, int src_h, int dst_w, int dst_h)
{
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
struct intel_crtc *intel_crtc =
to_intel_crtc(crtc_state->base.crtc);
int need_scaling;
 
need_scaling = intel_rotation_90_or_270(rotation) ?
(src_h != dst_w || src_w != dst_h) :
(src_w != dst_w || src_h != dst_h);
 
/*
* If the plane is being disabled, the scaler is no longer required,
* or a detach is forced:
* - free the scaler bound to this plane/crtc
* - to do this, update crtc->scaler_usage
*
* Here the scaler state in crtc_state is marked free so that the
* scaler can be assigned to another user. The actual register
* update that frees the scaler is done during plane/panel-fit
* programming; for that reason crtc/plane_state->scaler_id isn't
* reset here.
*/
if (force_detach || !need_scaling) {
if (*scaler_id >= 0) {
scaler_state->scaler_users &= ~(1 << scaler_user);
scaler_state->scalers[*scaler_id].in_use = 0;
 
DRM_DEBUG_KMS("scaler_user index %u.%u: "
"Staged freeing scaler id %d scaler_users = 0x%x\n",
intel_crtc->pipe, scaler_user, *scaler_id,
scaler_state->scaler_users);
*scaler_id = -1;
}
return 0;
}
 
cleanup:
while (--i >= 0) {
pll = &dev_priv->shared_dplls[i];
kfree(pll->new_config);
pll->new_config = NULL;
/* range checks */
if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
 
src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
"size is out of scaler range\n",
intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
return -EINVAL;
}
 
return -ENOMEM;
/* mark this plane as a scaler user in crtc_state */
scaler_state->scaler_users |= (1 << scaler_user);
DRM_DEBUG_KMS("scaler_user index %u.%u: "
"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
scaler_state->scaler_users);
 
return 0;
}
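/*
 * Example (values assumed): a 1920x1080 source stretched to a
 * 3840x2160 destination at DRM_ROTATE_0 makes need_scaling true
 * and, provided it is inside the SKL_MIN/MAX limits checked above,
 * stages the scaler_user bit in scaler_state->scaler_users.
 */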
 
static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
/**
* skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
*
* @state: crtc's state, including the staged scaler state
*
* Return:
* 0 - scaler_usage updated successfully
* error - requested scaling cannot be supported or other error condition
*/
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
 
WARN_ON(pll->new_config == &pll->config);
 
pll->config = *pll->new_config;
kfree(pll->new_config);
pll->new_config = NULL;
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id, DRM_ROTATE_0,
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
}
 
static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
/**
* skl_update_scaler_plane - Stages update to scaler state for a given plane.
*
* @crtc_state: crtc's state, including the staged scaler state
* @plane_state: atomic plane state to update
*
* Return:
* 0 - scaler_usage updated successfully
* error - requested scaling cannot be supported or other error condition
*/
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_plane *intel_plane =
to_intel_plane(plane_state->base.plane);
struct drm_framebuffer *fb = plane_state->base.fb;
int ret;
 
WARN_ON(pll->new_config == &pll->config);
bool force_detach = !fb || !plane_state->visible;
 
kfree(pll->new_config);
pll->new_config = NULL;
DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
intel_plane->base.base.id, intel_crtc->pipe,
drm_plane_index(&intel_plane->base));
 
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
&plane_state->scaler_id,
plane_state->base.rotation,
drm_rect_width(&plane_state->src) >> 16,
drm_rect_height(&plane_state->src) >> 16,
drm_rect_width(&plane_state->dst),
drm_rect_height(&plane_state->dst));
 
if (ret || plane_state->scaler_id < 0)
return ret;
 
/* check colorkey */
if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
intel_plane->base.base.id);
return -EINVAL;
}
 
/* Check src format */
switch (fb->pixel_format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
break;
default:
DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
intel_plane->base.base.id, fb->base.id, fb->pixel_format);
return -EINVAL;
}
 
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
return 0;
}
 
static void skylake_scaler_disable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int dslreg = PIPEDSL(pipe);
u32 temp;
int i;
 
temp = I915_READ(dslreg);
udelay(500);
if (wait_for(I915_READ(dslreg) != temp, 5)) {
if (wait_for(I915_READ(dslreg) != temp, 5))
DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
for (i = 0; i < crtc->num_scalers; i++)
skl_detach_scaler(crtc, i);
}
}
 
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
4052,13 → 4514,28
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
struct intel_crtc_scaler_state *scaler_state =
&crtc->config->scaler_state;
 
if (crtc->config.pch_pfit.enabled) {
I915_WRITE(PS_CTL(pipe), PS_ENABLE);
I915_WRITE(PS_WIN_POS(pipe), crtc->config.pch_pfit.pos);
I915_WRITE(PS_WIN_SZ(pipe), crtc->config.pch_pfit.size);
DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
 
if (crtc->config->pch_pfit.enabled) {
int id;
 
if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
DRM_ERROR("Requesting pfit without getting a scaler first\n");
return;
}
 
id = scaler_state->scaler_id;
I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
 
DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
}
}
 
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
4066,7 → 4543,7
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
 
if (crtc->config.pch_pfit.enabled) {
if (crtc->config->pch_pfit.enabled) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
4076,45 → 4553,17
PF_PIPE_SEL_IVB(pipe));
else
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
}
}
 
static void intel_enable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_plane *plane;
struct intel_plane *intel_plane;
 
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
intel_plane = to_intel_plane(plane);
if (intel_plane->pipe == pipe)
intel_plane_restore(&intel_plane->base);
}
}
 
static void intel_disable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_plane *plane;
struct intel_plane *intel_plane;
 
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
intel_plane = to_intel_plane(plane);
if (intel_plane->pipe == pipe)
intel_plane_disable(&intel_plane->base);
}
}
 
void hsw_enable_ips(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!crtc->config.ips_enabled)
if (!crtc->config->ips_enabled)
return;
 
/* We can only enable IPS after we enable a plane and wait for a vblank */
4147,7 → 4596,7
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!crtc->config.ips_enabled)
if (!crtc->config->ips_enabled)
return;
 
assert_plane_enabled(dev_priv, crtc->plane);
4174,15 → 4623,14
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
int palreg = PALETTE(pipe);
int i;
bool reenable_ips = false;
 
/* The clocks have to be on to load the palette. */
if (!crtc->enabled || !intel_crtc->active)
if (!crtc->state->active)
return;
 
if (!HAS_PCH_SPLIT(dev_priv->dev)) {
if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
4189,14 → 4637,10
assert_pll_enabled(dev_priv, pipe);
}
 
/* use legacy palette for Ironlake */
if (!HAS_GMCH_DISPLAY(dev))
palreg = LGC_PALETTE(pipe);
 
/* Workaround : Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
4204,7 → 4648,14
}
 
for (i = 0; i < 256; i++) {
I915_WRITE(palreg + 4 * i,
u32 palreg;
 
if (HAS_GMCH_DISPLAY(dev))
palreg = PALETTE(pipe, i);
else
palreg = LGC_PALETTE(pipe, i);
 
I915_WRITE(palreg,
(intel_crtc->lut_r[i] << 16) |
(intel_crtc->lut_g[i] << 8) |
intel_crtc->lut_b[i]);
4214,9 → 4665,9
hsw_enable_ips(intel_crtc);
}
 
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
if (!enable && intel_crtc->overlay) {
if (intel_crtc->overlay) {
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
4232,50 → 4683,181
*/
}
 
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
/**
* intel_post_enable_primary - Perform operations after enabling primary plane
* @crtc: the CRTC whose primary plane was just enabled
*
* Performs potentially sleeping operations that must be done after the primary
* plane is enabled, such as updating FBC and IPS. Note that this may be
* called due to an explicit primary plane update, or due to an implicit
* re-enable that is caused when a sprite plane is updated to no longer
* completely hide the primary plane.
*/
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
 
intel_enable_primary_hw_plane(crtc->primary, crtc);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
intel_crtc_dpms_overlay(intel_crtc, true);
/*
* BDW signals flip done immediately if the plane
* is disabled, even if the plane enable is already
* armed to occur at the next vblank :(
*/
if (IS_BROADWELL(dev))
intel_wait_for_vblank(dev, pipe);
 
/*
* FIXME IPS should be fine as long as one plane is
* enabled, but in practice it seems to have problems
* when going from primary only to sprite only and vice
* versa.
*/
hsw_enable_ips(intel_crtc);
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
/*
* FIXME: Once we grow proper nuclear flip support out of this we need
* to compute the mask of flip planes precisely. For the time being
* consider this a flip from a NULL plane.
* Gen2 reports pipe underruns whenever all planes are disabled.
* So don't enable underrun reporting before at least some planes
* are enabled.
* FIXME: Need to fix the logic to work when we turn off all planes
* but leave the pipe running.
*/
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
/* Underruns don't raise interrupts, so check manually. */
if (HAS_GMCH_DISPLAY(dev))
i9xx_check_fifo_underruns(dev_priv);
}
 
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
/**
* intel_pre_disable_primary - Perform operations before disabling primary plane
* @crtc: the CRTC whose primary plane is to be disabled
*
* Performs potentially sleeping operations that must be done before the
* primary plane is disabled, such as updating FBC and IPS. Note that this may
* be called due to an explicit primary plane update, or due to an implicit
* disable that is caused when a sprite plane completely hides the primary
* plane.
*/
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
 
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
* So disable underrun reporting before all the planes get disabled.
* FIXME: Need to fix the logic to work when we turn off all planes
* but leave the pipe running.
*/
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
/*
* Vblank time updates from the shadow to live plane control register
* are blocked if the memory self-refresh mode is active at that
* moment. So to make sure the plane gets truly disabled, disable
* first the self-refresh mode. The self-refresh enable bit in turn
* will be checked/applied by the HW only at the next frame start
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
if (HAS_GMCH_DISPLAY(dev)) {
intel_set_memory_cxsr(dev_priv, false);
dev_priv->wm.vlv.cxsr = false;
intel_wait_for_vblank(dev, pipe);
}
 
/*
* FIXME IPS should be fine as long as one plane is
* enabled, but in practice it seems to have problems
* when going from primary only to sprite only and vice
* versa.
*/
hsw_disable_ips(intel_crtc);
}
 
intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_primary_hw_plane(crtc->primary, crtc);
static void intel_post_plane_update(struct intel_crtc *crtc)
{
struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_plane *plane;
 
if (atomic->wait_vblank)
intel_wait_for_vblank(dev, crtc->pipe);
 
intel_frontbuffer_flip(dev, atomic->fb_bits);
 
if (atomic->disable_cxsr)
crtc->wm.cxsr_allowed = true;
 
if (crtc->atomic.update_wm_post)
intel_update_watermarks(&crtc->base);
 
if (atomic->update_fbc)
intel_fbc_update(dev_priv);
 
if (atomic->post_enable_primary)
intel_post_enable_primary(&crtc->base);
 
drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
intel_update_sprite_watermarks(plane, &crtc->base,
0, 0, 0, false, false);
 
memset(atomic, 0, sizeof(*atomic));
}
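/*
 * Note (informal): crtc->atomic is one-shot scratch space. The
 * pre/post plane update hooks consume whatever flags the check
 * phase staged there, and the memset above wipes it for the next
 * commit.
 */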
 
static void intel_pre_plane_update(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
struct drm_plane *p;
 
/* Track fb's for any planes being disabled */
drm_for_each_plane_mask(p, dev, atomic->disabled_planes) {
struct intel_plane *plane = to_intel_plane(p);
 
mutex_lock(&dev->struct_mutex);
i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL,
plane->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
}
 
if (atomic->disable_fbc)
intel_fbc_disable_crtc(crtc);
 
if (crtc->atomic.disable_ips)
hsw_disable_ips(crtc);
 
if (atomic->pre_disable_primary)
intel_pre_disable_primary(&crtc->base);
 
if (atomic->disable_cxsr) {
crtc->wm.cxsr_allowed = false;
intel_set_memory_cxsr(dev_priv, false);
}
}
 
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *p;
int pipe = intel_crtc->pipe;
 
intel_crtc_dpms_overlay_disable(intel_crtc);
 
drm_for_each_plane_mask(p, dev, plane_mask)
to_intel_plane(p)->disable_plane(p, crtc);
 
/*
* FIXME: Once we grow proper nuclear flip support out of this we need
* to compute the mask of flip planes precisely. For the time being
4292,22 → 4874,20
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
 
WARN_ON(!crtc->enabled);
 
if (intel_crtc->active)
if (WARN_ON(intel_crtc->active))
return;
 
if (intel_crtc->config.has_pch_encoder)
if (intel_crtc->config->has_pch_encoder)
intel_prepare_shared_dpll(intel_crtc);
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc, M1_N1);
 
intel_set_pipe_timings(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder) {
if (intel_crtc->config->has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
&intel_crtc->config.fdi_m_n, NULL);
&intel_crtc->config->fdi_m_n, NULL);
}
 
ironlake_set_pipeconf(crtc);
4321,7 → 4901,7
if (encoder->pre_enable)
encoder->pre_enable(encoder);
 
if (intel_crtc->config.has_pch_encoder) {
if (intel_crtc->config->has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
* cpu pipes, hence this is separate from all the other fdi/pch
* enabling. */
4342,19 → 4922,17
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder)
if (intel_crtc->config->has_pch_encoder)
ironlake_pch_enable(crtc);
 
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
if (HAS_PCH_CPT(dev))
cpt_verify_modeset(dev, intel_crtc->pipe);
 
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
 
intel_crtc_enable_planes(crtc);
}
 
/* IPS only exists on ULT machines and is tied to pipe A. */
4363,35 → 4941,6
return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}
 
/*
* This implements the workaround described in the "notes" section of the mode
* set sequence documentation. When going from no pipes or single pipe to
* multiple pipes, and planes are enabled after the pipe, we need to wait at
* least 2 vblanks on the first pipe before enabling planes on the second pipe.
*/
static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct intel_crtc *crtc_it, *other_active_crtc = NULL;
 
/* We want to get the other_active_crtc only if there's only 1 other
* active crtc. */
for_each_intel_crtc(dev, crtc_it) {
if (!crtc_it->active || crtc_it == crtc)
continue;
 
if (other_active_crtc)
return;
 
other_active_crtc = crtc_it;
}
if (!other_active_crtc)
return;
 
intel_wait_for_vblank(dev, other_active_crtc->pipe);
intel_wait_for_vblank(dev, other_active_crtc->pipe);
}
 
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
4398,29 → 4947,30
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->state);
bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
 
WARN_ON(!crtc->enabled);
 
if (intel_crtc->active)
if (WARN_ON(intel_crtc->active))
return;
 
if (intel_crtc_to_shared_dpll(intel_crtc))
intel_enable_shared_dpll(intel_crtc);
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc, M1_N1);
 
intel_set_pipe_timings(intel_crtc);
 
if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
intel_crtc->config.pixel_multiplier - 1);
if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
intel_crtc->config->pixel_multiplier - 1);
}
 
if (intel_crtc->config.has_pch_encoder) {
if (intel_crtc->config->has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
&intel_crtc->config.fdi_m_n, NULL);
&intel_crtc->config->fdi_m_n, NULL);
}
 
haswell_set_pipeconf(crtc);
4430,19 → 4980,23
intel_crtc->active = true;
 
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
for_each_encoder_on_crtc(dev, crtc, encoder) {
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
if (encoder->pre_enable)
encoder->pre_enable(encoder);
}
 
if (intel_crtc->config.has_pch_encoder) {
if (intel_crtc->config->has_pch_encoder) {
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
dev_priv->display.fdi_link_train(crtc);
}
 
if (!is_dsi)
intel_ddi_enable_pipe_clock(intel_crtc);
 
if (IS_SKYLAKE(dev))
if (INTEL_INFO(dev)->gen >= 9)
skylake_pfit_enable(intel_crtc);
else
ironlake_pfit_enable(intel_crtc);
4454,47 → 5008,36
intel_crtc_load_lut(crtc);
 
intel_ddi_set_pipe_settings(crtc);
if (!is_dsi)
intel_ddi_enable_transcoder_func(crtc);
 
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder)
if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(crtc);
 
if (intel_crtc->config.dp_encoder_is_mst)
if (intel_crtc->config->dp_encoder_is_mst && !is_dsi)
intel_ddi_set_vc_payload_alloc(crtc, true);
 
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
encoder->enable(encoder);
intel_opregion_notify_encoder(encoder, true);
}
 
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
 
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
intel_crtc_enable_planes(crtc);
hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
intel_wait_for_vblank(dev, hsw_workaround_pipe);
intel_wait_for_vblank(dev, hsw_workaround_pipe);
}
 
static void skylake_pfit_disable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
 
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
if (crtc->config.pch_pfit.enabled) {
I915_WRITE(PS_CTL(pipe), 0);
I915_WRITE(PS_WIN_POS(pipe), 0);
I915_WRITE(PS_WIN_SZ(pipe), 0);
}
}
 
static void ironlake_pfit_disable(struct intel_crtc *crtc)
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
4502,7 → 5045,7
 
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
if (crtc->config.pch_pfit.enabled) {
if (force || crtc->config->pch_pfit.enabled) {
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_POS(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
4518,31 → 5061,27
int pipe = intel_crtc->pipe;
u32 reg, temp;
 
if (!intel_crtc->active)
return;
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
 
intel_crtc_disable_planes(crtc);
 
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
 
if (intel_crtc->config.has_pch_encoder)
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
intel_disable_pipe(intel_crtc);
 
ironlake_pfit_disable(intel_crtc);
ironlake_pfit_disable(intel_crtc, false);
 
if (intel_crtc->config->has_pch_encoder)
ironlake_fdi_disable(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
 
if (intel_crtc->config.has_pch_encoder) {
ironlake_fdi_disable(crtc);
 
if (intel_crtc->config->has_pch_encoder) {
ironlake_disable_pch_transcoder(dev_priv, pipe);
 
if (HAS_PCH_CPT(dev)) {
4560,18 → 5099,8
I915_WRITE(PCH_DPLL_SEL, temp);
}
 
/* disable PCH DPLL */
intel_disable_shared_dpll(intel_crtc);
 
ironlake_fdi_pll_disable(intel_crtc);
}
 
intel_crtc->active = false;
intel_update_watermarks(crtc);
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
}
 
static void haswell_crtc_disable(struct drm_crtc *crtc)
4580,39 → 5109,37
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
 
if (!intel_crtc->active)
return;
 
intel_crtc_disable_planes(crtc);
 
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
encoder->disable(encoder);
}
 
if (intel_crtc->config.has_pch_encoder)
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
 
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
intel_disable_pipe(intel_crtc);
 
if (intel_crtc->config.dp_encoder_is_mst)
if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, false);
 
if (!is_dsi)
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
if (IS_SKYLAKE(dev))
skylake_pfit_disable(intel_crtc);
if (INTEL_INFO(dev)->gen >= 9)
skylake_scaler_disable(intel_crtc);
else
ironlake_pfit_disable(intel_crtc);
ironlake_pfit_disable(intel_crtc, false);
 
if (!is_dsi)
intel_ddi_disable_pipe_clock(intel_crtc);
 
if (intel_crtc->config.has_pch_encoder) {
if (intel_crtc->config->has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
intel_ddi_fdi_disable(crtc);
}
4620,32 → 5147,15
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
 
intel_crtc->active = false;
intel_update_watermarks(crtc);
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
if (intel_crtc_to_shared_dpll(intel_crtc))
intel_disable_shared_dpll(intel_crtc);
}
 
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
intel_put_shared_dpll(intel_crtc);
}
 
 
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc_config *pipe_config = &crtc->config;
struct intel_crtc_state *pipe_config = crtc->config;
 
if (!crtc->config.gmch_pfit.control)
if (!pipe_config->gmch_pfit.control)
return;
 
/*
4674,12 → 5184,34
return POWER_DOMAIN_PORT_DDI_C_4_LANES;
case PORT_D:
return POWER_DOMAIN_PORT_DDI_D_4_LANES;
case PORT_E:
return POWER_DOMAIN_PORT_DDI_E_2_LANES;
default:
WARN_ON_ONCE(1);
MISSING_CASE(port);
return POWER_DOMAIN_PORT_OTHER;
}
}
 
static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
{
switch (port) {
case PORT_A:
return POWER_DOMAIN_AUX_A;
case PORT_B:
return POWER_DOMAIN_AUX_B;
case PORT_C:
return POWER_DOMAIN_AUX_C;
case PORT_D:
return POWER_DOMAIN_AUX_D;
case PORT_E:
/* FIXME: Check VBT for actual wiring of PORT E */
return POWER_DOMAIN_AUX_D;
default:
MISSING_CASE(port);
return POWER_DOMAIN_AUX_A;
}
}
 
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))
4711,6 → 5243,36
}
}
 
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct intel_digital_port *intel_dig_port;
 
switch (intel_encoder->type) {
case INTEL_OUTPUT_UNKNOWN:
case INTEL_OUTPUT_HDMI:
/*
* Only DDI platforms should ever use these output types.
* We can get here after the HDMI detect code has already set
* the type of the shared encoder. Since we can't be sure
* what the status of the given connectors is, play safe and
* run the DP detection too.
*/
WARN_ON_ONCE(!HAS_DDI(dev));
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_EDP:
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
return port_to_aux_power_domain(intel_dig_port->port);
case INTEL_OUTPUT_DP_MST:
intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
return port_to_aux_power_domain(intel_dig_port->port);
default:
MISSING_CASE(intel_encoder->type);
return POWER_DOMAIN_AUX_A;
}
}
 
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
4720,12 → 5282,15
unsigned long mask;
enum transcoder transcoder;
 
if (!crtc->state->active)
return 0;
 
transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
 
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
if (intel_crtc->config.pch_pfit.enabled ||
intel_crtc->config.pch_pfit.force_thru)
if (intel_crtc->config->pch_pfit.enabled ||
intel_crtc->config->pch_pfit.force_thru)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
 
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4734,64 → 5299,131
return mask;
}
 
static void modeset_update_crtc_power_domains(struct drm_device *dev)
static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
struct intel_crtc *crtc;
 
/*
* First get all needed power domains, then put all unneeded, to avoid
* any unnecessary toggling of the power wells.
*/
for_each_intel_crtc(dev, crtc) {
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum intel_display_power_domain domain;
unsigned long domains, new_domains, old_domains;
 
if (!crtc->base.enabled)
continue;
old_domains = intel_crtc->enabled_power_domains;
intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
 
pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
domains = new_domains & ~old_domains;
 
for_each_power_domain(domain, pipe_domains[crtc->pipe])
for_each_power_domain(domain, domains)
intel_display_power_get(dev_priv, domain);
 
return old_domains & ~new_domains;
}
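/*
 * Note the ordering (by design): the domains needed by the new
 * state are grabbed here, while the stale ones returned above are
 * only released later via modeset_put_power_domains(), so a domain
 * shared by both states never sees a spurious power-well toggle.
 */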
 
if (dev_priv->display.modeset_global_resources)
dev_priv->display.modeset_global_resources(dev);
 
for_each_intel_crtc(dev, crtc) {
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
unsigned long domains)
{
enum intel_display_power_domain domain;
 
for_each_power_domain(domain, crtc->enabled_power_domains)
for_each_power_domain(domain, domains)
intel_display_power_put(dev_priv, domain);
}
 
crtc->enabled_power_domains = pipe_domains[crtc->pipe];
static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long put_domains[I915_MAX_PIPES] = {};
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int i;
 
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (needs_modeset(crtc->state))
put_domains[to_intel_crtc(crtc)->pipe] =
modeset_get_crtc_power_domains(crtc);
}
 
intel_display_set_init_power(dev_priv, false);
if (dev_priv->display.modeset_commit_cdclk) {
unsigned int cdclk = to_intel_atomic_state(state)->cdclk;
 
if (cdclk != dev_priv->cdclk_freq &&
!WARN_ON(!state->allow_modeset))
dev_priv->display.modeset_commit_cdclk(state);
}
 
/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
for (i = 0; i < I915_MAX_PIPES; i++)
if (put_domains[i])
modeset_put_power_domains(dev_priv, put_domains[i]);
}
 
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
int max_cdclk_freq = dev_priv->max_cdclk_freq;
 
/* Obtain SKU information */
mutex_lock(&dev_priv->dpio_lock);
hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
CCK_FUSE_HPLL_FREQ_MASK;
mutex_unlock(&dev_priv->dpio_lock);
if (INTEL_INFO(dev_priv)->gen >= 9 ||
IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return max_cdclk_freq;
else if (IS_CHERRYVIEW(dev_priv))
return max_cdclk_freq*95/100;
else if (INTEL_INFO(dev_priv)->gen < 4)
return 2*max_cdclk_freq*90/100;
else
return max_cdclk_freq*90/100;
}
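/*
 * Worked example (illustrative): a Cherryview part with
 * max_cdclk_freq == 320000 kHz allows dotclocks up to
 * 320000 * 95 / 100 = 304000 kHz.
 */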
 
return vco_freq[hpll_freq] * 1000;
static void intel_update_max_cdclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_SKYLAKE(dev)) {
u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
 
if (limit == SKL_DFSM_CDCLK_LIMIT_675)
dev_priv->max_cdclk_freq = 675000;
else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
dev_priv->max_cdclk_freq = 540000;
else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
dev_priv->max_cdclk_freq = 450000;
else
dev_priv->max_cdclk_freq = 337500;
} else if (IS_BROADWELL(dev)) {
/*
* FIXME with extra cooling we can allow
* 540 MHz for ULX and 675 MHz for ULT.
* How can we know if extra cooling is
* available? PCI ID, VTB, something else?
*/
if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
dev_priv->max_cdclk_freq = 450000;
else if (IS_BDW_ULX(dev))
dev_priv->max_cdclk_freq = 450000;
else if (IS_BDW_ULT(dev))
dev_priv->max_cdclk_freq = 540000;
else
dev_priv->max_cdclk_freq = 675000;
} else if (IS_CHERRYVIEW(dev)) {
dev_priv->max_cdclk_freq = 320000;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->max_cdclk_freq = 400000;
} else {
/* otherwise assume cdclk is fixed */
dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
}
 
static void vlv_update_cdclk(struct drm_device *dev)
dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
 
DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
dev_priv->max_cdclk_freq);
 
DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
dev_priv->max_dotclk_freq);
}
 
static void intel_update_cdclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
dev_priv->vlv_cdclk_freq);
dev_priv->cdclk_freq);
 
/*
* Program the gmbus_freq based on the cdclk frequency.
4798,9 → 5430,412
* BSpec erroneously claims we should aim for 4MHz, but
* in fact 1MHz is the correct frequency.
*/
I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000));
if (IS_VALLEYVIEW(dev)) {
/*
* Program the gmbus_freq based on the cdclk frequency.
* BSpec erroneously claims we should aim for 4MHz, but
* in fact 1MHz is the correct frequency.
*/
I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
}
 
if (dev_priv->max_cdclk_freq == 0)
intel_update_max_cdclk(dev);
}
 
static void broxton_set_cdclk(struct drm_device *dev, int frequency)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t divider;
uint32_t ratio;
uint32_t current_freq;
int ret;
 
/* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
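/*
 * Worked example: ratio 60 and div 4 give
 * 19.2 MHz * 60 / 2 / 4 = 144 MHz, i.e. the 144000 kHz case below;
 * ratio 65 and div 1 give 19.2 MHz * 65 / 2 = 624 MHz.
 */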
switch (frequency) {
case 144000:
divider = BXT_CDCLK_CD2X_DIV_SEL_4;
ratio = BXT_DE_PLL_RATIO(60);
break;
case 288000:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
ratio = BXT_DE_PLL_RATIO(60);
break;
case 384000:
divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
ratio = BXT_DE_PLL_RATIO(60);
break;
case 576000:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
ratio = BXT_DE_PLL_RATIO(60);
break;
case 624000:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
ratio = BXT_DE_PLL_RATIO(65);
break;
case 19200:
/*
* Bypass frequency with DE PLL disabled. Init ratio, divider
* to suppress GCC warning.
*/
ratio = 0;
divider = 0;
break;
default:
DRM_ERROR("unsupported CDCLK freq %d", frequency);
 
return;
}
 
mutex_lock(&dev_priv->rps.hw_lock);
/* Inform power controller of upcoming frequency change */
ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
0x80000000);
mutex_unlock(&dev_priv->rps.hw_lock);
 
if (ret) {
DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
ret, frequency);
return;
}
 
current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
/* convert from .1 fixpoint MHz with -1MHz offset to kHz */
current_freq = current_freq * 500 + 1000;
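/*
 * Decode example: a decimal field of 1246 means
 * 1246 * 500 + 1000 = 624000 kHz (624 MHz), the inverse of the
 * "(frequency - 1000) / 500" encoding used further down.
 */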
 
/*
* The DE PLL has to be disabled:
* - when setting to 19.2 MHz (bypass; the PLL isn't used)
* - before setting to 624 MHz (the PLL needs toggling)
* - before changing away from 624 MHz (the PLL needs toggling)
*/
if (frequency == 19200 || frequency == 624000 ||
current_freq == 624000) {
I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
1))
DRM_ERROR("timout waiting for DE PLL unlock\n");
}
 
if (frequency != 19200) {
uint32_t val;
 
val = I915_READ(BXT_DE_PLL_CTL);
val &= ~BXT_DE_PLL_RATIO_MASK;
val |= ratio;
I915_WRITE(BXT_DE_PLL_CTL, val);
 
I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
DRM_ERROR("timeout waiting for DE PLL lock\n");
 
val = I915_READ(CDCLK_CTL);
val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
val |= divider;
/*
* Disable SSA Precharge when CD clock frequency < 500 MHz,
* enable otherwise.
*/
val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
if (frequency >= 500000)
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
 
val &= ~CDCLK_FREQ_DECIMAL_MASK;
/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
val |= (frequency - 1000) / 500;
I915_WRITE(CDCLK_CTL, val);
}
 
mutex_lock(&dev_priv->rps.hw_lock);
ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
DIV_ROUND_UP(frequency, 25000));
mutex_unlock(&dev_priv->rps.hw_lock);
 
if (ret) {
DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
ret, frequency);
return;
}
 
intel_update_cdclk(dev);
}
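/*
 * Worked example (illustrative): for frequency = 624000 kHz the PCU is
 * handed DIV_ROUND_UP(624000, 25000) = 25, i.e. the new cdclk
 * expressed in 25 MHz units, rounded up.
 */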
 
void broxton_init_cdclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val;
 
/*
* NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
* or else the reset will hang because there is no PCH to respond.
* Move the handshake programming to the initialization sequence;
* previously it was left up to the BIOS.
*/
val = I915_READ(HSW_NDE_RSTWRN_OPT);
val &= ~RESET_PCH_HANDSHAKE_ENABLE;
I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
 
/* Enable PG1 for cdclk */
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
 
/* check if cd clock is enabled */
if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
DRM_DEBUG_KMS("Display already initialized\n");
return;
}
 
/*
* FIXME:
* - The initial CDCLK needs to be read from VBT.
* Need to make this change after VBT has changes for BXT.
* - check if setting the max (or any) cdclk freq is really necessary
* here, it belongs to modeset time
*/
broxton_set_cdclk(dev, 624000);
 
I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
POSTING_READ(DBUF_CTL);
 
udelay(10);
 
if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
DRM_ERROR("DBuf power enable timeout!\n");
}
 
void broxton_uninit_cdclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
POSTING_READ(DBUF_CTL);
 
udelay(10);
 
if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
DRM_ERROR("DBuf power disable timeout!\n");
 
/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
broxton_set_cdclk(dev, 19200);
 
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
 
static const struct skl_cdclk_entry {
unsigned int freq;
unsigned int vco;
} skl_cdclk_frequencies[] = {
{ .freq = 308570, .vco = 8640 },
{ .freq = 337500, .vco = 8100 },
{ .freq = 432000, .vco = 8640 },
{ .freq = 450000, .vco = 8100 },
{ .freq = 540000, .vco = 8100 },
{ .freq = 617140, .vco = 8640 },
{ .freq = 675000, .vco = 8100 },
};
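/*
 * Note (illustrative): each entry is the DPLL0 VCO divided down by an
 * integer, e.g. 8100 MHz / 15 = 540000 kHz and 8640 MHz / 20 =
 * 432000 kHz; 308570 and 617140 are 8640 MHz / 28 ~= 308.57 MHz and
 * / 14 ~= 617.14 MHz, kept at the spec's kHz rounding.
 */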
 
static unsigned int skl_cdclk_decimal(unsigned int freq)
{
return (freq - 1000) / 500;
}
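/*
 * Worked example (illustrative): 337500 kHz encodes as
 * (337500 - 1000) / 500 = 673, and 675000 kHz as 1348, matching the
 * "0.1 MHz units with -1 MHz offset" CDCLK_CTL encoding.
 */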
 
static unsigned int skl_cdclk_get_vco(unsigned int freq)
{
unsigned int i;
 
for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
 
if (e->freq == freq)
return e->vco;
}
 
return 8100;
}
 
static void
skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
{
unsigned int min_freq;
u32 val;
 
/* select the minimum CDCLK before enabling DPLL 0 */
val = I915_READ(CDCLK_CTL);
val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
val |= CDCLK_FREQ_337_308;
 
if (required_vco == 8640)
min_freq = 308570;
else
min_freq = 337500;
 
val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
 
I915_WRITE(CDCLK_CTL, val);
POSTING_READ(CDCLK_CTL);
 
/*
* We always enable DPLL0 with the lowest link rate possible, but still
* taking into account the VCO required to operate the eDP panel at the
* desired frequency. The usual DP link rates operate with a VCO of
* 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
* The modeset code is responsible for the selection of the exact link
* rate later on, with the constraint of choosing a frequency that
* works with required_vco.
*/
val = I915_READ(DPLL_CTRL1);
 
val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
if (required_vco == 8640)
val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
SKL_DPLL0);
else
val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
SKL_DPLL0);
 
I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1);
 
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
 
if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
DRM_ERROR("DPLL0 not locked\n");
}
 
static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
{
int ret;
u32 val;
 
/* inform PCU we want to change CDCLK */
val = SKL_CDCLK_PREPARE_FOR_CHANGE;
mutex_lock(&dev_priv->rps.hw_lock);
ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
mutex_unlock(&dev_priv->rps.hw_lock);
 
return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
}
 
static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
{
unsigned int i;
 
for (i = 0; i < 15; i++) {
if (skl_cdclk_pcu_ready(dev_priv))
return true;
udelay(10);
}
 
return false;
}
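/*
 * Note (illustrative): with 15 polls spaced 10 us apart this gives the
 * PCU roughly 150 us to signal SKL_CDCLK_READY_FOR_CHANGE before the
 * caller gives up.
 */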
 
static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
{
struct drm_device *dev = dev_priv->dev;
u32 freq_select, pcu_ack;
 
DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
 
if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
DRM_ERROR("failed to inform PCU about cdclk change\n");
return;
}
 
/* set CDCLK_CTL */
switch (freq) {
case 450000:
case 432000:
freq_select = CDCLK_FREQ_450_432;
pcu_ack = 1;
break;
case 540000:
freq_select = CDCLK_FREQ_540;
pcu_ack = 2;
break;
case 308570:
case 337500:
default:
freq_select = CDCLK_FREQ_337_308;
pcu_ack = 0;
break;
case 617140:
case 675000:
freq_select = CDCLK_FREQ_675_617;
pcu_ack = 3;
break;
}
 
I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
POSTING_READ(CDCLK_CTL);
 
/* inform PCU of the change */
mutex_lock(&dev_priv->rps.hw_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
mutex_unlock(&dev_priv->rps.hw_lock);
 
intel_update_cdclk(dev);
}
 
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
/* disable DBUF power */
I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
POSTING_READ(DBUF_CTL);
 
udelay(10);
 
if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
DRM_ERROR("DBuf power disable timeout\n");
 
/*
* DMC assumes ownership of LCPLL and will get confused if we touch it.
*/
if (dev_priv->csr.dmc_payload) {
/* disable DPLL0 */
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) &
~LCPLL_PLL_ENABLE);
if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
DRM_ERROR("Couldn't disable DPLL0\n");
}
 
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
 
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
u32 val;
unsigned int required_vco;
 
/* enable PCH reset handshake */
val = I915_READ(HSW_NDE_RSTWRN_OPT);
I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
 
/* enable PG1 and Misc I/O */
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
 
/* DPLL0 not enabled (happens on early BIOS versions) */
if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
/* enable DPLL0 */
required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
skl_dpll0_enable(dev_priv, required_vco);
}
 
/* set CDCLK to the frequency the BIOS chose */
skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
 
/* enable DBUF power */
I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
POSTING_READ(DBUF_CTL);
 
udelay(10);
 
if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
DRM_ERROR("DBuf power enable timeout\n");
}
 
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
4807,7 → 5842,8
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, cmd;
 
WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
WARN_ON(dev_priv->display.get_display_clock_speed(dev)
!= dev_priv->cdclk_freq);
 
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
cmd = 2;
4828,26 → 5864,25
}
mutex_unlock(&dev_priv->rps.hw_lock);
 
mutex_lock(&dev_priv->sb_lock);
 
if (cdclk == 400000) {
u32 divider;
 
divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
 
mutex_lock(&dev_priv->dpio_lock);
/* adjust cdclk divider */
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
val &= ~DISPLAY_FREQUENCY_VALUES;
val &= ~CCK_FREQUENCY_VALUES;
val |= divider;
vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
 
if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
50))
DRM_ERROR("timed out waiting for CDclk change\n");
mutex_unlock(&dev_priv->dpio_lock);
}
 
mutex_lock(&dev_priv->dpio_lock);
/* adjust self-refresh exit latency value */
val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
val &= ~0x7f;
4861,9 → 5896,10
else
val |= 3000 / 250; /* 3.0 usec */
vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
mutex_unlock(&dev_priv->dpio_lock);
 
vlv_update_cdclk(dev);
mutex_unlock(&dev_priv->sb_lock);
 
intel_update_cdclk(dev);
}
 
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
4871,27 → 5907,27
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, cmd;
 
WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
WARN_ON(dev_priv->display.get_display_clock_speed(dev)
!= dev_priv->cdclk_freq);
 
switch (cdclk) {
case 400000:
cmd = 3;
break;
case 333333:
case 320000:
cmd = 2;
break;
case 266667:
cmd = 1;
break;
case 200000:
cmd = 0;
break;
default:
WARN_ON(1);
MISSING_CASE(cdclk);
return;
}
 
/*
* Specs are full of misinformation, but testing on actual
* hardware has shown that we just need to write the desired
* CCK divider into the Punit register.
*/
cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
 
mutex_lock(&dev_priv->rps.hw_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK_CHV;
4904,7 → 5940,7
}
mutex_unlock(&dev_priv->rps.hw_lock);
 
vlv_update_cdclk(dev);
intel_update_cdclk(dev);
}
 
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
4911,27 → 5947,25
int max_pixclk)
{
int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
 
/* FIXME: Punit isn't quite ready yet */
if (IS_CHERRYVIEW(dev_priv->dev))
return 400000;
 
/*
* Really only a few cases to deal with, as only 4 CDclks are supported:
* 200MHz
* 267MHz
* 320/333MHz (depends on HPLL freq)
* 400MHz
* So we check to see whether we're above 90% of the lower bin and
* adjust if needed.
* 400MHz (VLV only)
* So we check to see whether we're above 90% (VLV) or 95% (CHV)
* of the lower bin and adjust if needed.
*
* We seem to get an unstable or solid color picture at 200MHz.
* Not sure what's wrong. For now use 200MHz only when all pipes
* are off.
*/
if (max_pixclk > freq_320*9/10)
if (!IS_CHERRYVIEW(dev_priv) &&
max_pixclk > freq_320*limit/100)
return 400000;
else if (max_pixclk > 266667*9/10)
else if (max_pixclk > 266667*limit/100)
return freq_320;
else if (max_pixclk > 0)
return 266667;
4939,47 → 5973,123
return 200000;
}
 
/* compute the max pixel clock for new configuration */
static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
struct drm_device *dev = dev_priv->dev;
/*
* FIXME:
* - remove the guardband, it's not needed on BXT
* - set 19.2MHz bypass frequency if there are no active pipes
*/
if (max_pixclk > 576000*9/10)
return 624000;
else if (max_pixclk > 384000*9/10)
return 576000;
else if (max_pixclk > 288000*9/10)
return 384000;
else if (max_pixclk > 144000*9/10)
return 288000;
else
return 144000;
}
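/*
 * Worked example (illustrative): max_pixclk = 400000 kHz is below
 * 576000 * 9 / 10 = 518400 but above 384000 * 9 / 10 = 345600, so
 * 576000 kHz is selected.
 */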
 
/* Compute the max pixel clock for new configuration. Uses atomic state if
* that's non-NULL, look at current state otherwise. */
static int intel_mode_max_pixclk(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state;
int max_pixclk = 0;
 
for_each_intel_crtc(dev, intel_crtc) {
if (intel_crtc->new_enabled)
crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
 
if (!crtc_state->base.enable)
continue;
 
max_pixclk = max(max_pixclk,
intel_crtc->new_config->adjusted_mode.crtc_clock);
crtc_state->base.adjusted_mode.crtc_clock);
}
 
return max_pixclk;
}
 
static void valleyview_modeset_global_pipes(struct drm_device *dev,
unsigned *prepare_pipes)
static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
int max_pixclk = intel_mode_max_pixclk(dev_priv);
int max_pixclk = intel_mode_max_pixclk(dev, state);
 
if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
dev_priv->vlv_cdclk_freq)
return;
if (max_pixclk < 0)
return max_pixclk;
 
/* disable/enable all currently active pipes while we change cdclk */
for_each_intel_crtc(dev, intel_crtc)
if (intel_crtc->base.enabled)
*prepare_pipes |= (1 << intel_crtc->pipe);
to_intel_atomic_state(state)->cdclk =
valleyview_calc_cdclk(dev_priv, max_pixclk);
 
return 0;
}
 
static void valleyview_modeset_global_resources(struct drm_device *dev)
static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int max_pixclk = intel_mode_max_pixclk(dev_priv);
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
int max_pixclk = intel_mode_max_pixclk(dev, state);
 
if (req_cdclk != dev_priv->vlv_cdclk_freq) {
if (max_pixclk < 0)
return max_pixclk;
 
to_intel_atomic_state(state)->cdclk =
broxton_calc_cdclk(dev_priv, max_pixclk);
 
return 0;
}
 
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
unsigned int credits, default_credits;
 
if (IS_CHERRYVIEW(dev_priv))
default_credits = PFI_CREDIT(12);
else
default_credits = PFI_CREDIT(8);
 
if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
/* CHV suggested value is 31 or 63 */
if (IS_CHERRYVIEW(dev_priv))
credits = PFI_CREDIT_63;
else
credits = PFI_CREDIT(15);
} else {
credits = default_credits;
}
 
/*
* WA - write default credits before re-programming
* FIXME: should we also set the resend bit here?
*/
I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
default_credits);
 
I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
credits | PFI_CREDIT_RESEND);
 
/*
* FIXME is this guaranteed to clear
* immediately or should we poll for it?
*/
WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
 
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
struct drm_i915_private *dev_priv = dev->dev_private;
 
/*
* FIXME: We can end up here with all power domains off, yet
* with a CDCLK frequency other than the minimum. To account
* for this take the PIPE-A power domain, which covers the HW
4995,9 → 6105,10
else
valleyview_set_cdclk(dev, req_cdclk);
 
vlv_program_pfi_credits(dev_priv);
 
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
}
 
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
5008,23 → 6119,14
int pipe = intel_crtc->pipe;
bool is_dsi;
 
WARN_ON(!crtc->enabled);
 
if (intel_crtc->active)
if (WARN_ON(intel_crtc->active))
return;
 
is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
 
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
chv_prepare_pll(intel_crtc, &intel_crtc->config);
else
vlv_prepare_pll(intel_crtc, &intel_crtc->config);
}
if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc, M1_N1);
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
 
intel_set_pipe_timings(intel_crtc);
 
if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
5045,11 → 6147,14
encoder->pre_pll_enable(encoder);
 
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
chv_enable_pll(intel_crtc, &intel_crtc->config);
else
vlv_enable_pll(intel_crtc, &intel_crtc->config);
if (IS_CHERRYVIEW(dev)) {
chv_prepare_pll(intel_crtc, intel_crtc->config);
chv_enable_pll(intel_crtc, intel_crtc->config);
} else {
vlv_prepare_pll(intel_crtc, intel_crtc->config);
vlv_enable_pll(intel_crtc, intel_crtc->config);
}
}
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
5059,19 → 6164,13
 
intel_crtc_load_lut(crtc);
 
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
 
intel_crtc_enable_planes(crtc);
 
/* Underruns don't raise interrupts, so check manually. */
i9xx_check_fifo_underruns(dev_priv);
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
}
 
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
5079,8 → 6178,8
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}
 
static void i9xx_crtc_enable(struct drm_crtc *crtc)
5091,15 → 6190,13
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
 
WARN_ON(!crtc->enabled);
 
if (intel_crtc->active)
if (WARN_ON(intel_crtc->active))
return;
 
i9xx_set_pll_dividers(intel_crtc);
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc, M1_N1);
 
intel_set_pipe_timings(intel_crtc);
 
5123,26 → 6220,11
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
 
intel_crtc_enable_planes(crtc);
 
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
* So don't enable underrun reporting before at least some planes
* are enabled.
* FIXME: Need to fix the logic to work when we turn off all planes
* but leave the pipe running.
*/
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
/* Underruns don't raise interrupts, so check manually. */
i9xx_check_fifo_underruns(dev_priv);
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
}
 
static void i9xx_pfit_disable(struct intel_crtc *crtc)
5150,7 → 6232,7
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!crtc->config.gmch_pfit.control)
if (!crtc->config->gmch_pfit.control)
return;
 
assert_pipe_disabled(dev_priv, crtc->pipe);
5168,31 → 6250,7
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
 
if (!intel_crtc->active)
return;
 
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
* So disable underrun reporting before all the planes get disabled.
* FIXME: Need to fix the logic to work when we turn off all planes
* but leave the pipe running.
*/
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
/*
* Vblank time updates from the shadow to live plane control register
* are blocked if the memory self-refresh mode is active at that
* moment. So to make sure the plane gets truly disabled, disable
* first the self-refresh mode. The self-refresh enable bit in turn
* will be checked/applied by the HW only at the next frame start
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
intel_set_memory_cxsr(dev_priv, false);
intel_crtc_disable_planes(crtc);
 
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
* We also need to wait on all gmch platforms because of the
5200,12 → 6258,12
*/
intel_wait_for_vblank(dev, pipe);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
 
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
 
intel_disable_pipe(intel_crtc);
 
i9xx_pfit_disable(intel_crtc);
5223,42 → 6281,35
i9xx_disable_pll(intel_crtc);
}
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_pll_disable)
encoder->post_pll_disable(encoder);
 
if (!IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
intel_crtc->active = false;
intel_update_watermarks(crtc);
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
}
 
static void i9xx_crtc_off(struct drm_crtc *crtc)
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
}
 
/* Master function to enable/disable CRTC and corresponding power wells */
void intel_crtc_control(struct drm_crtc *crtc, bool enable)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum intel_display_power_domain domain;
unsigned long domains;
 
if (enable) {
if (!intel_crtc->active) {
domains = get_crtc_power_domains(crtc);
for_each_power_domain(domain, domains)
intel_display_power_get(dev_priv, domain);
intel_crtc->enabled_power_domains = domains;
if (!intel_crtc->active)
return;
 
dev_priv->display.crtc_enable(crtc);
if (to_intel_plane_state(crtc->primary->state)->visible) {
intel_pre_disable_primary(crtc);
 
intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
to_intel_plane_state(crtc->primary->state)->visible = false;
}
} else {
if (intel_crtc->active) {
 
dev_priv->display.crtc_disable(crtc);
intel_crtc->active = false;
intel_update_watermarks(crtc);
intel_disable_shared_dpll(intel_crtc);
 
domains = intel_crtc->enabled_power_domains;
for_each_power_domain(domain, domains)
5265,60 → 6316,65
intel_display_power_put(dev_priv, domain);
intel_crtc->enabled_power_domains = 0;
}
}
}
 
/**
* Sets the power management mode of the pipe and plane.
/*
* Turn all CRTCs off, but do not adjust state.
* This has to be paired with a call to intel_modeset_setup_hw_state.
*/
void intel_crtc_update_dpms(struct drm_crtc *crtc)
int intel_display_suspend(struct drm_device *dev)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *intel_encoder;
bool enable = false;
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
struct drm_atomic_state *state;
struct drm_crtc *crtc;
unsigned crtc_mask = 0;
int ret = 0;
 
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
enable |= intel_encoder->connectors_active;
if (WARN_ON(!ctx))
return 0;
 
intel_crtc_control(crtc, enable);
}
lockdep_assert_held(&ctx->ww_ctx);
state = drm_atomic_state_alloc(dev);
if (WARN_ON(!state))
return -ENOMEM;
 
static void intel_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
state->acquire_ctx = ctx;
state->allow_modeset = true;
 
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
for_each_crtc(dev, crtc) {
struct drm_crtc_state *crtc_state =
drm_atomic_get_crtc_state(state, crtc);
 
dev_priv->display.crtc_disable(crtc);
dev_priv->display.off(crtc);
ret = PTR_ERR_OR_ZERO(crtc_state);
if (ret)
goto free;
 
if (crtc->primary->fb) {
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(old_obj);
i915_gem_track_fb(old_obj, NULL,
INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
crtc->primary->fb = NULL;
if (!crtc_state->active)
continue;
 
crtc_state->active = false;
crtc_mask |= 1 << drm_crtc_index(crtc);
}
 
/* Update computed state. */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder || !connector->encoder->crtc)
continue;
if (crtc_mask) {
ret = drm_atomic_commit(state);
 
if (connector->encoder->crtc != crtc)
continue;
if (!ret) {
for_each_crtc(dev, crtc)
if (crtc_mask & (1 << drm_crtc_index(crtc)))
crtc->state->active = true;
 
connector->dpms = DRM_MODE_DPMS_OFF;
to_intel_encoder(connector->encoder)->connectors_active = false;
return ret;
}
}
 
free:
if (ret)
DRM_ERROR("Suspending crtc's failed with %i\n", ret);
drm_atomic_state_free(state);
return ret;
}
 
void intel_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5327,84 → 6383,73
kfree(intel_encoder);
}
 
/* Simple dpms helper for encoders with just one connector, no cloning and only
* one kind of off state. It clamps all !ON modes to fully OFF and changes the
* state of the entire output pipe. */
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
if (mode == DRM_MODE_DPMS_ON) {
encoder->connectors_active = true;
 
intel_crtc_update_dpms(encoder->base.crtc);
} else {
encoder->connectors_active = false;
 
intel_crtc_update_dpms(encoder->base.crtc);
}
}
 
/* Cross check the actual hw state with our own modeset state tracking (and it's
* internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
if (connector->get_hw_state(connector)) {
struct intel_encoder *encoder = connector->encoder;
struct drm_crtc *crtc;
bool encoder_enabled;
enum pipe pipe;
struct drm_crtc *crtc = connector->base.state->crtc;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.base.id,
connector->base.name);
 
/* there is no real hw state for MST connectors */
if (connector->mst_port)
if (connector->get_hw_state(connector)) {
struct intel_encoder *encoder = connector->encoder;
struct drm_connector_state *conn_state = connector->base.state;
 
I915_STATE_WARN(!crtc,
"connector enabled without attached crtc\n");
 
if (!crtc)
return;
 
WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
"wrong connector dpms state\n");
WARN(connector->base.encoder != &encoder->base,
"active connector not linked to encoder\n");
I915_STATE_WARN(!crtc->state->active,
"connector is active, but attached crtc isn't\n");
 
if (encoder) {
WARN(!encoder->connectors_active,
"encoder->connectors_active not set\n");
 
encoder_enabled = encoder->get_hw_state(encoder, &pipe);
WARN(!encoder_enabled, "encoder not enabled\n");
if (WARN_ON(!encoder->base.crtc))
if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
return;
 
crtc = encoder->base.crtc;
I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
"atomic encoder doesn't match attached encoder\n");
 
WARN(!crtc->enabled, "crtc not enabled\n");
WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
WARN(pipe != to_intel_crtc(crtc)->pipe,
"encoder active on the wrong pipe\n");
I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
"attached encoder crtc differs from connector crtc\n");
} else {
I915_STATE_WARN(crtc && crtc->state->active,
"attached crtc is active, but connector isn't\n");
I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
"best encoder set without crtc!\n");
}
}
}
 
/* Even simpler default implementation, if there's really no special case to
* consider. */
void intel_connector_dpms(struct drm_connector *connector, int mode)
int intel_connector_init(struct intel_connector *connector)
{
/* All the simple cases only support two dpms states. */
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
struct drm_connector_state *connector_state;
 
if (mode == connector->dpms)
return;
connector_state = kzalloc(sizeof *connector_state, GFP_KERNEL);
if (!connector_state)
return -ENOMEM;
 
connector->dpms = mode;
connector->base.state = connector_state;
return 0;
}
 
/* Only need to change hw state when actually enabled */
if (connector->encoder)
intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
struct intel_connector *intel_connector_alloc(void)
{
struct intel_connector *connector;
 
intel_modeset_check_state(connector->dev);
connector = kzalloc(sizeof *connector, GFP_KERNEL);
if (!connector)
return NULL;
 
if (intel_connector_init(connector) < 0) {
kfree(connector);
return NULL;
}
 
return connector;
}
 
/* Simple connector->get_hw_state implementation for encoders that support only
* one connector and no cloning and hence the encoder state determines the state
* of the connector. */
5416,19 → 6461,27
return encoder->get_hw_state(encoder, &pipe);
}
 
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc_config *pipe_config)
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *pipe_B_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
if (crtc_state->base.enable && crtc_state->has_pch_encoder)
return crtc_state->fdi_lanes;
 
return 0;
}
 
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc_state *pipe_config)
{
struct drm_atomic_state *state = pipe_config->base.state;
struct intel_crtc *other_crtc;
struct intel_crtc_state *other_crtc_state;
 
DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
pipe_name(pipe), pipe_config->fdi_lanes);
if (pipe_config->fdi_lanes > 4) {
DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return false;
return -EINVAL;
}
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
5435,40 → 6488,53
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
pipe_config->fdi_lanes);
return false;
return -EINVAL;
} else {
return true;
return 0;
}
}
 
if (INTEL_INFO(dev)->num_pipes == 2)
return true;
return 0;
 
/* Ivybridge 3 pipe is really complicated */
switch (pipe) {
case PIPE_A:
return true;
return 0;
case PIPE_B:
if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
pipe_config->fdi_lanes > 2) {
if (pipe_config->fdi_lanes <= 2)
return 0;
 
other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
other_crtc_state =
intel_atomic_get_crtc_state(state, other_crtc);
if (IS_ERR(other_crtc_state))
return PTR_ERR(other_crtc_state);
 
if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return false;
return -EINVAL;
}
return true;
return 0;
case PIPE_C:
if (!pipe_has_enabled_pch(pipe_B_crtc) ||
pipe_B_crtc->config.fdi_lanes <= 2) {
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return false;
return -EINVAL;
}
} else {
 
other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
other_crtc_state =
intel_atomic_get_crtc_state(state, other_crtc);
if (IS_ERR(other_crtc_state))
return PTR_ERR(other_crtc_state);
 
if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
return false;
return -EINVAL;
}
return true;
return 0;
default:
BUG();
}
5476,12 → 6542,12
 
#define RETRY 1
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int lane, link_bw, fdi_dotclock;
bool setup_ok, needs_recompute = false;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int lane, link_bw, fdi_dotclock, ret;
bool needs_recompute = false;
 
retry:
/* FDI is a binary signal running at ~2.7GHz, encoding
5503,9 → 6569,9
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
link_bw, &pipe_config->fdi_m_n);
 
setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
intel_crtc->pipe, pipe_config);
if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
pipe_config->pipe_bpp -= 2*3;
DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
pipe_config->pipe_bpp);
5518,28 → 6584,51
if (needs_recompute)
return RETRY;
 
return setup_ok ? 0 : -EINVAL;
return ret;
}
 
static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
struct intel_crtc_state *pipe_config)
{
if (pipe_config->pipe_bpp > 24)
return false;
 
/* HSW can handle pixel rate up to cdclk? */
if (IS_HASWELL(dev_priv->dev))
return true;
 
/*
* We compare against max which means we must take
* the increased cdclk requirement into account when
* calculating the new cdclk.
*
* It remains to be measured whether running a lower cdclk w/o IPS
* would be preferable.
*/
return ilk_pipe_pixel_rate(pipe_config) <=
dev_priv->max_cdclk_freq * 95 / 100;
}
 
static void hsw_compute_ips_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
pipe_config->ips_enabled = i915.enable_ips &&
hsw_crtc_supports_ips(crtc) &&
pipe_config->pipe_bpp <= 24;
pipe_config_supports_ips(dev_priv, pipe_config);
}
 
static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 
/* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
int clock_limit =
dev_priv->display.get_display_clock_speed(dev);
int clock_limit = dev_priv->max_cdclk_freq;
 
/*
* Enable pixel doubling when the dot clock
5564,7 → 6653,7
* - LVDS dual channel mode
* - Double wide pipe
*/
if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
pipe_config->pipe_src_w &= ~1;
 
5572,17 → 6661,9
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
adjusted_mode->hsync_start == adjusted_mode->hdisplay)
adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
return -EINVAL;
 
if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
/* only a 8bpc pipe, with 6bpc dither through the panel fitter
* for lvds. */
pipe_config->pipe_bpp = 8*3;
}
 
if (HAS_IPS(dev))
hsw_compute_ips_config(crtc, pipe_config);
 
5592,32 → 6673,130
return 0;
}
 
static int valleyview_get_display_clock_speed(struct drm_device *dev)
static int skylake_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
int divider;
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
uint32_t cdctl = I915_READ(CDCLK_CTL);
uint32_t linkrate;
 
/* FIXME: Punit isn't quite ready yet */
if (IS_CHERRYVIEW(dev))
return 400000;
if (!(lcpll1 & LCPLL_PLL_ENABLE))
return 24000; /* 24MHz is the cd freq with NSSC ref */
 
if (dev_priv->hpll_freq == 0)
dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
return 540000;
 
mutex_lock(&dev_priv->dpio_lock);
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
mutex_unlock(&dev_priv->dpio_lock);
linkrate = (I915_READ(DPLL_CTRL1) &
DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
 
divider = val & DISPLAY_FREQUENCY_VALUES;
if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
linkrate == DPLL_CTRL1_LINK_RATE_1080) {
/* vco 8640 */
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
return 432000;
case CDCLK_FREQ_337_308:
return 308570;
case CDCLK_FREQ_675_617:
return 617140;
default:
WARN(1, "Unknown cd freq selection\n");
}
} else {
/* vco 8100 */
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
return 450000;
case CDCLK_FREQ_337_308:
return 337500;
case CDCLK_FREQ_675_617:
return 675000;
default:
WARN(1, "Unknown cd freq selection\n");
}
}
 
WARN((val & DISPLAY_FREQUENCY_STATUS) !=
(divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
"cdclk change in progress\n");
/* error case, do as if DPLL0 isn't enabled */
return 24000;
}
 
return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
static int broxton_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t cdctl = I915_READ(CDCLK_CTL);
uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
int cdclk;
 
if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
return 19200;
 
cdclk = 19200 * pll_ratio / 2;
 
switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
case BXT_CDCLK_CD2X_DIV_SEL_1:
return cdclk; /* 576MHz or 624MHz */
case BXT_CDCLK_CD2X_DIV_SEL_1_5:
return cdclk * 2 / 3; /* 384MHz */
case BXT_CDCLK_CD2X_DIV_SEL_2:
return cdclk / 2; /* 288MHz */
case BXT_CDCLK_CD2X_DIV_SEL_4:
return cdclk / 4; /* 144MHz */
}
 
/* error case, do as if DE PLL isn't enabled */
return 19200;
}
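/*
 * Worked example (illustrative): with pll_ratio = 60 the DE PLL runs
 * at 19200 * 60 / 2 = 576000 kHz, so a CD2X divider of 1.5 reports
 * 384000 kHz and a divider of 4 reports 144000 kHz.
 */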
 
static int broadwell_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
if (lcpll & LCPLL_CD_SOURCE_FCLK)
return 800000;
else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
return 450000;
else if (freq == LCPLL_CLK_FREQ_450)
return 450000;
else if (freq == LCPLL_CLK_FREQ_54O_BDW)
return 540000;
else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
return 337500;
else
return 675000;
}
 
static int haswell_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
if (lcpll & LCPLL_CD_SOURCE_FCLK)
return 800000;
else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
return 450000;
else if (freq == LCPLL_CLK_FREQ_450)
return 450000;
else if (IS_HSW_ULT(dev))
return 337500;
else
return 540000;
}
 
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
CCK_DISPLAY_CLOCK_CONTROL);
}
 
static int ilk_get_display_clock_speed(struct drm_device *dev)
{
return 450000;
}
 
static int i945_get_display_clock_speed(struct drm_device *dev)
{
return 400000;
5625,7 → 6804,7
 
static int i915_get_display_clock_speed(struct drm_device *dev)
{
return 333000;
return 333333;
}
 
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
5641,19 → 6820,19
 
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_267_MHZ_PNV:
return 267000;
return 266667;
case GC_DISPLAY_CLOCK_333_MHZ_PNV:
return 333000;
return 333333;
case GC_DISPLAY_CLOCK_444_MHZ_PNV:
return 444000;
return 444444;
case GC_DISPLAY_CLOCK_200_MHZ_PNV:
return 200000;
default:
DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
case GC_DISPLAY_CLOCK_133_MHZ_PNV:
return 133000;
return 133333;
case GC_DISPLAY_CLOCK_167_MHZ_PNV:
return 167000;
return 166667;
}
}
 
5664,11 → 6843,11
pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
 
if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
return 133000;
return 133333;
else {
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_333_MHZ:
return 333000;
return 333333;
default:
case GC_DISPLAY_CLOCK_190_200_MHZ:
return 190000;
5678,23 → 6857,40
 
static int i865_get_display_clock_speed(struct drm_device *dev)
{
return 266000;
return 266667;
}
 
static int i855_get_display_clock_speed(struct drm_device *dev)
static int i85x_get_display_clock_speed(struct drm_device *dev)
{
u16 hpllcc = 0;
 
/*
* 852GM/852GMV only supports 133 MHz and the HPLLCC
* encoding is different :(
* FIXME is this the right way to detect 852GM/852GMV?
*/
if (dev->pdev->revision == 0x1)
return 133333;
 
// pci_bus_read_config_word(dev->pdev->bus,
// PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
 
/* Assume that the hardware is in the high speed state. This
* should be the default.
*/
switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
case GC_CLOCK_133_200:
case GC_CLOCK_133_200_2:
case GC_CLOCK_100_200:
return 200000;
case GC_CLOCK_166_250:
return 250000;
case GC_CLOCK_100_133:
return 133000;
return 133333;
case GC_CLOCK_133_266:
case GC_CLOCK_133_266_2:
case GC_CLOCK_166_266:
return 266667;
}
 
/* Shouldn't happen */
5703,9 → 6899,178
 
static int i830_get_display_clock_speed(struct drm_device *dev)
{
return 133000;
return 133333;
}
 
static unsigned int intel_hpll_vco(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
static const unsigned int blb_vco[8] = {
[0] = 3200000,
[1] = 4000000,
[2] = 5333333,
[3] = 4800000,
[4] = 6400000,
};
static const unsigned int pnv_vco[8] = {
[0] = 3200000,
[1] = 4000000,
[2] = 5333333,
[3] = 4800000,
[4] = 2666667,
};
static const unsigned int cl_vco[8] = {
[0] = 3200000,
[1] = 4000000,
[2] = 5333333,
[3] = 6400000,
[4] = 3333333,
[5] = 3566667,
[6] = 4266667,
};
static const unsigned int elk_vco[8] = {
[0] = 3200000,
[1] = 4000000,
[2] = 5333333,
[3] = 4800000,
};
static const unsigned int ctg_vco[8] = {
[0] = 3200000,
[1] = 4000000,
[2] = 5333333,
[3] = 6400000,
[4] = 2666667,
[5] = 4266667,
};
const unsigned int *vco_table;
unsigned int vco;
uint8_t tmp = 0;
 
/* FIXME other chipsets? */
if (IS_GM45(dev))
vco_table = ctg_vco;
else if (IS_G4X(dev))
vco_table = elk_vco;
else if (IS_CRESTLINE(dev))
vco_table = cl_vco;
else if (IS_PINEVIEW(dev))
vco_table = pnv_vco;
else if (IS_G33(dev))
vco_table = blb_vco;
else
return 0;
 
tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
 
vco = vco_table[tmp & 0x7];
if (vco == 0)
DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
else
DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
 
return vco;
}
 
static int gm45_get_display_clock_speed(struct drm_device *dev)
{
unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
uint16_t tmp = 0;
 
pci_read_config_word(dev->pdev, GCFGC, &tmp);
 
cdclk_sel = (tmp >> 12) & 0x1;
 
switch (vco) {
case 2666667:
case 4000000:
case 5333333:
return cdclk_sel ? 333333 : 222222;
case 3200000:
return cdclk_sel ? 320000 : 228571;
default:
DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
return 222222;
}
}
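/*
 * Note (illustrative): the returns above correspond to fixed VCO
 * dividers, e.g. 5333333 / 16 ~= 333333 and 5333333 / 24 ~= 222222,
 * or 3200000 / 10 = 320000 and 3200000 / 14 ~= 228571.
 */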
 
static int i965gm_get_display_clock_speed(struct drm_device *dev)
{
static const uint8_t div_3200[] = { 16, 10, 8 };
static const uint8_t div_4000[] = { 20, 12, 10 };
static const uint8_t div_5333[] = { 24, 16, 14 };
const uint8_t *div_table;
unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
uint16_t tmp = 0;
 
pci_read_config_word(dev->pdev, GCFGC, &tmp);
 
cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
 
if (cdclk_sel >= ARRAY_SIZE(div_3200))
goto fail;
 
switch (vco) {
case 3200000:
div_table = div_3200;
break;
case 4000000:
div_table = div_4000;
break;
case 5333333:
div_table = div_5333;
break;
default:
goto fail;
}
 
return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
 
fail:
DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
return 200000;
}
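/*
 * Worked example (illustrative): with a 4000000 kHz VCO and GCFGC
 * selecting cdclk_sel = 1, div_4000[1] = 12 gives
 * DIV_ROUND_CLOSEST(4000000, 12) = 333333 kHz.
 */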
 
static int g33_get_display_clock_speed(struct drm_device *dev)
{
static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
const uint8_t *div_table;
unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
uint16_t tmp = 0;
 
pci_read_config_word(dev->pdev, GCFGC, &tmp);
 
cdclk_sel = (tmp >> 4) & 0x7;
 
if (cdclk_sel >= ARRAY_SIZE(div_3200))
goto fail;
 
switch (vco) {
case 3200000:
div_table = div_3200;
break;
case 4000000:
div_table = div_4000;
break;
case 4800000:
div_table = div_4800;
break;
case 5333333:
div_table = div_5333;
break;
default:
goto fail;
}
 
return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
 
fail:
DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
return 190476;
}
 
static void
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
{
5747,15 → 7112,18
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
 
static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
int num_connectors)
{
struct drm_device *dev = crtc->base.dev;
struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk;
 
if (IS_VALLEYVIEW(dev)) {
WARN_ON(!crtc_state->base.state);
 
if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
refclk = 100000;
} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
5779,6 → 7147,7
}
 
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
intel_clock_t *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
5785,24 → 7154,24
u32 fp, fp2 = 0;
 
if (IS_PINEVIEW(dev)) {
fp = pnv_dpll_compute_fp(&crtc->new_config->dpll);
fp = pnv_dpll_compute_fp(&crtc_state->dpll);
if (reduced_clock)
fp2 = pnv_dpll_compute_fp(reduced_clock);
} else {
fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
if (reduced_clock)
fp2 = i9xx_dpll_compute_fp(reduced_clock);
}
 
crtc->new_config->dpll_hw_state.fp0 = fp;
crtc_state->dpll_hw_state.fp0 = fp;
 
crtc->lowfreq_avail = false;
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915.powersave) {
crtc->new_config->dpll_hw_state.fp1 = fp2;
if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
reduced_clock) {
crtc_state->dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
crtc->new_config->dpll_hw_state.fp1 = fp;
crtc_state->dpll_hw_state.fp1 = fp;
}
}
 
5855,7 → 7224,7
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
enum transcoder transcoder = crtc->config.cpu_transcoder;
enum transcoder transcoder = crtc->config->cpu_transcoder;
 
if (INTEL_INFO(dev)->gen >= 5) {
I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
5866,8 → 7235,8
* for gen < 8) and if DRRS is supported (to make sure the
* registers are not unnecessarily accessed).
*/
if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
crtc->config.has_drrs) {
if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
crtc->config->has_drrs) {
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
5882,17 → 7251,33
}
}
 
void intel_dp_set_m_n(struct intel_crtc *crtc)
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
if (crtc->config.has_pch_encoder)
intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
 
if (m_n == M1_N1) {
dp_m_n = &crtc->config->dp_m_n;
dp_m2_n2 = &crtc->config->dp_m2_n2;
} else if (m_n == M2_N2) {
 
/*
* M2_N2 registers are not supported, so the m2_n2 divider value
* needs to be programmed into M1_N1.
*/
dp_m_n = &crtc->config->dp_m2_n2;
} else {
DRM_ERROR("Unsupported divider value\n");
return;
}
 
if (crtc->config->has_pch_encoder)
intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
else
intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n,
&crtc->config.dp_m2_n2);
intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}
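/*
 * Usage sketch (illustrative, based on the callers above): a full
 * modeset programs the primary divider, e.g.
 * intel_dp_set_m_n(intel_crtc, M1_N1); the M2_N2 path is used for DRRS
 * downclocking, and on hardware without separate M2/N2 registers the
 * m2_n2 values are written into M1_N1 instead.
 */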
 
static void vlv_update_pll(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
static void vlv_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
u32 dpll, dpll_md;
 
5901,8 → 7286,8
* clock for pipe B, since VGA hotplug / manual detection depends
* on it.
*/
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
/* We should never disable this, set it here for state tracking */
if (crtc->pipe == PIPE_B)
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5915,7 → 7300,7
}
 
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_config *pipe_config)
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
5924,7 → 7309,7
u32 bestn, bestm1, bestm2, bestp1, bestp2;
u32 coreclk, reg_val;
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
 
bestn = pipe_config->dpll.n;
bestm1 = pipe_config->dpll.m1;
5976,7 → 7361,7
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x00d0000f);
 
if (crtc->config.has_dp_encoder) {
if (pipe_config->has_dp_encoder) {
/* Use SSC source */
if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6002,14 → 7387,14
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
 
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
}
 
static void chv_update_pll(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
static void chv_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
DPLL_VCO_ENABLE;
if (crtc->pipe != PIPE_A)
pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6019,7 → 7404,7
}
 
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_config *pipe_config)
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
6026,9 → 7411,10
int pipe = crtc->pipe;
int dpll_reg = DPLL(crtc->pipe);
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 loopfilter, intcoeff;
u32 loopfilter, tribuf_calcntr;
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
int refclk;
u32 dpio_val;
int vco;
 
bestn = pipe_config->dpll.n;
bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
6036,6 → 7422,9
bestm2 = pipe_config->dpll.m2 >> 22;
bestp1 = pipe_config->dpll.p1;
bestp2 = pipe_config->dpll.p2;
vco = pipe_config->dpll.vco;
dpio_val = 0;
loopfilter = 0;
 
/*
* Enable Refclk and SSC
6043,7 → 7432,7
I915_WRITE(dpll_reg,
pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
 
/* p1 and p2 divider */
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
6064,29 → 7453,58
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
 
/* M2 fraction division enable */
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
DPIO_CHV_FRAC_DIV_EN |
(2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
if (bestm2_frac)
dpio_val |= DPIO_CHV_FRAC_DIV_EN;
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
 
/* Program digital lock detect threshold */
dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
if (!bestm2_frac)
dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
 
/* Loop filter */
refclk = i9xx_get_refclk(crtc, 0);
loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
2 << DPIO_CHV_GAIN_CTRL_SHIFT;
if (refclk == 100000)
intcoeff = 11;
else if (refclk == 38400)
intcoeff = 10;
else
intcoeff = 9;
loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
if (vco == 5400000) {
loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
tribuf_calcntr = 0x9;
} else if (vco <= 6200000) {
loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
tribuf_calcntr = 0x9;
} else if (vco <= 6480000) {
loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
tribuf_calcntr = 0x8;
} else {
/* Not supported. Apply the same limits as in the max case */
loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
tribuf_calcntr = 0;
}
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
 
dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
 
/* AFC Recal */
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
DPIO_AFC_RECAL);
 
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
}
 
/**
6104,17 → 7522,18
{
struct intel_crtc *crtc =
to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
struct intel_crtc_config pipe_config = {
struct intel_crtc_state pipe_config = {
.base.crtc = &crtc->base,
.pixel_multiplier = 1,
.dpll = *dpll,
};
 
if (IS_CHERRYVIEW(dev)) {
chv_update_pll(crtc, &pipe_config);
chv_compute_dpll(crtc, &pipe_config);
chv_prepare_pll(crtc, &pipe_config);
chv_enable_pll(crtc, &pipe_config);
} else {
vlv_update_pll(crtc, &pipe_config);
vlv_compute_dpll(crtc, &pipe_config);
vlv_prepare_pll(crtc, &pipe_config);
vlv_enable_pll(crtc, &pipe_config);
}
6136,7 → 7555,8
vlv_disable_pll(to_i915(dev), pipe);
}
 
static void i9xx_update_pll(struct intel_crtc *crtc,
static void i9xx_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
intel_clock_t *reduced_clock,
int num_connectors)
{
6144,22 → 7564,22
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll;
bool is_sdvo;
struct dpll *clock = &crtc->new_config->dpll;
struct dpll *clock = &crtc_state->dpll;
 
i9xx_update_pll_dividers(crtc, reduced_clock);
i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
 
is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
 
dpll = DPLL_VGA_MODE_DIS;
 
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
 
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
dpll |= (crtc->new_config->pixel_multiplier - 1)
dpll |= (crtc_state->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
 
6166,7 → 7586,7
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
 
if (crtc->new_config->has_dp_encoder)
if (crtc_state->has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
 
/* compute bitmask from p1 value */
6194,9 → 7614,9
if (INTEL_INFO(dev)->gen >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
 
if (crtc->new_config->sdvo_tv_clock)
if (crtc_state->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
6203,16 → 7623,17
dpll |= PLL_REF_INPUT_DREFCLK;
 
dpll |= DPLL_VCO_ENABLE;
crtc->new_config->dpll_hw_state.dpll = dpll;
crtc_state->dpll_hw_state.dpll = dpll;
 
if (INTEL_INFO(dev)->gen >= 4) {
u32 dpll_md = (crtc->new_config->pixel_multiplier - 1)
u32 dpll_md = (crtc_state->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
crtc->new_config->dpll_hw_state.dpll_md = dpll_md;
crtc_state->dpll_hw_state.dpll_md = dpll_md;
}
}
 
static void i8xx_update_pll(struct intel_crtc *crtc,
static void i8xx_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
intel_clock_t *reduced_clock,
int num_connectors)
{
6219,13 → 7640,13
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll;
struct dpll *clock = &crtc->new_config->dpll;
struct dpll *clock = &crtc_state->dpll;
 
i9xx_update_pll_dividers(crtc, reduced_clock);
i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
 
dpll = DPLL_VGA_MODE_DIS;
 
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
} else {
if (clock->p1 == 2)
6236,10 → 7657,10
dpll |= PLL_P2_DIVIDE_BY_4;
}
 
if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
 
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
6246,7 → 7667,7
dpll |= PLL_REF_INPUT_DREFCLK;
 
dpll |= DPLL_VCO_ENABLE;
crtc->new_config->dpll_hw_state.dpll = dpll;
crtc_state->dpll_hw_state.dpll = dpll;
}
 
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
6254,9 → 7675,8
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
uint32_t crtc_vtotal, crtc_vblank_end;
int vsyncshift = 0;
 
6314,12 → 7734,12
* always be the user's requested size.
*/
I915_WRITE(PIPESRC(pipe),
((intel_crtc->config.pipe_src_w - 1) << 16) |
(intel_crtc->config.pipe_src_h - 1));
((intel_crtc->config->pipe_src_w - 1) << 16) |
(intel_crtc->config->pipe_src_h - 1));
}
 
static void intel_get_pipe_timings(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
6327,29 → 7747,29
uint32_t tmp;
 
tmp = I915_READ(HTOTAL(cpu_transcoder));
pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(HBLANK(cpu_transcoder));
pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(HSYNC(cpu_transcoder));
pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
 
tmp = I915_READ(VTOTAL(cpu_transcoder));
pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VBLANK(cpu_transcoder));
pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VSYNC(cpu_transcoder));
pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
 
if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
pipe_config->adjusted_mode.crtc_vtotal += 1;
pipe_config->adjusted_mode.crtc_vblank_end += 1;
pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
pipe_config->base.adjusted_mode.crtc_vtotal += 1;
pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
}
 
tmp = I915_READ(PIPESRC(crtc->pipe));
6356,27 → 7776,32
pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
 
pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}
 
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
 
mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
 
mode->flags = pipe_config->adjusted_mode.flags;
mode->flags = pipe_config->base.adjusted_mode.flags;
mode->type = DRM_MODE_TYPE_DRIVER;
 
mode->clock = pipe_config->adjusted_mode.crtc_clock;
mode->flags |= pipe_config->adjusted_mode.flags;
mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
mode->flags |= pipe_config->base.adjusted_mode.flags;
 
mode->hsync = drm_mode_hsync(mode);
mode->vrefresh = drm_mode_vrefresh(mode);
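/*
 * drm_mode_vrefresh() derives the refresh rate from the timings filled
 * in above, roughly clock * 1000 / (htotal * vtotal) plus interlace /
 * doublescan adjustments: e.g. a 148500 kHz clock with htotal 2200 and
 * vtotal 1125 gives 60 Hz.
 */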
drm_mode_set_name(mode);
}
 
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
6391,17 → 7816,17
(intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
 
if (intel_crtc->config.double_wide)
if (intel_crtc->config->double_wide)
pipeconf |= PIPECONF_DOUBLE_WIDE;
 
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
pipeconf |= PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
 
switch (intel_crtc->config.pipe_bpp) {
switch (intel_crtc->config->pipe_bpp) {
case 18:
pipeconf |= PIPECONF_6BPC;
break;
6426,7 → 7851,7
}
}
 
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_INFO(dev)->gen < 4 ||
intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
6435,7 → 7860,7
} else
pipeconf |= PIPECONF_PROGRESSIVE;
 
if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
 
I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
6442,25 → 7867,32
POSTING_READ(PIPECONF(intel_crtc->pipe));
}
 
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
bool ok, has_reduced_clock = false;
bool is_lvds = false, is_dsi = false;
intel_clock_t clock;
bool ok;
bool is_dsi = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
struct drm_atomic_state *state = crtc_state->base.state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int i;
 
for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc != crtc)
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
 
for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != &crtc->base)
continue;
 
encoder = to_intel_encoder(connector_state->best_encoder);
 
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_DSI:
is_dsi = true;
break;
6474,8 → 7906,8
if (is_dsi)
return 0;
 
if (!crtc->new_config->clock_set) {
refclk = i9xx_get_refclk(crtc, num_connectors);
if (!crtc_state->clock_set) {
refclk = i9xx_get_refclk(crtc_state, num_connectors);
 
/*
* Returns a set of divisors for the desired target clock with
6483,9 → 7915,9
* the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
* 2) / p1 / p2.
*/
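/*
 * Illustrative numbers only: refclk = 96000 kHz, m1 = 10, m2 = 8,
 * n = 2, p1 = 2, p2 = 4 gives 96000 * (5 * 12 + 10) / 4 / 2 / 4
 * = 210000 kHz.
 */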
limit = intel_limit(crtc, refclk);
ok = dev_priv->display.find_dpll(limit, crtc,
crtc->new_config->port_clock,
limit = intel_limit(crtc_state, refclk);
ok = dev_priv->display.find_dpll(limit, crtc_state,
crtc_state->port_clock,
refclk, NULL, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
6492,38 → 7924,23
return -EINVAL;
}
 
if (is_lvds && dev_priv->lvds_downclock_avail) {
/*
* Ensure we match the reduced clock's P to the target
* clock. If the clocks don't match, we can't switch
* the display clock by using the FP0/FP1. In such case
* we will disable the LVDS downclock feature.
*/
has_reduced_clock =
dev_priv->display.find_dpll(limit, crtc,
dev_priv->lvds_downclock,
refclk, &clock,
&reduced_clock);
}
/* Compat-code for transition, will disappear. */
crtc->new_config->dpll.n = clock.n;
crtc->new_config->dpll.m1 = clock.m1;
crtc->new_config->dpll.m2 = clock.m2;
crtc->new_config->dpll.p1 = clock.p1;
crtc->new_config->dpll.p2 = clock.p2;
crtc_state->dpll.n = clock.n;
crtc_state->dpll.m1 = clock.m1;
crtc_state->dpll.m2 = clock.m2;
crtc_state->dpll.p1 = clock.p1;
crtc_state->dpll.p2 = clock.p2;
}
 
if (IS_GEN2(dev)) {
i8xx_update_pll(crtc,
has_reduced_clock ? &reduced_clock : NULL,
i8xx_compute_dpll(crtc, crtc_state, NULL,
num_connectors);
} else if (IS_CHERRYVIEW(dev)) {
chv_update_pll(crtc, crtc->new_config);
chv_compute_dpll(crtc, crtc_state);
} else if (IS_VALLEYVIEW(dev)) {
vlv_update_pll(crtc, crtc->new_config);
vlv_compute_dpll(crtc, crtc_state);
} else {
i9xx_update_pll(crtc,
has_reduced_clock ? &reduced_clock : NULL,
i9xx_compute_dpll(crtc, crtc_state, NULL,
num_connectors);
}
 
6531,7 → 7948,7
}
 
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
6561,7 → 7978,7
}
 
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
6574,9 → 7991,9
if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
return;
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
 
clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
clock.m2 = mdiv & DPIO_M2DIV_MASK;
6584,14 → 8001,12
clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
 
vlv_clock(refclk, &clock);
 
/* clock.dot is the fast clock */
pipe_config->port_clock = clock.dot / 5;
pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
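/*
 * vlv_calc_dpll_params() evaluates vco = refclk * m1 * m2 / n and
 * dot = vco / (p1 * p2), and returns dot / 5 since clock.dot is the
 * fast (5x) clock.
 */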
}
 
static void i9xx_get_plane_config(struct intel_crtc *crtc,
struct intel_plane_config *plane_config)
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
6598,28 → 8013,36
u32 val, base, offset;
int pipe = crtc->pipe, plane = crtc->plane;
int fourcc, pixel_format;
int aligned_height;
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
 
crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
if (!crtc->base.primary->fb) {
val = I915_READ(DSPCNTR(plane));
if (!(val & DISPLAY_PLANE_ENABLE))
return;
 
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
return;
}
 
val = I915_READ(DSPCNTR(plane));
fb = &intel_fb->base;
 
if (INTEL_INFO(dev)->gen >= 4)
if (val & DISPPLANE_TILED)
plane_config->tiled = true;
if (INTEL_INFO(dev)->gen >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
}
}
 
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = intel_format_to_fourcc(pixel_format);
crtc->base.primary->fb->pixel_format = fourcc;
crtc->base.primary->fb->bits_per_pixel =
drm_format_plane_cpp(fourcc, 0) * 8;
fourcc = i9xx_format_to_fourcc(pixel_format);
fb->pixel_format = fourcc;
fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
 
if (INTEL_INFO(dev)->gen >= 4) {
if (plane_config->tiled)
if (plane_config->tiling)
offset = I915_READ(DSPTILEOFF(plane));
else
offset = I915_READ(DSPLINOFF(plane));
6630,29 → 8053,30
plane_config->base = base;
 
val = I915_READ(PIPESRC(pipe));
crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
fb->width = ((val >> 16) & 0xfff) + 1;
fb->height = ((val >> 0) & 0xfff) + 1;
 
val = I915_READ(DSPSTRIDE(pipe));
crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
/* KolibriOS port: ignore the hardware-programmed stride and assume a
 * fixed 2560-pixel-wide framebuffer. */
// fb->pitches[0] = val & 0xffffffc0;
fb->pitches[0] = 2560*4;
 
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
aligned_height = intel_fb_align_height(dev, fb->height,
fb->pixel_format,
fb->modifier[0]);
 
plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
aligned_height);
/* KolibriOS port: the framebuffer size comes from the i915_fbsize
 * module option (in MiB), not from the plane geometry. */
// plane_config->size = fb->pitches[0] * aligned_height;
plane_config->size = i915_fbsize*1024*1024;
 
DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe, plane, crtc->base.primary->fb->width,
crtc->base.primary->fb->height,
crtc->base.primary->fb->bits_per_pixel, base,
crtc->base.primary->fb->pitches[0],
DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe_name(pipe), plane, fb->width, fb->height,
fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
 
plane_config->fb = intel_fb;
}
 
static void chv_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
6659,30 → 8083,30
int pipe = pipe_config->cpu_transcoder;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
intel_clock_t clock;
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
int refclk = 100000;
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
mutex_unlock(&dev_priv->dpio_lock);
pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
mutex_unlock(&dev_priv->sb_lock);
 
clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
clock.m2 = (pll_dw0 & 0xff) << 22;
if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
clock.m2 |= pll_dw2 & 0x3fffff;
clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
 
chv_clock(refclk, &clock);
 
/* clock.dot is the fast clock */
pipe_config->port_clock = clock.dot / 5;
pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
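/*
 * Same vco/dot calculation as on VLV, but CHV m2 is a fixed-point
 * value: 8 integer bits from pll_dw0 shifted up by 22, plus a 22-bit
 * fraction from pll_dw2 that only counts when DPIO_CHV_FRAC_DIV_EN
 * is set (hence the conditional OR above).
 */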
}
 
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
6768,6 → 8192,14
else
i9xx_crtc_clock_get(crtc, pipe_config);
 
/*
* Normally the dotclock is filled in by the encoder .get_config()
* but in case the pipe is enabled w/o any ports we need a sane
* default.
*/
pipe_config->base.adjusted_mode.crtc_clock =
pipe_config->port_clock / pipe_config->pixel_multiplier;
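/*
 * e.g. an SDVO pipe with port_clock 200000 kHz and pixel_multiplier 2
 * gets a 100000 kHz dotclock here.
 */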
 
return true;
}
 
7029,11 → 8461,10
 
if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
with_spread = true;
if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
with_fdi, "LP PCH doesn't have FDI\n"))
if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
 
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_DISABLE;
7053,13 → 8484,12
}
}
 
reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
SBI_GEN0 : SBI_DBUFF0;
reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
 
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
}
 
/* Sequence to disable CLKOUT_DP */
7068,10 → 8498,9
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t reg, tmp;
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
 
reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
SBI_GEN0 : SBI_DBUFF0;
reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
7087,7 → 8516,7
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
}
 
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
}
 
static void lpt_init_pch_refclk(struct drm_device *dev)
7122,18 → 8551,23
lpt_init_pch_refclk(dev);
}
 
static int ironlake_get_refclk(struct drm_crtc *crtc)
static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_atomic_state *state = crtc_state->base.state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
int num_connectors = 0;
int num_connectors = 0, i;
bool is_lvds = false;
 
for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc != to_intel_crtc(crtc))
for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->base.crtc)
continue;
 
encoder = to_intel_encoder(connector_state->best_encoder);
 
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
7162,7 → 8596,7
 
val = 0;
 
switch (intel_crtc->config.pipe_bpp) {
switch (intel_crtc->config->pipe_bpp) {
case 18:
val |= PIPECONF_6BPC;
break;
7180,15 → 8614,15
BUG();
}
 
if (intel_crtc->config.dither)
if (intel_crtc->config->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
 
if (intel_crtc->config.limited_color_range)
if (intel_crtc->config->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
 
I915_WRITE(PIPECONF(pipe), val);
7217,7 → 8651,7
* consideration.
*/
 
if (intel_crtc->config.limited_color_range)
if (intel_crtc->config->limited_color_range)
coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
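/* i.e. 219 * 4096 / 255 = 0xdbd, masked down to 0xdb8 (~0.857). */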
 
/*
7241,7 → 8675,7
if (INTEL_INFO(dev)->gen > 6) {
uint16_t postoff = 0;
 
if (intel_crtc->config.limited_color_range)
if (intel_crtc->config->limited_color_range)
postoff = (16 * (1 << 12) / 255) & 0x1fff;
 
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
7252,7 → 8686,7
} else {
uint32_t mode = CSC_MODE_YUV_TO_RGB;
 
if (intel_crtc->config.limited_color_range)
if (intel_crtc->config->limited_color_range)
mode |= CSC_BLACK_SCREEN_OFFSET;
 
I915_WRITE(PIPE_CSC_MODE(pipe), mode);
7265,15 → 8699,15
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
uint32_t val;
 
val = 0;
 
if (IS_HASWELL(dev) && intel_crtc->config.dither)
if (IS_HASWELL(dev) && intel_crtc->config->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
7287,7 → 8721,7
if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
val = 0;
 
switch (intel_crtc->config.pipe_bpp) {
switch (intel_crtc->config->pipe_bpp) {
case 18:
val |= PIPEMISC_DITHER_6_BPC;
break;
7305,7 → 8739,7
BUG();
}
 
if (intel_crtc->config.dither)
if (intel_crtc->config->dither)
val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
 
I915_WRITE(PIPEMISC(pipe), val);
7313,6 → 8747,7
}
 
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
struct intel_crtc_state *crtc_state,
intel_clock_t *clock,
bool *has_reduced_clock,
intel_clock_t *reduced_clock)
7319,41 → 8754,24
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int refclk;
const intel_limit_t *limit;
bool ret, is_lvds = false;
bool ret;
 
is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);
refclk = ironlake_get_refclk(crtc_state);
 
refclk = ironlake_get_refclk(crtc);
 
/*
* Returns a set of divisors for the desired target clock with the given
* refclk, or FALSE. The returned values represent the clock equation:
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(intel_crtc, refclk);
ret = dev_priv->display.find_dpll(limit, intel_crtc,
intel_crtc->new_config->port_clock,
limit = intel_limit(crtc_state, refclk);
ret = dev_priv->display.find_dpll(limit, crtc_state,
crtc_state->port_clock,
refclk, NULL, clock);
if (!ret)
return false;
 
if (is_lvds && dev_priv->lvds_downclock_avail) {
/*
* Ensure we match the reduced clock's P to the target clock.
* If the clocks don't match, we can't switch the display clock
* by using the FP0/FP1. In such case we will disable the LVDS
* downclock feature.
*/
*has_reduced_clock =
dev_priv->display.find_dpll(limit, intel_crtc,
dev_priv->lvds_downclock,
refclk, clock,
reduced_clock);
}
 
return true;
}
 
7374,6 → 8792,7
}
 
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
u32 *fp,
intel_clock_t *reduced_clock, u32 *fp2)
{
7380,16 → 8799,21
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct drm_atomic_state *state = crtc_state->base.state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
uint32_t dpll;
int factor, num_connectors = 0;
int factor, num_connectors = 0, i;
bool is_lvds = false, is_sdvo = false;
 
for_each_intel_encoder(dev, intel_encoder) {
if (intel_encoder->new_crtc != to_intel_crtc(crtc))
for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->base.crtc)
continue;
 
switch (intel_encoder->type) {
encoder = to_intel_encoder(connector_state->best_encoder);
 
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
7411,10 → 8835,10
dev_priv->vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
factor = 25;
} else if (intel_crtc->new_config->sdvo_tv_clock)
} else if (crtc_state->sdvo_tv_clock)
factor = 20;
 
if (ironlake_needs_fb_cb_tune(&intel_crtc->new_config->dpll, factor))
if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
*fp |= FP_CB_TUNE;
 
if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
7427,20 → 8851,20
else
dpll |= DPLLB_MODE_DAC_SERIAL;
 
dpll |= (intel_crtc->new_config->pixel_multiplier - 1)
dpll |= (crtc_state->pixel_multiplier - 1)
<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
 
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
if (intel_crtc->new_config->has_dp_encoder)
if (crtc_state->has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
 
/* compute bitmask from p1 value */
dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 
switch (intel_crtc->new_config->dpll.p2) {
switch (crtc_state->dpll.p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
7463,7 → 8887,8
return dpll | DPLL_VCO_ENABLE;
}
 
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc)
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock, reduced_clock;
7472,44 → 8897,47
bool is_lvds = false;
struct intel_shared_dpll *pll;
 
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
 
is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
 
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
"Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
 
ok = ironlake_compute_clocks(&crtc->base, &clock,
ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
&has_reduced_clock, &reduced_clock);
if (!ok && !crtc->new_config->clock_set) {
if (!ok && !crtc_state->clock_set) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
/* Compat-code for transition, will disappear. */
if (!crtc->new_config->clock_set) {
crtc->new_config->dpll.n = clock.n;
crtc->new_config->dpll.m1 = clock.m1;
crtc->new_config->dpll.m2 = clock.m2;
crtc->new_config->dpll.p1 = clock.p1;
crtc->new_config->dpll.p2 = clock.p2;
if (!crtc_state->clock_set) {
crtc_state->dpll.n = clock.n;
crtc_state->dpll.m1 = clock.m1;
crtc_state->dpll.m2 = clock.m2;
crtc_state->dpll.p1 = clock.p1;
crtc_state->dpll.p2 = clock.p2;
}
 
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (crtc->new_config->has_pch_encoder) {
fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
if (crtc_state->has_pch_encoder) {
fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
if (has_reduced_clock)
fp2 = i9xx_dpll_compute_fp(&reduced_clock);
 
dpll = ironlake_compute_dpll(crtc,
dpll = ironlake_compute_dpll(crtc, crtc_state,
&fp, &reduced_clock,
has_reduced_clock ? &fp2 : NULL);
 
crtc->new_config->dpll_hw_state.dpll = dpll;
crtc->new_config->dpll_hw_state.fp0 = fp;
crtc_state->dpll_hw_state.dpll = dpll;
crtc_state->dpll_hw_state.fp0 = fp;
if (has_reduced_clock)
crtc->new_config->dpll_hw_state.fp1 = fp2;
crtc_state->dpll_hw_state.fp1 = fp2;
else
crtc->new_config->dpll_hw_state.fp1 = fp;
crtc_state->dpll_hw_state.fp1 = fp;
 
pll = intel_get_shared_dpll(crtc);
pll = intel_get_shared_dpll(crtc, crtc_state);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
7517,7 → 8945,7
}
}
 
if (is_lvds && has_reduced_clock && i915.powersave)
if (is_lvds && has_reduced_clock)
crtc->lowfreq_avail = true;
else
crtc->lowfreq_avail = false;
7563,7 → 8991,7
* registers are not unnecessarily read).
*/
if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
crtc->config.has_drrs) {
crtc->config->has_drrs) {
m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
7584,9 → 9012,9
}
 
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
if (crtc->config.has_pch_encoder)
if (pipe_config->has_pch_encoder)
intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
else
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7595,7 → 9023,7
}
 
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
&pipe_config->fdi_m_n, NULL);
7602,23 → 9030,123
}
 
static void skylake_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
uint32_t ps_ctrl = 0;
int id = -1;
int i;
 
tmp = I915_READ(PS_CTL(crtc->pipe));
 
if (tmp & PS_ENABLE) {
/* find scaler attached to this pipe */
for (i = 0; i < crtc->num_scalers; i++) {
ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
id = i;
pipe_config->pch_pfit.enabled = true;
pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
break;
}
}
 
scaler_state->scaler_id = id;
if (id >= 0) {
scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
} else {
scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
}
 
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, base, offset, stride_mult, tiling;
int pipe = crtc->pipe;
int fourcc, pixel_format;
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
 
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
return;
}
 
fb = &intel_fb->base;
 
val = I915_READ(PLANE_CTL(pipe, 0));
if (!(val & PLANE_CTL_ENABLE))
goto error;
 
pixel_format = val & PLANE_CTL_FORMAT_MASK;
fourcc = skl_format_to_fourcc(pixel_format,
val & PLANE_CTL_ORDER_RGBX,
val & PLANE_CTL_ALPHA_MASK);
fb->pixel_format = fourcc;
fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
 
tiling = val & PLANE_CTL_TILED_MASK;
switch (tiling) {
case PLANE_CTL_TILED_LINEAR:
fb->modifier[0] = DRM_FORMAT_MOD_NONE;
break;
case PLANE_CTL_TILED_X:
plane_config->tiling = I915_TILING_X;
fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
break;
case PLANE_CTL_TILED_Y:
fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
break;
case PLANE_CTL_TILED_YF:
fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
break;
default:
MISSING_CASE(tiling);
goto error;
}
 
base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
plane_config->base = base;
 
offset = I915_READ(PLANE_OFFSET(pipe, 0));
 
val = I915_READ(PLANE_SIZE(pipe, 0));
fb->height = ((val >> 16) & 0xfff) + 1;
fb->width = ((val >> 0) & 0x1fff) + 1;
 
val = I915_READ(PLANE_STRIDE(pipe, 0));
stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
fb->pixel_format);
/* KolibriOS port: fixed 2560-pixel stride, as in the i9xx path. */
// fb->pitches[0] = (val & 0x3ff) * stride_mult;
fb->pitches[0] = 2560*4;
 
aligned_height = intel_fb_align_height(dev, fb->height,
fb->pixel_format,
fb->modifier[0]);
 
/* KolibriOS port: size from the i915_fbsize option, as above. */
// plane_config->size = fb->pitches[0] * aligned_height;
plane_config->size = i915_fbsize*1024*1024;
 
DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe_name(pipe), fb->width, fb->height,
fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
 
plane_config->fb = intel_fb;
return;
 
error:
kfree(fb);
}
 
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
7641,68 → 9169,79
}
}
 
static void ironlake_get_plane_config(struct intel_crtc *crtc,
struct intel_plane_config *plane_config)
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, base, offset;
int pipe = crtc->pipe, plane = crtc->plane;
int pipe = crtc->pipe;
int fourcc, pixel_format;
int aligned_height;
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
 
crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
if (!crtc->base.primary->fb) {
val = I915_READ(DSPCNTR(pipe));
if (!(val & DISPLAY_PLANE_ENABLE))
return;
 
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
return;
}
 
val = I915_READ(DSPCNTR(plane));
fb = &intel_fb->base;
 
if (INTEL_INFO(dev)->gen >= 4)
if (val & DISPPLANE_TILED)
plane_config->tiled = true;
if (INTEL_INFO(dev)->gen >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
}
}
 
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = intel_format_to_fourcc(pixel_format);
crtc->base.primary->fb->pixel_format = fourcc;
crtc->base.primary->fb->bits_per_pixel =
drm_format_plane_cpp(fourcc, 0) * 8;
fourcc = i9xx_format_to_fourcc(pixel_format);
fb->pixel_format = fourcc;
fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
 
base = I915_READ(DSPSURF(plane)) & 0xfffff000;
base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
offset = I915_READ(DSPOFFSET(plane));
offset = I915_READ(DSPOFFSET(pipe));
} else {
if (plane_config->tiled)
offset = I915_READ(DSPTILEOFF(plane));
if (plane_config->tiling)
offset = I915_READ(DSPTILEOFF(pipe));
else
offset = I915_READ(DSPLINOFF(plane));
offset = I915_READ(DSPLINOFF(pipe));
}
plane_config->base = base;
 
val = I915_READ(PIPESRC(pipe));
crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
fb->width = ((val >> 16) & 0xfff) + 1;
fb->height = ((val >> 0) & 0xfff) + 1;
 
val = I915_READ(DSPSTRIDE(pipe));
crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
/* KolibriOS port: fixed 2560-pixel stride, as in the i9xx path. */
// fb->pitches[0] = val & 0xffffffc0;
fb->pitches[0] = 2560*4;
 
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
aligned_height = intel_fb_align_height(dev, fb->height,
fb->pixel_format,
fb->modifier[0]);
 
plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
aligned_height);
/* KolibriOS port: size from the i915_fbsize option, as above. */
// plane_config->size = fb->pitches[0] * aligned_height;
plane_config->size = i915_fbsize*1024*1024;
 
DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe, plane, crtc->base.primary->fb->width,
crtc->base.primary->fb->height,
crtc->base.primary->fb->bits_per_pixel, base,
crtc->base.primary->fb->pitches[0],
DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe_name(pipe), fb->width, fb->height,
fb->bits_per_pixel, base, fb->pitches[0],
plane_config->size);
 
plane_config->fb = intel_fb;
}
 
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
7789,24 → 9328,24
struct intel_crtc *crtc;
 
for_each_intel_crtc(dev, crtc)
WARN(crtc->active, "CRTC for pipe %c enabled\n",
I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
 
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
if (IS_HASWELL(dev))
WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
"PCH PWM1 enabled\n");
WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Utility pin enabled\n");
WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
 
/*
* In theory we can still leave IRQs enabled, as long as only the HPD
7814,7 → 9353,7
* gen-specific and since we only disable LCPLL after we fully disable
* the interrupts, the check below should be enough.
*/
WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
 
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
7912,19 → 9451,8
/*
* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine. To prevent PC8 state, just enable force_wake.
*
* The other problem is that hsw_restore_lcpll() is called as part of
* the runtime PM resume sequence, so we can't just call
* gen6_gt_force_wake_get() because that function calls
* intel_runtime_pm_get(), and we can't change the runtime PM refcount
* while we are on the resume sequence. So to solve this problem we have
* to call special forcewake code that doesn't touch runtime PM and
* doesn't enable the forcewake delayed work.
*/
spin_lock_irq(&dev_priv->uncore.lock);
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
spin_unlock_irq(&dev_priv->uncore.lock);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
7954,11 → 9482,8
DRM_ERROR("Switching back to LCPLL failed\n");
}
 
/* See the big comment above. */
spin_lock_irq(&dev_priv->uncore.lock);
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
spin_unlock_irq(&dev_priv->uncore.lock);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_update_cdclk(dev_priv->dev);
}
 
/*
7991,7 → 9516,7
 
DRM_DEBUG_KMS("Enabling package C8+\n");
 
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
if (HAS_PCH_LPT_LP(dev)) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
8011,7 → 9536,7
hsw_restore_lcpll(dev_priv);
lpt_init_pch_refclk(dev);
 
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
if (HAS_PCH_LPT_LP(dev)) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
val |= PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
8020,9 → 9545,166
intel_prepare_ddi(dev);
}
 
static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
if (!intel_ddi_pll_select(crtc))
struct drm_device *dev = old_state->dev;
unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
 
broxton_set_cdclk(dev, req_cdclk);
}
 
/* compute the max pixel rate for the new configuration */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state;
int max_pixel_rate = 0;
 
for_each_intel_crtc(state->dev, intel_crtc) {
int pixel_rate;
 
crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
 
if (!crtc_state->base.enable)
continue;
 
pixel_rate = ilk_pipe_pixel_rate(crtc_state);
 
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
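/*
 * e.g. a 400000 kHz pipe with IPS active is accounted as
 * DIV_ROUND_UP(400000 * 100, 95) = 421053 kHz.
 */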
 
max_pixel_rate = max(max_pixel_rate, pixel_rate);
}
 
return max_pixel_rate;
}
 
static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val, data;
int ret;
 
if (WARN((I915_READ(LCPLL_CTL) &
(LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
"trying to change cdclk frequency with cdclk not enabled\n"))
return;
 
mutex_lock(&dev_priv->rps.hw_lock);
ret = sandybridge_pcode_write(dev_priv,
BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
mutex_unlock(&dev_priv->rps.hw_lock);
if (ret) {
DRM_ERROR("failed to inform pcode about cdclk change\n");
return;
}
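/*
 * Reprogramming sequence: source the CD clock from FCLK, rewrite the
 * LCPLL_CLK_FREQ field, then switch back to the LCPLL, telling pcode
 * about the change before and after.
 */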
 
val = I915_READ(LCPLL_CTL);
val |= LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
 
if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 1))
DRM_ERROR("Switching to FCLK failed\n");
 
val = I915_READ(LCPLL_CTL);
val &= ~LCPLL_CLK_FREQ_MASK;
 
switch (cdclk) {
case 450000:
val |= LCPLL_CLK_FREQ_450;
data = 0;
break;
case 540000:
val |= LCPLL_CLK_FREQ_54O_BDW;
data = 1;
break;
case 337500:
val |= LCPLL_CLK_FREQ_337_5_BDW;
data = 2;
break;
case 675000:
val |= LCPLL_CLK_FREQ_675_BDW;
data = 3;
break;
default:
WARN(1, "invalid cdclk frequency\n");
return;
}
 
I915_WRITE(LCPLL_CTL, val);
 
val = I915_READ(LCPLL_CTL);
val &= ~LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
 
if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
DRM_ERROR("Switching back to LCPLL failed\n");
 
mutex_lock(&dev_priv->rps.hw_lock);
sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
mutex_unlock(&dev_priv->rps.hw_lock);
 
intel_update_cdclk(dev);
 
WARN(cdclk != dev_priv->cdclk_freq,
"cdclk requested %d kHz but got %d kHz\n",
cdclk, dev_priv->cdclk_freq);
}
 
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
int max_pixclk = ilk_max_pixel_rate(state);
int cdclk;
 
/*
* FIXME should also account for plane ratio
* once 64bpp pixel formats are supported.
*/
if (max_pixclk > 540000)
cdclk = 675000;
else if (max_pixclk > 450000)
cdclk = 540000;
else if (max_pixclk > 337500)
cdclk = 450000;
else
cdclk = 337500;
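/* e.g. max_pixclk = 460000 kHz exceeds 450000 but not 540000,
 * so a 540000 kHz cdclk is selected. */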
 
/*
* FIXME move the cdclk calculation to
* compute_config() so we can fail gracefully.
*/
if (cdclk > dev_priv->max_cdclk_freq) {
DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
cdclk, dev_priv->max_cdclk_freq);
cdclk = dev_priv->max_cdclk_freq;
}
 
to_intel_atomic_state(state)->cdclk = cdclk;
 
return 0;
}
 
static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
 
broadwell_set_cdclk(dev, req_cdclk);
}
 
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
if (!intel_ddi_pll_select(crtc, crtc_state))
return -EINVAL;
 
crtc->lowfreq_avail = false;
8030,16 → 9712,47
return 0;
}
 
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_state *pipe_config)
{
switch (port) {
case PORT_A:
pipe_config->ddi_pll_sel = SKL_DPLL0;
pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
break;
case PORT_B:
pipe_config->ddi_pll_sel = SKL_DPLL1;
pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
break;
case PORT_C:
pipe_config->ddi_pll_sel = SKL_DPLL2;
pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
break;
default:
DRM_ERROR("Incorrect port type\n");
}
}
 
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
u32 temp;
u32 temp, dpll_ctl1;
 
temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
 
switch (pipe_config->ddi_pll_sel) {
case SKL_DPLL0:
/*
* On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
* of the shared DPLL framework and thus needs to be read out
* separately
*/
dpll_ctl1 = I915_READ(DPLL_CTRL1);
pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
break;
case SKL_DPLL1:
pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
break;
8054,7 → 9767,7
 
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
 
8065,11 → 9778,13
case PORT_CLK_SEL_WRPLL2:
pipe_config->shared_dpll = DPLL_ID_WRPLL2;
break;
case PORT_CLK_SEL_SPLL:
pipe_config->shared_dpll = DPLL_ID_SPLL;
}
}
 
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
8083,6 → 9798,8
 
if (IS_SKYLAKE(dev))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_BROXTON(dev))
bxt_get_ddi_pll(dev_priv, port, pipe_config);
else
haswell_get_ddi_pll(dev_priv, port, pipe_config);
 
8111,7 → 9828,7
}
 
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
8159,9 → 9876,19
 
intel_get_pipe_timings(crtc, pipe_config);
 
if (INTEL_INFO(dev)->gen >= 9) {
skl_init_scalers(dev, crtc, pipe_config);
}
 
pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
 
if (INTEL_INFO(dev)->gen >= 9) {
pipe_config->scaler_state.scaler_id = -1;
pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
 
if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
if (IS_SKYLAKE(dev))
if (INTEL_INFO(dev)->gen >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else
ironlake_get_pfit_config(crtc, pipe_config);
8181,7 → 9908,7
return true;
}
 
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
8188,9 → 9915,9
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t cntl = 0, size = 0;
 
if (base) {
unsigned int width = intel_crtc->cursor_width;
unsigned int height = intel_crtc->cursor_height;
if (on) {
unsigned int width = intel_crtc->base.cursor->state->crtc_w;
unsigned int height = intel_crtc->base.cursor->state->crtc_h;
unsigned int stride = roundup_pow_of_two(width) * 4;
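/*
 * e.g. a 100 pixel wide cursor rounds up to 128 pixels, i.e. a
 * 128 * 4 = 512 byte stride at 4 bytes per pixel.
 */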
 
switch (stride) {
8221,13 → 9948,13
/* On these chipsets we can only modify the base/size/stride
* whilst the cursor is disabled.
*/
I915_WRITE(_CURACNTR, 0);
POSTING_READ(_CURACNTR);
I915_WRITE(CURCNTR(PIPE_A), 0);
POSTING_READ(CURCNTR(PIPE_A));
intel_crtc->cursor_cntl = 0;
}
 
if (intel_crtc->cursor_base != base) {
I915_WRITE(_CURABASE, base);
I915_WRITE(CURBASE(PIPE_A), base);
intel_crtc->cursor_base = base;
}
 
8237,24 → 9964,23
}
 
if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE(_CURACNTR, cntl);
POSTING_READ(_CURACNTR);
I915_WRITE(CURCNTR(PIPE_A), cntl);
POSTING_READ(CURCNTR(PIPE_A));
intel_crtc->cursor_cntl = cntl;
}
}
 
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
uint32_t cntl;
uint32_t cntl = 0;
 
cntl = 0;
if (base) {
if (on) {
cntl = MCURSOR_GAMMA_ENABLE;
switch (intel_crtc->cursor_width) {
switch (intel_crtc->base.cursor->state->crtc_w) {
case 64:
cntl |= CURSOR_MODE_64_ARGB_AX;
break;
8265,16 → 9991,16
cntl |= CURSOR_MODE_256_ARGB_AX;
break;
default:
WARN_ON(1);
MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
return;
}
cntl |= pipe << 28; /* Connect to correct pipe */
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
if (HAS_DDI(dev))
cntl |= CURSOR_PIPE_CSC_ENABLE;
}
 
if (to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180))
if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
cntl |= CURSOR_ROTATE_180;
 
if (intel_crtc->cursor_cntl != cntl) {
8298,22 → 10024,22
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int x = crtc->cursor_x;
int y = crtc->cursor_y;
struct drm_plane_state *cursor_state = crtc->cursor->state;
int x = cursor_state->crtc_x;
int y = cursor_state->crtc_y;
u32 base = 0, pos = 0;
 
if (on)
base = intel_crtc->cursor_addr;
 
if (x >= intel_crtc->config.pipe_src_w)
base = 0;
if (x >= intel_crtc->config->pipe_src_w)
on = false;
 
if (y >= intel_crtc->config.pipe_src_h)
base = 0;
if (y >= intel_crtc->config->pipe_src_h)
on = false;
 
if (x < 0) {
if (x + intel_crtc->cursor_width <= 0)
base = 0;
if (x + cursor_state->crtc_w <= 0)
on = false;
 
pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
x = -x;
8321,8 → 10047,8
pos |= x << CURSOR_X_SHIFT;
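/*
 * CURPOS takes sign-magnitude coordinates: x = -10 is written as
 * magnitude 10 with CURSOR_POS_SIGN set, not as two's complement.
 */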
 
if (y < 0) {
if (y + intel_crtc->cursor_height <= 0)
base = 0;
if (y + cursor_state->crtc_h <= 0)
on = false;
 
pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
y = -y;
8329,22 → 10055,19
}
pos |= y << CURSOR_Y_SHIFT;
 
if (base == 0 && intel_crtc->cursor_base == 0)
return;
 
I915_WRITE(CURPOS(pipe), pos);
 
/* ILK+ do this automagically */
if (HAS_GMCH_DISPLAY(dev) &&
to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) {
base += (intel_crtc->cursor_height *
intel_crtc->cursor_width - 1) * 4;
crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
base += (cursor_state->crtc_h *
cursor_state->crtc_w - 1) * 4;
}
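/*
 * With the plane scanned out backwards, base must point at the last
 * pixel: a 64x64 cursor adds (64 * 64 - 1) * 4 = 16380 bytes.
 */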
 
if (IS_845G(dev) || IS_I865G(dev))
i845_update_cursor(crtc, base);
i845_update_cursor(crtc, base, on);
else
i9xx_update_cursor(crtc, base);
i9xx_update_cursor(crtc, base, on);
}
 
static bool cursor_size_ok(struct drm_device *dev,
8384,109 → 10107,6
return true;
}
 
static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
struct drm_i915_gem_object *obj,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
unsigned old_width;
uint32_t addr;
int ret;
 
/* if we want to turn off the cursor ignore width and height */
if (!obj) {
DRM_DEBUG_KMS("cursor off\n");
addr = 0;
mutex_lock(&dev->struct_mutex);
goto finish;
}
 
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!INTEL_INFO(dev)->cursor_needs_physical) {
unsigned alignment;
 
/*
* Global gtt pte registers are special registers which actually
* forward writes to a chunk of system memory. Which means that
* there is no risk that the register values disappear as soon
* as we call intel_runtime_pm_put(), so it is correct to wrap
* only the pin/unpin/fence and not more.
*/
intel_runtime_pm_get(dev_priv);
 
/* Note that the w/a also requires 2 PTE of padding following
* the bo. We currently fill all unused PTE with the shadow
* page and so we should always have valid PTE following the
* cursor preventing the VT-d warning.
*/
alignment = 0;
if (need_vtd_wa(dev))
alignment = 64*1024;
 
ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
if (ret) {
DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
intel_runtime_pm_put(dev_priv);
goto fail_locked;
}
 
ret = i915_gem_object_put_fence(obj);
if (ret) {
DRM_DEBUG_KMS("failed to release fence for cursor");
intel_runtime_pm_put(dev_priv);
goto fail_unpin;
}
 
addr = i915_gem_obj_ggtt_offset(obj);
 
intel_runtime_pm_put(dev_priv);
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = 1;//i915_gem_object_attach_phys(obj, align);
if (ret) {
DRM_DEBUG_KMS("failed to attach phys object\n");
goto fail_locked;
}
addr = obj->phys_handle->busaddr;
}
 
finish:
if (intel_crtc->cursor_bo) {
if (!INTEL_INFO(dev)->cursor_needs_physical)
i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
}
 
i915_gem_track_fb(intel_crtc->cursor_bo, obj,
INTEL_FRONTBUFFER_CURSOR(pipe));
mutex_unlock(&dev->struct_mutex);
 
old_width = intel_crtc->cursor_width;
 
intel_crtc->cursor_addr = addr;
intel_crtc->cursor_bo = obj;
intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height;
 
if (intel_crtc->active) {
if (old_width != width)
intel_update_watermarks(crtc);
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
}
 
return 0;
fail_unpin:
i915_gem_object_unpin_from_display_plane(obj);
fail_locked:
mutex_unlock(&dev->struct_mutex);
return ret;
}
 
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t start, uint32_t size)
{
8591,7 → 10211,7
mode_fits_in_fbdev(struct drm_device *dev,
struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_I915_FBDEV
#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_framebuffer *fb;
8619,6 → 10239,41
#endif
}
 
static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_framebuffer *fb,
int x, int y)
{
struct drm_plane_state *plane_state;
int hdisplay, vdisplay;
int ret;
 
plane_state = drm_atomic_get_plane_state(state, crtc->primary);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
 
if (mode)
drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
else
hdisplay = vdisplay = 0;
 
ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
if (ret)
return ret;
drm_atomic_set_fb_for_plane(plane_state, fb);
plane_state->crtc_x = 0;
plane_state->crtc_y = 0;
plane_state->crtc_w = hdisplay;
plane_state->crtc_h = vdisplay;
plane_state->src_x = x << 16;
plane_state->src_y = y << 16;
plane_state->src_w = hdisplay << 16;
plane_state->src_h = vdisplay << 16;
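/* src_* coordinates are in 16.16 fixed point, hence the << 16 above. */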
 
return 0;
}
 
bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
8633,6 → 10288,9
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *fb;
struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state = NULL;
struct drm_connector_state *connector_state;
struct intel_crtc_state *crtc_state;
int ret, i = -1;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8642,7 → 10300,7
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
goto fail_unlock;
goto fail;
 
/*
* Algorithm gets a little messy:
8660,10 → 10318,10
 
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail_unlock;
goto fail;
ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
if (ret)
goto fail_unlock;
goto fail;
 
old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
8680,11 → 10338,8
i++;
if (!(encoder->possible_crtcs & (1 << i)))
continue;
if (possible_crtc->enabled)
if (possible_crtc->state->enable)
continue;
/* This can occur when applying the pipe A quirk on resume. */
if (to_intel_crtc(possible_crtc)->new_enabled)
continue;
 
crtc = possible_crtc;
break;
8695,25 → 10350,44
*/
if (!crtc) {
DRM_DEBUG_KMS("no pipe available for load-detect\n");
goto fail_unlock;
goto fail;
}
 
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail_unlock;
goto fail;
ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
if (ret)
goto fail_unlock;
intel_encoder->new_crtc = to_intel_crtc(crtc);
to_intel_connector(connector)->new_encoder = intel_encoder;
goto fail;
 
intel_crtc = to_intel_crtc(crtc);
intel_crtc->new_enabled = true;
intel_crtc->new_config = &intel_crtc->config;
old->dpms_mode = connector->dpms;
old->load_detect_temp = true;
old->release_fb = NULL;
 
state = drm_atomic_state_alloc(dev);
if (!state)
return false;
 
state->acquire_ctx = ctx;
 
connector_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(connector_state)) {
ret = PTR_ERR(connector_state);
goto fail;
}
 
connector_state->crtc = crtc;
connector_state->best_encoder = &intel_encoder->base;
 
crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto fail;
}
 
crtc_state->base.active = crtc_state->base.enable = true;
 
if (!mode)
mode = &load_detect_mode;
 
8736,12 → 10410,19
goto fail;
}
 
if (intel_set_mode(crtc, mode, 0, 0, fb)) {
ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
if (ret)
goto fail;
 
drm_mode_copy(&crtc_state->base.mode, mode);
 
if (drm_atomic_commit(state)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
goto fail;
}
crtc->primary->crtc = crtc;
 
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
8748,12 → 10429,9
return true;
 
fail:
intel_crtc->new_enabled = crtc->enabled;
if (intel_crtc->new_enabled)
intel_crtc->new_config = &intel_crtc->config;
else
intel_crtc->new_config = NULL;
fail_unlock:
drm_atomic_state_free(state);
state = NULL;
 
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
8763,13 → 10441,19
}
 
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old)
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = connector->dev;
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_atomic_state *state;
struct drm_connector_state *connector_state;
struct intel_crtc_state *crtc_state;
int ret;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, connector->name,
8776,12 → 10460,34
encoder->base.id, encoder->name);
 
if (old->load_detect_temp) {
to_intel_connector(connector)->new_encoder = NULL;
intel_encoder->new_crtc = NULL;
intel_crtc->new_enabled = false;
intel_crtc->new_config = NULL;
intel_set_mode(crtc, NULL, 0, 0, NULL);
state = drm_atomic_state_alloc(dev);
if (!state)
goto fail;
 
state->acquire_ctx = ctx;
 
connector_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(connector_state))
goto fail;
 
crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
if (IS_ERR(crtc_state))
goto fail;
 
connector_state->best_encoder = NULL;
connector_state->crtc = NULL;
 
crtc_state->base.enable = crtc_state->base.active = false;
 
ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
0, 0);
if (ret)
goto fail;
 
ret = drm_atomic_commit(state);
if (ret)
goto fail;
 
if (old->release_fb) {
drm_framebuffer_unregister_private(old->release_fb);
drm_framebuffer_unreference(old->release_fb);
8793,10 → 10499,15
/* Switch crtc and encoder back off if necessary */
if (old->dpms_mode != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, old->dpms_mode);
 
return;
fail:
DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
drm_atomic_state_free(state);
}
 
static int i9xx_pll_refclk(struct drm_device *dev,
const struct intel_crtc_config *pipe_config)
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll = pipe_config->dpll_hw_state.dpll;
8813,7 → 10524,7
 
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
8821,6 → 10532,7
u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
intel_clock_t clock;
int port_clock;
int refclk = i9xx_pll_refclk(dev, pipe_config);
 
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
8861,9 → 10573,9
}
 
if (IS_PINEVIEW(dev))
pineview_clock(refclk, &clock);
port_clock = pnv_calc_dpll_params(refclk, &clock);
else
i9xx_clock(refclk, &clock);
port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
8889,7 → 10601,7
clock.p2 = 2;
}
 
i9xx_clock(refclk, &clock);
port_clock = i9xx_calc_dpll_params(refclk, &clock);
}
 
/*
8897,7 → 10609,7
* port_clock to compute adjusted_mode.crtc_clock in the
* encoder's get_config() function.
*/
pipe_config->port_clock = clock.dot;
pipe_config->port_clock = port_clock;
}
 
int intel_dotclock_calculate(int link_freq,
8920,7 → 10632,7
}
 
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
 
8933,7 → 10645,7
* agree once we know their relationship in the encoder's
* get_config() function.
*/
pipe_config->adjusted_mode.crtc_clock =
pipe_config->base.adjusted_mode.crtc_clock =
intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
&pipe_config->fdi_m_n);
}
8944,9 → 10656,9
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct drm_display_mode *mode;
struct intel_crtc_config pipe_config;
struct intel_crtc_state pipe_config;
int htot = I915_READ(HTOTAL(cpu_transcoder));
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
8986,42 → 10698,6
return mode;
}
 
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
if (!HAS_GMCH_DISPLAY(dev))
return;
 
if (!dev_priv->lvds_downclock_avail)
return;
 
/*
* Since this is called by a timer, we should never get here in
* the manual case.
*/
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
int pipe = intel_crtc->pipe;
int dpll_reg = DPLL(pipe);
int dpll;
 
DRM_DEBUG_DRIVER("downclocking LVDS\n");
 
assert_panel_unlocked(dev_priv, pipe);
 
dpll = I915_READ(dpll_reg);
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
intel_wait_for_vblank(dev, pipe);
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
}
 
}
 
void intel_mark_busy(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
9031,6 → 10707,8
 
intel_runtime_pm_get(dev_priv);
i915_update_gfx_val(dev_priv);
if (INTEL_INFO(dev)->gen >= 6)
gen6_rps_busy(dev_priv);
dev_priv->mm.busy = true;
}
 
9037,7 → 10715,6
void intel_mark_idle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
 
if (!dev_priv->mm.busy)
return;
9044,20 → 10721,9
 
dev_priv->mm.busy = false;
 
if (!i915.powersave)
goto out;
 
for_each_crtc(dev, crtc) {
if (!crtc->primary->fb)
continue;
 
intel_decrease_pllclock(crtc);
}
 
if (INTEL_INFO(dev)->gen >= 6)
gen6_rps_idle(dev->dev_private);
 
out:
intel_runtime_pm_put(dev_priv);
}
 
9087,21 → 10753,23
{
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
struct drm_device *dev = work->crtc->dev;
enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
struct intel_crtc *crtc = to_intel_crtc(work->crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_plane *primary = crtc->base.primary;
 
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb_obj);
intel_unpin_fb_obj(work->old_fb, primary->state);
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
 
intel_update_fbc(dev);
if (work->flip_queued_req)
i915_gem_request_assign(&work->flip_queued_req, NULL);
mutex_unlock(&dev->struct_mutex);
 
intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
drm_framebuffer_unreference(work->old_fb);
 
BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
atomic_dec(&crtc->unpin_work_count);
 
kfree(work);
}
9195,7 → 10863,7
*/
return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
crtc->unpin_work->gtt_offset &&
g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
crtc->unpin_work->flip_count);
}
 
9221,11 → 10889,11
spin_unlock_irqrestore(&dev->event_lock, flags);
}
 
static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
{
/* Ensure that the work item is consistent when activating it ... */
smp_wmb();
atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
atomic_set(&work->pending, INTEL_FLIP_PENDING);
/* and that it is marked active as soon as the irq could fire. */
smp_wmb();
}
9234,14 → 10902,15
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_engine_cs *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
 
ret = intel_ring_begin(ring, 6);
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
 
9260,8 → 10929,7
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
 
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
 
9269,14 → 10937,15
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_engine_cs *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
 
ret = intel_ring_begin(ring, 6);
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
 
9292,8 → 10961,7
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, MI_NOOP);
 
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
 
9301,15 → 10969,16
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
int ret;
 
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
 
9331,8 → 11000,7
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
 
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
 
9340,15 → 11008,16
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
int ret;
 
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
 
9367,8 → 11036,7
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
 
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
 
9376,9 → 11044,10
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_engine_cs *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane_bit = 0;
int len, ret;
9420,11 → 11089,11
* then do the cacheline alignment, and finally emit the
* MI_DISPLAY_FLIP.
*/
ret = intel_ring_cacheline_align(ring);
ret = intel_ring_cacheline_align(req);
if (ret)
return ret;
 
ret = intel_ring_begin(ring, len);
ret = intel_ring_begin(req, len);
if (ret)
return ret;
 
9444,10 → 11113,10
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
if (IS_GEN8(dev))
intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT);
else
intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
9462,12 → 11131,154
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, (MI_NOOP));
 
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
 
static int intel_default_queue_flip(struct drm_device *dev,
static bool use_mmio_flip(struct intel_engine_cs *ring,
struct drm_i915_gem_object *obj)
{
/*
* MMIO flips are not used on older platforms, because the lack of a
* flip done interrupt there forces us to use CS flips. Older platforms
* derive flip done using some clever tricks involving the flip_pending
* status bits and vblank irqs, so using MMIO flips would disrupt this
* mechanism.
*/
 
if (ring == NULL)
return true;
 
if (INTEL_INFO(ring->dev)->gen < 5)
return false;
 
if (i915.use_mmio_flip < 0)
return false;
else if (i915.use_mmio_flip > 0)
return true;
else if (i915.enable_execlists)
return true;
else
return ring != i915_gem_request_get_ring(obj->last_write_req);
}
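/*
 * A rough summary of the policy implemented above (same order as the
 * checks):
 *   ring == NULL            -> MMIO flip (nothing to synchronise with)
 *   gen < 5                 -> CS flip (no flip done interrupt)
 *   i915.use_mmio_flip < 0  -> CS flip (forced off by module param)
 *   i915.use_mmio_flip > 0  -> MMIO flip (forced on by module param)
 *   execlists enabled       -> MMIO flip
 *   otherwise               -> MMIO flip only when the last write was
 *                              done on a different ring
 */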
 
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
struct intel_unpin_work *work)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
const enum pipe pipe = intel_crtc->pipe;
u32 ctl, stride;
 
ctl = I915_READ(PLANE_CTL(pipe, 0));
ctl &= ~PLANE_CTL_TILED_MASK;
switch (fb->modifier[0]) {
case DRM_FORMAT_MOD_NONE:
break;
case I915_FORMAT_MOD_X_TILED:
ctl |= PLANE_CTL_TILED_X;
break;
case I915_FORMAT_MOD_Y_TILED:
ctl |= PLANE_CTL_TILED_Y;
break;
case I915_FORMAT_MOD_Yf_TILED:
ctl |= PLANE_CTL_TILED_YF;
break;
default:
MISSING_CASE(fb->modifier[0]);
}
 
/*
* The stride is either expressed in multiples of 64-byte chunks for
* linear buffers or in number of tiles for tiled buffers.
*/
stride = fb->pitches[0] /
intel_fb_stride_alignment(dev, fb->modifier[0],
fb->pixel_format);
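/*
 * E.g. (hypothetical pitch) with fb->pitches[0] = 4096:
 *   linear  -> alignment  64 bytes -> stride = 4096 / 64  = 64 chunks
 *   X-tiled -> alignment 512 bytes -> stride = 4096 / 512 = 8 tiles
 */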
 
/*
* Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
* PLANE_SURF updates, the update is then guaranteed to be atomic.
*/
I915_WRITE(PLANE_CTL(pipe, 0), ctl);
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
 
I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
POSTING_READ(PLANE_SURF(pipe, 0));
}
 
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
struct intel_unpin_work *work)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(intel_crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
u32 dspcntr;
u32 reg;
 
reg = DSPCNTR(intel_crtc->plane);
dspcntr = I915_READ(reg);
 
if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
else
dspcntr &= ~DISPPLANE_TILED;
 
I915_WRITE(reg, dspcntr);
 
I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
POSTING_READ(DSPSURF(intel_crtc->plane));
}
 
/*
* XXX: This is the temporary way to update the plane registers until we get
* around to using the usual plane update functions for MMIO flips
*/
static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
{
struct intel_crtc *crtc = mmio_flip->crtc;
struct intel_unpin_work *work;
 
spin_lock_irq(&crtc->base.dev->event_lock);
work = crtc->unpin_work;
spin_unlock_irq(&crtc->base.dev->event_lock);
if (work == NULL)
return;
 
intel_mark_page_flip_active(work);
 
intel_pipe_update_start(crtc);
 
if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
skl_do_mmio_flip(crtc, work);
else
/* use_mmio_flip() restricts MMIO flips to ilk+ */
ilk_do_mmio_flip(crtc, work);
 
intel_pipe_update_end(crtc);
}
 
static void intel_mmio_flip_work_func(struct work_struct *work)
{
struct intel_mmio_flip *mmio_flip =
container_of(work, struct intel_mmio_flip, work);
 
if (mmio_flip->req) {
WARN_ON(__i915_wait_request(mmio_flip->req,
mmio_flip->crtc->reset_counter,
false, NULL,
&mmio_flip->i915->rps.mmioflips));
i915_gem_request_unreference__unlocked(mmio_flip->req);
}
 
intel_do_mmio_flip(mmio_flip);
kfree(mmio_flip);
}
 
static int intel_queue_mmio_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
9474,9 → 11285,100
struct intel_engine_cs *ring,
uint32_t flags)
{
struct intel_mmio_flip *mmio_flip;
 
mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
if (mmio_flip == NULL)
return -ENOMEM;
 
mmio_flip->i915 = to_i915(dev);
mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
mmio_flip->crtc = to_intel_crtc(crtc);
 
INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
schedule_work(&mmio_flip->work);
 
return 0;
}
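/*
 * The resulting flow, roughly: intel_queue_mmio_flip() references the
 * framebuffer's last render request and schedules
 * intel_mmio_flip_work_func(), which waits for that request (with an
 * RPS boost) before intel_do_mmio_flip() writes the plane registers.
 * The flip ioctl itself therefore never blocks on rendering.
 */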
 
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags)
{
return -ENODEV;
}
 
static bool __intel_pageflip_stall_check(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work = intel_crtc->unpin_work;
u32 addr;
 
if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
return true;
 
if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
return false;
 
if (!work->enable_stall_check)
return false;
 
if (work->flip_ready_vblank == 0) {
if (work->flip_queued_req &&
!i915_gem_request_completed(work->flip_queued_req, true))
return false;
 
work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
}
 
if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
return false;
 
/* Potential stall - if we see that the flip has happened,
* assume a missed interrupt. */
if (INTEL_INFO(dev)->gen >= 4)
addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
else
addr = I915_READ(DSPADDR(intel_crtc->plane));
 
/* There is a potential issue here with a false positive after a flip
* to the same address. We could address this by checking for a
* non-incrementing frame counter.
*/
return addr == work->gtt_offset;
}
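/*
 * Example timeline (hypothetical): the render request completes at
 * vblank N, so flip_ready_vblank = N. If by vblank N+3 no flip done
 * interrupt has been seen but the surface register already reads back
 * the new gtt_offset, the flip evidently completed and the interrupt
 * was missed, so the check above reports a stall.
 */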
 
void intel_check_page_flip(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
 
WARN_ON(!in_interrupt());
 
if (crtc == NULL)
return;
 
spin_lock(&dev->event_lock);
work = intel_crtc->unpin_work;
if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
work->flip_queued_vblank, drm_vblank_count(dev, pipe));
page_flip_completed(intel_crtc);
work = NULL;
}
if (work != NULL &&
drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
spin_unlock(&dev->event_lock);
}
 
static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
9487,9 → 11389,12
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *primary = crtc->primary;
enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
bool mmio_flip;
struct drm_i915_gem_request *request = NULL;
int ret;
 
/*
9522,7 → 11427,7
 
work->event = event;
work->crtc = crtc;
work->old_fb_obj = intel_fb_obj(old_fb);
work->old_fb = old_fb;
INIT_WORK(&work->work, intel_unpin_work_fn);
 
ret = drm_crtc_vblank_get(crtc);
9553,33 → 11458,34
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto cleanup;
 
/* Reference the objects for the scheduled work. */
drm_gem_object_reference(&work->old_fb_obj->base);
drm_framebuffer_reference(work->old_fb);
drm_gem_object_reference(&obj->base);
 
crtc->primary->fb = fb;
update_state_fb(crtc->primary);
 
work->pending_flip_obj = obj;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto cleanup;
 
atomic_inc(&intel_crtc->unpin_work_count);
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
 
if (IS_VALLEYVIEW(dev)) {
ring = &dev_priv->ring[BCS];
if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
ring = NULL;
} else if (IS_IVYBRIDGE(dev)) {
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
ring = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
ring = obj->ring;
ring = i915_gem_request_get_ring(obj->last_write_req);
if (ring == NULL || ring->id != RCS)
ring = &dev_priv->ring[BCS];
} else {
9586,55 → 11492,78
ring = &dev_priv->ring[RCS];
}
 
ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
mmio_flip = use_mmio_flip(ring, obj);
 
/* When using CS flips, we want to emit semaphores between rings.
* However, when using mmio flips we will create a task to do the
* synchronisation, so all we want here is to pin the framebuffer
* into the display plane and skip any waits.
*/
ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
crtc->primary->state,
mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
if (ret)
goto cleanup_pending;
 
work->gtt_offset =
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
obj, 0);
work->gtt_offset += intel_crtc->dspaddr_offset;
 
if (use_mmio_flip(ring, obj)) {
if (mmio_flip) {
ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
page_flip_flags);
if (ret)
goto cleanup_unpin;
 
work->flip_queued_seqno = obj->last_write_seqno;
work->flip_queued_ring = obj->ring;
i915_gem_request_assign(&work->flip_queued_req,
obj->last_write_req);
} else {
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
if (!request) {
ret = i915_gem_request_alloc(ring, ring->default_context, &request);
if (ret)
goto cleanup_unpin;
}
 
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
page_flip_flags);
if (ret)
goto cleanup_unpin;
 
work->flip_queued_seqno = intel_ring_get_seqno(ring);
work->flip_queued_ring = ring;
i915_gem_request_assign(&work->flip_queued_req, request);
}
 
work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
if (request)
i915_add_request_no_flush(request);
 
work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
work->enable_stall_check = true;
 
i915_gem_track_fb(work->old_fb_obj, obj,
INTEL_FRONTBUFFER_PRIMARY(pipe));
 
intel_disable_fbc(dev);
intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
 
intel_fbc_disable_crtc(intel_crtc);
intel_frontbuffer_flip_prepare(dev,
to_intel_plane(primary)->frontbuffer_bit);
 
trace_i915_flip_request(intel_crtc->plane, obj);
 
return 0;
 
cleanup_unpin:
intel_unpin_fb_obj(obj);
intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
if (request)
i915_gem_request_cancel(request);
atomic_dec(&intel_crtc->unpin_work_count);
mutex_unlock(&dev->struct_mutex);
cleanup:
crtc->primary->fb = old_fb;
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
update_state_fb(crtc->primary);
 
cleanup:
drm_gem_object_unreference_unlocked(&obj->base);
drm_framebuffer_unreference(work->old_fb);
 
spin_lock_irq(&dev->event_lock);
intel_crtc->unpin_work = NULL;
spin_unlock_irq(&dev->event_lock);
9644,9 → 11573,35
kfree(work);
 
if (ret == -EIO) {
struct drm_atomic_state *state;
struct drm_plane_state *plane_state;
 
out_hang:
// intel_crtc_wait_for_pending_flips(crtc);
ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
 
retry:
plane_state = drm_atomic_get_plane_state(state, primary);
ret = PTR_ERR_OR_ZERO(plane_state);
if (!ret) {
drm_atomic_set_fb_for_plane(plane_state, fb);
 
ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
if (!ret)
ret = drm_atomic_commit(state);
}
 
if (ret == -EDEADLK) {
drm_modeset_backoff(state->acquire_ctx);
drm_atomic_state_clear(state);
goto retry;
}
 
if (ret)
drm_atomic_state_free(state);
 
if (ret == 0 && event) {
spin_lock_irq(&dev->event_lock);
drm_send_vblank_event(dev, pipe, event);
9657,72 → 11612,294
}
#endif
 
static struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_set_base_atomic = intel_pipe_set_base_atomic,
.load_lut = intel_crtc_load_lut,
};
 
/**
* intel_modeset_update_staged_output_state
* intel_wm_need_update - Check whether watermarks need updating
* @plane: drm plane
* @state: new plane state
*
* Updates the staged output configuration state, e.g. after we've read out the
* current hw state.
* Check current plane state versus the new one to determine whether
* watermarks need to be recalculated.
*
* Returns true or false.
*/
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
static bool intel_wm_need_update(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
/* Update watermarks on tiling or rotation changes. */
if (!plane->state->fb || !state->fb ||
plane->state->fb->modifier[0] != state->fb->modifier[0] ||
plane->state->rotation != state->rotation)
return true;
 
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
connector->new_encoder =
to_intel_encoder(connector->base.encoder);
if (plane->state->crtc_w != state->crtc_w)
return true;
 
return false;
}
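/*
 * The crtc_w check matters because a width change alters how much
 * data the plane fetches per line, which feeds into the watermark
 * calculation even when the fb itself is unchanged.
 */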
 
for_each_intel_encoder(dev, encoder) {
encoder->new_crtc =
to_intel_crtc(encoder->base.crtc);
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *plane = plane_state->plane;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->state);
int idx = intel_crtc->base.base.id, ret;
int i = drm_plane_index(plane);
bool mode_changed = needs_modeset(crtc_state);
bool was_crtc_enabled = crtc->state->active;
bool is_crtc_enabled = crtc_state->active;
 
bool turn_off, turn_on, visible, was_visible;
struct drm_framebuffer *fb = plane_state->fb;
 
if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
plane->type != DRM_PLANE_TYPE_CURSOR) {
ret = skl_update_scaler_plane(
to_intel_crtc_state(crtc_state),
to_intel_plane_state(plane_state));
if (ret)
return ret;
}
 
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = crtc->base.enabled;
/*
* Disabling a plane is always okay; we just need to update
* fb tracking in a special way since cleanup_fb() won't
* get called by the plane helpers.
*/
if (old_plane_state->base.fb && !fb)
intel_crtc->atomic.disabled_planes |= 1 << i;
 
if (crtc->new_enabled)
crtc->new_config = &crtc->config;
else
crtc->new_config = NULL;
was_visible = old_plane_state->visible;
visible = to_intel_plane_state(plane_state)->visible;
 
if (!was_crtc_enabled && WARN_ON(was_visible))
was_visible = false;
 
if (!is_crtc_enabled && WARN_ON(visible))
visible = false;
 
if (!was_visible && !visible)
return 0;
 
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
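/*
 * Truth table for the two flags above (the both-invisible case
 * returned early):
 *   was_visible visible mode_changed -> turn_off turn_on
 *        0         1         x              0       1
 *        1         0         x              1       0
 *        1         1         0              0       0
 *        1         1         1              1       1   (full modeset)
 */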
 
DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
plane->base.id, fb ? fb->base.id : -1);
 
DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
plane->base.id, was_visible, visible,
turn_off, turn_on, mode_changed);
 
if (turn_on) {
intel_crtc->atomic.update_wm_pre = true;
/* must disable cxsr around plane enable/disable */
if (plane->type != DRM_PLANE_TYPE_CURSOR) {
intel_crtc->atomic.disable_cxsr = true;
/* to potentially re-enable cxsr */
intel_crtc->atomic.wait_vblank = true;
intel_crtc->atomic.update_wm_post = true;
}
} else if (turn_off) {
intel_crtc->atomic.update_wm_post = true;
/* must disable cxsr around plane enable/disable */
if (plane->type != DRM_PLANE_TYPE_CURSOR) {
if (is_crtc_enabled)
intel_crtc->atomic.wait_vblank = true;
intel_crtc->atomic.disable_cxsr = true;
}
} else if (intel_wm_need_update(plane, plane_state)) {
intel_crtc->atomic.update_wm_pre = true;
}
 
/**
* intel_modeset_commit_output_state
if (visible || was_visible)
intel_crtc->atomic.fb_bits |=
to_intel_plane(plane)->frontbuffer_bit;
 
switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
intel_crtc->atomic.wait_for_flips = true;
intel_crtc->atomic.pre_disable_primary = turn_off;
intel_crtc->atomic.post_enable_primary = turn_on;
 
if (turn_off) {
/*
* FIXME: Actually, if we still have any other plane
* enabled on the pipe we could leave IPS enabled, but
* for now let's consider that when we make the
* primary plane invisible by setting DSPCNTR to 0 in
* the update_primary_plane function, IPS needs to be
* disabled.
*/
intel_crtc->atomic.disable_ips = true;
 
intel_crtc->atomic.disable_fbc = true;
}
 
/*
* FBC does not work on some platforms for rotated
* planes, so disable it when rotation is not 0 and
* update it when rotation is set back to 0.
*
* This function copies the stage display pipe configuration to the real one.
* FIXME: This is redundant with the fbc update done in
* the primary plane enable function except that that
* one is done too late. We eventually need to unify
* this.
*/
static void intel_modeset_commit_output_state(struct drm_device *dev)
 
if (visible &&
INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
dev_priv->fbc.crtc == intel_crtc &&
plane_state->rotation != BIT(DRM_ROTATE_0))
intel_crtc->atomic.disable_fbc = true;
 
/*
* BDW signals flip done immediately if the plane
* is disabled, even if the plane enable is already
* armed to occur at the next vblank :(
*/
if (turn_on && IS_BROADWELL(dev))
intel_crtc->atomic.wait_vblank = true;
 
intel_crtc->atomic.update_fbc |= visible || mode_changed;
break;
case DRM_PLANE_TYPE_CURSOR:
break;
case DRM_PLANE_TYPE_OVERLAY:
if (turn_off && !mode_changed) {
intel_crtc->atomic.wait_vblank = true;
intel_crtc->atomic.update_sprite_watermarks |=
1 << i;
}
}
return 0;
}
 
static bool encoders_cloneable(const struct intel_encoder *a,
const struct intel_encoder *b)
{
struct intel_crtc *crtc;
/* masks could be asymmetric, so check both ways */
return a == b || (a->cloneable & (1 << b->type) &&
b->cloneable & (1 << a->type));
}
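/*
 * E.g. (hypothetical masks): if a DVO encoder lists HDMI in its
 * ->cloneable mask but the HDMI encoder does not list DVO in return,
 * the pair is rejected - hence the check in both directions.
 */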
 
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
struct intel_encoder *source_encoder;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int i;
 
for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != &crtc->base)
continue;
 
source_encoder =
to_intel_encoder(connector_state->best_encoder);
if (!encoders_cloneable(encoder, source_encoder))
return false;
}
 
return true;
}
 
static bool check_encoder_cloning(struct drm_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_encoder *encoder;
struct intel_connector *connector;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int i;
 
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
connector->base.encoder = &connector->new_encoder->base;
for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != &crtc->base)
continue;
 
encoder = to_intel_encoder(connector_state->best_encoder);
if (!check_single_encoder_cloning(state, crtc, encoder))
return false;
}
 
for_each_intel_encoder(dev, encoder) {
encoder->base.crtc = &encoder->new_crtc->base;
return true;
}
 
for_each_intel_crtc(dev, crtc) {
crtc->base.enabled = crtc->new_enabled;
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc_state);
struct drm_atomic_state *state = crtc_state->state;
int ret;
bool mode_changed = needs_modeset(crtc_state);
 
if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
return -EINVAL;
}
 
if (mode_changed && !crtc_state->active)
intel_crtc->atomic.update_wm_post = true;
 
if (mode_changed && crtc_state->enable &&
dev_priv->display.crtc_compute_clock &&
!WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
ret = dev_priv->display.crtc_compute_clock(intel_crtc,
pipe_config);
if (ret)
return ret;
}
 
ret = 0;
if (INTEL_INFO(dev)->gen >= 9) {
if (mode_changed)
ret = skl_update_scaler_crtc(pipe_config);
 
if (!ret)
ret = intel_atomic_setup_scalers(dev, intel_crtc,
pipe_config);
}
 
return ret;
}
 
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_set_base_atomic = intel_pipe_set_base_atomic,
.load_lut = intel_crtc_load_lut,
.atomic_begin = intel_begin_crtc_commit,
.atomic_flush = intel_finish_crtc_commit,
.atomic_check = intel_crtc_atomic_check,
};
 
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
struct intel_connector *connector;
 
for_each_intel_connector(dev, connector) {
if (connector->base.encoder) {
connector->base.state->best_encoder =
connector->base.encoder;
connector->base.state->crtc =
connector->base.encoder->crtc;
} else {
connector->base.state->best_encoder = NULL;
connector->base.state->crtc = NULL;
}
}
}
 
static void
connected_sink_compute_bpp(struct intel_connector *connector,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
int bpp = pipe_config->pipe_bpp;
 
9748,59 → 11925,33
 
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct drm_framebuffer *fb,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct intel_connector *connector;
int bpp;
struct drm_atomic_state *state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int bpp, i;
 
switch (fb->pixel_format) {
case DRM_FORMAT_C8:
bpp = 8*3; /* since we go through a colormap */
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
/* checked in intel_framebuffer_init already */
if (WARN_ON(INTEL_INFO(dev)->gen > 3))
return -EINVAL;
case DRM_FORMAT_RGB565:
bpp = 6*3; /* min is 18bpp */
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
/* checked in intel_framebuffer_init already */
if (WARN_ON(INTEL_INFO(dev)->gen < 4))
return -EINVAL;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)))
bpp = 10*3;
else if (INTEL_INFO(dev)->gen >= 5)
bpp = 12*3;
else
bpp = 8*3;
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
/* checked in intel_framebuffer_init already */
if (WARN_ON(INTEL_INFO(dev)->gen < 4))
return -EINVAL;
bpp = 10*3;
break;
/* TODO: gen4+ supports 16 bpc floating point, too. */
default:
DRM_DEBUG_KMS("unsupported depth\n");
return -EINVAL;
}
 
 
pipe_config->pipe_bpp = bpp;
 
state = pipe_config->base.state;
 
/* Clamp display bpp to EDID value */
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (!connector->new_encoder ||
connector->new_encoder->new_crtc != crtc)
for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != &crtc->base)
continue;
 
connected_sink_compute_bpp(connector, pipe_config);
connected_sink_compute_bpp(to_intel_connector(connector),
pipe_config);
}
 
return bpp;
9818,12 → 11969,18
}
 
static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
struct intel_crtc_state *pipe_config,
const char *context)
{
DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
context, pipe_name(crtc->pipe));
struct drm_device *dev = crtc->base.dev;
struct drm_plane *plane;
struct intel_plane *intel_plane;
struct intel_plane_state *state;
struct drm_framebuffer *fb;
 
DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
context, pipe_config, pipe_name(crtc->pipe));
 
DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
pipe_config->pipe_bpp, pipe_config->dither);
9833,14 → 11990,16
pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
pipe_config->fdi_m_n.tu);
DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
pipe_config->has_dp_encoder,
pipe_config->lane_count,
pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
pipe_config->dp_m_n.tu);
 
DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
pipe_config->has_dp_encoder,
pipe_config->lane_count,
pipe_config->dp_m2_n2.gmch_m,
pipe_config->dp_m2_n2.gmch_n,
pipe_config->dp_m2_n2.link_m,
9852,13 → 12011,17
pipe_config->has_infoframe);
 
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->requested_mode);
drm_mode_debug_printmodeline(&pipe_config->base.mode);
DRM_DEBUG_KMS("adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
intel_dump_crtc_timings(&pipe_config->adjusted_mode);
drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
DRM_DEBUG_KMS("pipe src size: %dx%d\n",
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
crtc->num_scalers,
pipe_config->scaler_state.scaler_users,
pipe_config->scaler_state.scaler_id);
DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
9869,52 → 12032,83
pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
}
 
static bool encoders_cloneable(const struct intel_encoder *a,
const struct intel_encoder *b)
{
/* masks could be asymmetric, so check both ways */
return a == b || (a->cloneable & (1 << b->type) &&
b->cloneable & (1 << a->type));
if (IS_BROXTON(dev)) {
DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
"pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
"pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
pipe_config->ddi_pll_sel,
pipe_config->dpll_hw_state.ebb0,
pipe_config->dpll_hw_state.ebb4,
pipe_config->dpll_hw_state.pll0,
pipe_config->dpll_hw_state.pll1,
pipe_config->dpll_hw_state.pll2,
pipe_config->dpll_hw_state.pll3,
pipe_config->dpll_hw_state.pll6,
pipe_config->dpll_hw_state.pll8,
pipe_config->dpll_hw_state.pll9,
pipe_config->dpll_hw_state.pll10,
pipe_config->dpll_hw_state.pcsdw12);
} else if (IS_SKYLAKE(dev)) {
DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
"ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
pipe_config->ddi_pll_sel,
pipe_config->dpll_hw_state.ctrl1,
pipe_config->dpll_hw_state.cfgcr1,
pipe_config->dpll_hw_state.cfgcr2);
} else if (HAS_DDI(dev)) {
DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
pipe_config->ddi_pll_sel,
pipe_config->dpll_hw_state.wrpll,
pipe_config->dpll_hw_state.spll);
} else {
DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
"fp0: 0x%x, fp1: 0x%x\n",
pipe_config->dpll_hw_state.dpll,
pipe_config->dpll_hw_state.dpll_md,
pipe_config->dpll_hw_state.fp0,
pipe_config->dpll_hw_state.fp1);
}
 
static bool check_single_encoder_cloning(struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *source_encoder;
DRM_DEBUG_KMS("planes on this crtc\n");
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
intel_plane = to_intel_plane(plane);
if (intel_plane->pipe != crtc->pipe)
continue;
 
for_each_intel_encoder(dev, source_encoder) {
if (source_encoder->new_crtc != crtc)
state = to_intel_plane_state(plane->state);
fb = state->base.fb;
if (!fb) {
DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
"disabled, scaler_id = %d\n",
plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
plane->base.id, intel_plane->pipe,
(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
drm_plane_index(plane), state->scaler_id);
continue;
 
if (!encoders_cloneable(encoder, source_encoder))
return false;
}
 
return true;
DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
plane->base.id, intel_plane->pipe,
crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
drm_plane_index(plane));
DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
fb->base.id, fb->width, fb->height, fb->pixel_format);
DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
state->scaler_id,
state->src.x1 >> 16, state->src.y1 >> 16,
drm_rect_width(&state->src) >> 16,
drm_rect_height(&state->src) >> 16,
state->dst.x1, state->dst.y1,
drm_rect_width(&state->dst), drm_rect_height(&state->dst));
}
 
static bool check_encoder_cloning(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
 
for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc != crtc)
continue;
 
if (!check_single_encoder_cloning(crtc, encoder))
return false;
}
 
return true;
}
 
static bool check_digital_port_conflicts(struct drm_device *dev)
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
struct intel_connector *connector;
struct drm_device *dev = state->dev;
struct drm_connector *connector;
unsigned int used_ports = 0;
 
/*
9922,15 → 12116,21
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
*/
list_for_each_entry(connector,
&dev->mode_config.connector_list, base.head) {
struct intel_encoder *encoder = connector->new_encoder;
drm_for_each_connector(connector, dev) {
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
 
if (!encoder)
connector_state = drm_atomic_get_existing_connector_state(state, connector);
if (!connector_state)
connector_state = connector->state;
 
if (!connector_state->best_encoder)
continue;
 
WARN_ON(!encoder->new_crtc);
encoder = to_intel_encoder(connector_state->best_encoder);
 
WARN_ON(!connector_state->crtc);
 
switch (encoder->type) {
unsigned int port_mask;
case INTEL_OUTPUT_UNKNOWN:
9954,37 → 12154,54
return true;
}
 
static struct intel_crtc_config *
intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_display_mode *mode)
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;
struct intel_crtc_config *pipe_config;
int plane_bpp, ret = -EINVAL;
bool retry = true;
struct drm_crtc_state tmp_state;
struct intel_crtc_scaler_state scaler_state;
struct intel_dpll_hw_state dpll_hw_state;
enum intel_dpll_id shared_dpll;
uint32_t ddi_pll_sel;
bool force_thru;
 
if (!check_encoder_cloning(to_intel_crtc(crtc))) {
DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
return ERR_PTR(-EINVAL);
}
/* FIXME: before the switch to atomic started, a new pipe_config was
* kzalloc'd. Code that depends on any field being zero should be
* fixed, so that the crtc_state can be safely duplicated. For now,
* only fields that are known to not cause problems are preserved. */
 
if (!check_digital_port_conflicts(dev)) {
DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
return ERR_PTR(-EINVAL);
tmp_state = crtc_state->base;
scaler_state = crtc_state->scaler_state;
shared_dpll = crtc_state->shared_dpll;
dpll_hw_state = crtc_state->dpll_hw_state;
ddi_pll_sel = crtc_state->ddi_pll_sel;
force_thru = crtc_state->pch_pfit.force_thru;
 
memset(crtc_state, 0, sizeof *crtc_state);
 
crtc_state->base = tmp_state;
crtc_state->scaler_state = scaler_state;
crtc_state->shared_dpll = shared_dpll;
crtc_state->dpll_hw_state = dpll_hw_state;
crtc_state->ddi_pll_sel = ddi_pll_sel;
crtc_state->pch_pfit.force_thru = force_thru;
}
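/*
 * In other words, a selective memset: snapshot the fields that must
 * survive, zero the whole state, then write the snapshot back.
 */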
 
pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
if (!pipe_config)
return ERR_PTR(-ENOMEM);
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_atomic_state *state = pipe_config->base.state;
struct intel_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int base_bpp, ret = -EINVAL;
int i;
bool retry = true;
 
drm_mode_copy(&pipe_config->adjusted_mode, mode);
drm_mode_copy(&pipe_config->requested_mode, mode);
clear_intel_crtc_state(pipe_config);
 
pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
/*
* Sanitize sync polarity flags based on requested ones. If neither
9991,21 → 12208,17
* positive or negative polarity is requested, treat this as meaning
* negative polarity.
*/
if (!(pipe_config->adjusted_mode.flags &
if (!(pipe_config->base.adjusted_mode.flags &
(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
 
if (!(pipe_config->adjusted_mode.flags &
if (!(pipe_config->base.adjusted_mode.flags &
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
 
/* Compute a starting value for pipe_config->pipe_bpp taking the source
* plane pixel format and any sink constraints into account. Returns the
* source plane bpp so that dithering can be selected on mismatches
* after encoders and crtc also have had their say. */
plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
fb, pipe_config);
if (plane_bpp < 0)
base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
pipe_config);
if (base_bpp < 0)
goto fail;
 
/*
10016,9 → 12229,9
* computation to clearly distinguish it from the adjusted mode, which
* can be changed by the connectors in the below retry loop.
*/
drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
drm_crtc_get_hv_timing(&pipe_config->base.mode,
&pipe_config->pipe_src_w,
&pipe_config->pipe_src_h);
 
encoder_retry:
/* Ensure the port clock defaults are reset when retrying. */
10026,17 → 12239,19
pipe_config->pixel_multiplier = 1;
 
/* Fill in default crtc timings, allow encoders to overwrite them. */
drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
CRTC_STEREO_DOUBLE);
 
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
for_each_intel_encoder(dev, encoder) {
 
if (&encoder->new_crtc->base != crtc)
for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc)
continue;
 
encoder = to_intel_encoder(connector_state->best_encoder);
 
if (!(encoder->compute_config(encoder, pipe_config))) {
DRM_DEBUG_KMS("Encoder config failure\n");
goto fail;
10046,7 → 12261,7
/* Set default port clock if not overwritten by the encoder. Needs to be
* done afterwards in case the encoder adjusts the mode. */
if (!pipe_config->port_clock)
pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
* pipe_config->pixel_multiplier;
 
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
10066,216 → 12281,187
goto encoder_retry;
}
 
pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
/* Dithering seems to not pass through bits correctly when it should, so
* only enable it on 6bpc panels. */
pipe_config->dither = pipe_config->pipe_bpp == 6*3;
DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
 
return pipe_config;
fail:
kfree(pipe_config);
return ERR_PTR(ret);
return ret;
}
 
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
* simplicity we use the crtc's pipe number (because it's easier to obtain). */
static void
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
unsigned *prepare_pipes, unsigned *disable_pipes)
intel_modeset_update_crtc_state(struct drm_atomic_state *state)
{
struct intel_crtc *intel_crtc;
struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;
struct intel_connector *connector;
struct drm_crtc *tmp_crtc;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int i;
 
*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
/* Double check state. */
for_each_crtc_in_state(state, crtc, crtc_state, i) {
to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
 
/* Check which crtcs have changed outputs connected to them, these need
* to be part of the prepare_pipes mask. We don't (yet) support global
* modeset across multiple crtcs, so modeset_pipes will only have one
* bit set at most. */
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (connector->base.encoder == &connector->new_encoder->base)
continue;
 
if (connector->base.encoder) {
tmp_crtc = connector->base.encoder->crtc;
 
*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
/* Update hwmode for vblank functions */
if (crtc->state->active)
crtc->hwmode = crtc->state->adjusted_mode;
else
crtc->hwmode.crtc_clock = 0;
}
 
if (connector->new_encoder)
*prepare_pipes |=
1 << connector->new_encoder->new_crtc->pipe;
}
 
for_each_intel_encoder(dev, encoder) {
if (encoder->base.crtc == &encoder->new_crtc->base)
continue;
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
int diff;
 
if (encoder->base.crtc) {
tmp_crtc = encoder->base.crtc;
if (clock1 == clock2)
return true;
 
*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
}
if (!clock1 || !clock2)
return false;
 
if (encoder->new_crtc)
*prepare_pipes |= 1 << encoder->new_crtc->pipe;
}
diff = abs(clock1 - clock2);
 
/* Check for pipes that will be enabled/disabled ... */
for_each_intel_crtc(dev, intel_crtc) {
if (intel_crtc->base.enabled == intel_crtc->new_enabled)
continue;
if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
return true;
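/*
 * The test above reduces to |clock1 - clock2| < 5% of (clock1 +
 * clock2), i.e. roughly 10% of the average clock. E.g. 100000 vs
 * 103000: (3000 + 203000) * 100 / 203000 = 101 < 105, so the two
 * clocks are treated as equal.
 */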
 
if (!intel_crtc->new_enabled)
*disable_pipes |= 1 << intel_crtc->pipe;
else
*prepare_pipes |= 1 << intel_crtc->pipe;
return false;
}
 
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
list_for_each_entry((intel_crtc), \
&(dev)->mode_config.crtc_list, \
base.head) \
if (mask & (1 <<(intel_crtc)->pipe))
 
/* set_mode is also used to update properties on life display pipes. */
intel_crtc = to_intel_crtc(crtc);
if (intel_crtc->new_enabled)
*prepare_pipes |= 1 << intel_crtc->pipe;
 
/*
* For simplicity do a full modeset on any pipe where the output routing
* changed. We could be more clever, but that would require us to be
* more careful with calling the relevant encoder->mode_set functions.
*/
if (*prepare_pipes)
*modeset_pipes = *prepare_pipes;
 
/* ... and mask these out. */
*modeset_pipes &= ~(*disable_pipes);
*prepare_pipes &= ~(*disable_pipes);
 
/*
* HACK: We don't (yet) fully support global modesets. intel_set_config
* obeys this rule, but the modeset restore mode of
* intel_modeset_setup_hw_state does not.
*/
*modeset_pipes &= 1 << intel_crtc->pipe;
*prepare_pipes &= 1 << intel_crtc->pipe;
 
DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
*modeset_pipes, *prepare_pipes, *disable_pipes);
}
 
static bool intel_crtc_in_use(struct drm_crtc *crtc)
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
unsigned int m2, unsigned int n2,
bool exact)
{
struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
if (encoder->crtc == crtc)
if (m == m2 && n == n2)
return true;
 
if (exact || !m || !n || !m2 || !n2)
return false;
}
 
static void
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_crtc *intel_crtc;
struct drm_connector *connector;
BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
 
intel_shared_dpll_commit(dev_priv);
 
for_each_intel_encoder(dev, intel_encoder) {
if (!intel_encoder->base.crtc)
continue;
 
intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
 
if (prepare_pipes & (1 << intel_crtc->pipe))
intel_encoder->connectors_active = false;
if (m > m2) {
while (m > m2) {
m2 <<= 1;
n2 <<= 1;
}
 
intel_modeset_commit_output_state(dev);
 
/* Double check state. */
for_each_intel_crtc(dev, intel_crtc) {
WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
WARN_ON(intel_crtc->new_config &&
intel_crtc->new_config != &intel_crtc->config);
WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
} else if (m < m2) {
while (m < m2) {
m <<= 1;
n <<= 1;
}
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder || !connector->encoder->crtc)
continue;
 
intel_crtc = to_intel_crtc(connector->encoder->crtc);
 
if (prepare_pipes & (1 << intel_crtc->pipe)) {
struct drm_property *dpms_property =
dev->mode_config.dpms_property;
 
connector->dpms = DRM_MODE_DPMS_ON;
drm_object_property_set_value(&connector->base,
dpms_property,
DRM_MODE_DPMS_ON);
 
intel_encoder = to_intel_encoder(connector->encoder);
intel_encoder->connectors_active = true;
}
}
 
return m == m2 && n == n2;
}
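/*
 * The shift loops above bring both ratios to the same scale before
 * comparing, e.g. (illustrative) m/n = 2/3 vs m2/n2 = 4/6: m/n is
 * doubled to 4/6 and the two links compare equal even though the raw
 * register values differ.
 */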
 
static bool intel_fuzzy_clock_check(int clock1, int clock2)
static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
struct intel_link_m_n *m2_n2,
bool adjust)
{
int diff;
if (m_n->tu == m2_n2->tu &&
intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
intel_compare_m_n(m_n->link_m, m_n->link_n,
m2_n2->link_m, m2_n2->link_n, !adjust)) {
if (adjust)
*m2_n2 = *m_n;
 
if (clock1 == clock2)
return true;
}
 
if (!clock1 || !clock2)
return false;
 
diff = abs(clock1 - clock2);
 
if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
return true;
 
return false;
}
 
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
list_for_each_entry((intel_crtc), \
&(dev)->mode_config.crtc_list, \
base.head) \
if (mask & (1 <<(intel_crtc)->pipe))
 
static bool
intel_pipe_config_compare(struct drm_device *dev,
struct intel_crtc_config *current_config,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *current_config,
struct intel_crtc_state *pipe_config,
bool adjust)
{
bool ret = true;
 
#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
do { \
if (!adjust) \
DRM_ERROR(fmt, ##__VA_ARGS__); \
else \
DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
} while (0)
 
#define PIPE_CONF_CHECK_X(name) \
if (current_config->name != pipe_config->name) { \
DRM_ERROR("mismatch in " #name " " \
INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
"(expected 0x%08x, found 0x%08x)\n", \
current_config->name, \
pipe_config->name); \
return false; \
ret = false; \
}
 
#define PIPE_CONF_CHECK_I(name) \
if (current_config->name != pipe_config->name) { \
DRM_ERROR("mismatch in " #name " " \
INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
"(expected %i, found %i)\n", \
current_config->name, \
pipe_config->name); \
return false; \
ret = false; \
}
 
#define PIPE_CONF_CHECK_M_N(name) \
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name,\
adjust)) { \
INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
"(expected tu %i gmch %i/%i link %i/%i, " \
"found tu %i, gmch %i/%i link %i/%i)\n", \
current_config->name.tu, \
current_config->name.gmch_m, \
current_config->name.gmch_n, \
current_config->name.link_m, \
current_config->name.link_n, \
pipe_config->name.tu, \
pipe_config->name.gmch_m, \
pipe_config->name.gmch_n, \
pipe_config->name.link_m, \
pipe_config->name.link_n); \
ret = false; \
}
 
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name, adjust) && \
!intel_compare_link_m_n(&current_config->alt_name, \
&pipe_config->name, adjust)) { \
INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
"(expected tu %i gmch %i/%i link %i/%i, " \
"or tu %i gmch %i/%i link %i/%i, " \
"found tu %i, gmch %i/%i link %i/%i)\n", \
current_config->name.tu, \
current_config->name.gmch_m, \
current_config->name.gmch_n, \
current_config->name.link_m, \
current_config->name.link_n, \
current_config->alt_name.tu, \
current_config->alt_name.gmch_m, \
current_config->alt_name.gmch_n, \
current_config->alt_name.link_m, \
current_config->alt_name.link_n, \
pipe_config->name.tu, \
pipe_config->name.gmch_m, \
pipe_config->name.gmch_n, \
pipe_config->name.link_m, \
pipe_config->name.link_n); \
ret = false; \
}
 
/* This is required for BDW+ where there is only one set of registers for
* switching between high and low RR.
* This macro can be used whenever a comparison has to be made between one
10284,30 → 12470,30
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
if ((current_config->name != pipe_config->name) && \
(current_config->alt_name != pipe_config->name)) { \
DRM_ERROR("mismatch in " #name " " \
INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
"(expected %i or %i, found %i)\n", \
current_config->name, \
current_config->alt_name, \
pipe_config->name); \
return false; \
ret = false; \
}
 
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
DRM_ERROR("mismatch in " #name "(" #mask ") " \
INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
"(expected %i, found %i)\n", \
current_config->name & (mask), \
pipe_config->name & (mask)); \
return false; \
ret = false; \
}
 
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
DRM_ERROR("mismatch in " #name " " \
INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
"(expected %i, found %i)\n", \
current_config->name, \
pipe_config->name); \
return false; \
ret = false; \
}
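/*
 * Every PIPE_CONF_CHECK_* macro above has the same shape: compare one field
 * of the sw-computed state against the value read back from hardware, report
 * through INTEL_ERR_OR_DBG_KMS, and fold the verdict into ret instead of
 * returning early, so one pass logs every divergent field. A condensed sketch
 * of that accumulate-don't-abort pattern outside the i915 context; all names
 * below are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct cfg { int pixel_multiplier; int pipe_bpp; };

/* log a mismatch and keep checking, like PIPE_CONF_CHECK_I; reaches into
 * the enclosing 'ok' just as the macros above reach into 'ret' */
#define CHECK_I(sw, hw, name) do { \
	if ((sw)->name != (hw)->name) { \
		fprintf(stderr, "mismatch in " #name \
			" (expected %i, found %i)\n", \
			(sw)->name, (hw)->name); \
		ok = false; \
	} \
} while (0)

static bool cfg_compare(const struct cfg *sw, const struct cfg *hw)
{
	bool ok = true;

	CHECK_I(sw, hw, pixel_multiplier);
	CHECK_I(sw, hw, pipe_bpp);

	return ok;
}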
 
#define PIPE_CONF_QUIRK(quirk) \
10317,49 → 12503,32
 
PIPE_CONF_CHECK_I(has_pch_encoder);
PIPE_CONF_CHECK_I(fdi_lanes);
PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
PIPE_CONF_CHECK_I(fdi_m_n.link_m);
PIPE_CONF_CHECK_I(fdi_m_n.link_n);
PIPE_CONF_CHECK_I(fdi_m_n.tu);
PIPE_CONF_CHECK_M_N(fdi_m_n);
 
PIPE_CONF_CHECK_I(has_dp_encoder);
PIPE_CONF_CHECK_I(lane_count);
 
if (INTEL_INFO(dev)->gen < 8) {
PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
PIPE_CONF_CHECK_I(dp_m_n.link_m);
PIPE_CONF_CHECK_I(dp_m_n.link_n);
PIPE_CONF_CHECK_I(dp_m_n.tu);
PIPE_CONF_CHECK_M_N(dp_m_n);
 
if (current_config->has_drrs) {
PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
PIPE_CONF_CHECK_I(dp_m2_n2.tu);
}
} else {
PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
}
if (current_config->has_drrs)
PIPE_CONF_CHECK_M_N(dp_m2_n2);
} else
PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
 
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
 
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
 
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(has_hdmi_sink);
10370,45 → 12539,39
 
PIPE_CONF_CHECK_I(has_audio);
 
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
 
if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_PHSYNC);
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_NHSYNC);
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_PVSYNC);
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_NVSYNC);
}
 
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
 
/*
* FIXME: BIOS likes to set up a cloned config with lvds+external
* screen. Since we don't yet re-compute the pipe config when moving
* just the lvds port away to another pipe the sw tracking won't match.
*
* Proper atomic modesets with recomputed global state will fix this.
* Until then just don't check gmch state for inherited modes.
*/
if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
PIPE_CONF_CHECK_I(gmch_pfit.control);
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
if (INTEL_INFO(dev)->gen < 4)
PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
}
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
 
if (!adjust) {
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
 
PIPE_CONF_CHECK_I(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
PIPE_CONF_CHECK_I(pch_pfit.pos);
PIPE_CONF_CHECK_I(pch_pfit.size);
PIPE_CONF_CHECK_X(pch_pfit.pos);
PIPE_CONF_CHECK_X(pch_pfit.size);
}
 
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
}
 
/* BDW+ don't expose a synchronous way to read the state */
if (IS_HASWELL(dev))
PIPE_CONF_CHECK_I(ips_enabled);
10423,6 → 12586,7
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
PIPE_CONF_CHECK_X(dpll_hw_state.spll);
PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
10430,7 → 12594,7
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
 
PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
 
#undef PIPE_CONF_CHECK_X
10439,8 → 12603,9
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK
#undef INTEL_ERR_OR_DBG_KMS
 
return true;
return ret;
}
 
static void check_wm_state(struct drm_device *dev)
10464,7 → 12629,7
continue;
 
/* planes */
for_each_plane(pipe, plane) {
for_each_plane(dev_priv, pipe, plane) {
hw_entry = &hw_ddb.plane[pipe][plane];
sw_entry = &sw_ddb->plane[pipe][plane];
 
10479,8 → 12644,8
}
 
/* cursor */
hw_entry = &hw_ddb.cursor[pipe];
sw_entry = &sw_ddb->cursor[pipe];
hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
 
if (skl_ddb_entry_equal(hw_entry, sw_entry))
continue;
10494,18 → 12659,23
}
 
static void
check_connector_state(struct drm_device *dev)
{
	struct intel_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* This also checks the encoder/connector hw state with the
		 * ->get_hw_state callbacks. */
		intel_connector_check_state(connector);

		WARN(&connector->new_encoder->base != connector->base.encoder,
		     "connector's staged encoder doesn't match current encoder\n");
	}
}

static void
check_connector_state(struct drm_device *dev,
		      struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct drm_connector_state *state = connector->state;

		/* This also checks the encoder/connector hw state with the
		 * ->get_hw_state callbacks. */
		intel_connector_check_state(to_intel_connector(connector));

		I915_STATE_WARN(state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
 
10517,124 → 12687,107
 
	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;
		bool active = false;
		enum pipe pipe, tracked_pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		WARN(&encoder->new_crtc->base != encoder->base.crtc,
		     "encoder's staged crtc doesn't match current crtc\n");
		WARN(encoder->connectors_active && !encoder->base.crtc,
		     "encoder's active_connectors set, but no crtc\n");

		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    base.head) {
			if (connector->base.encoder != &encoder->base)
				continue;
			enabled = true;
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
				active = true;
		}
		/*
		 * for MST connectors, if we unplug, the connector goes away
		 * but the encoder is still connected to a crtc until a
		 * modeset happens in response to the hotplug.
		 */
		if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
			continue;

		WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);
		WARN(active && !encoder->base.crtc,
		     "active encoder with no crtc\n");

		WARN(encoder->connectors_active != active,
		     "encoder's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, encoder->connectors_active);

		active = encoder->get_hw_state(encoder, &pipe);
		WARN(active != encoder->connectors_active,
		     "encoder's hw state doesn't match sw tracking "
		     "(expected %i, found %i)\n",
		     encoder->connectors_active, active);

		if (!encoder->base.crtc)
			continue;

		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
		WARN(active && pipe != tracked_pipe,
		     "active encoder's pipe doesn't match "
		     "(expected %i, found %i)\n",
		     tracked_pipe, pipe);
	}
}

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_intel_connector(dev, connector) {
			if (connector->base.state->best_encoder != &encoder->base)
				continue;
			enabled = true;

			I915_STATE_WARN(connector->base.state->crtc !=
					encoder->base.crtc,
					"connector's crtc doesn't match encoder crtc\n");
		}

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
 
static void
check_crtc_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_crtc_config pipe_config;

	for_each_intel_crtc(dev, crtc) {
		bool enabled = false;
		bool active = false;

		memset(&pipe_config, 0, sizeof(pipe_config));

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.base.id);

		WARN(crtc->active && !crtc->base.enabled,
		     "active crtc, but not enabled in sw tracking\n");

		for_each_intel_encoder(dev, encoder) {
			if (encoder->base.crtc != &crtc->base)
				continue;
			enabled = true;
			if (encoder->connectors_active)
				active = true;
		}

		WARN(active != crtc->active,
		     "crtc's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, crtc->active);
		WARN(enabled != crtc->base.enabled,
		     "crtc's computed enabled state doesn't match tracked enabled state "
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);

		active = dev_priv->display.get_pipe_config(crtc,
							   &pipe_config);

		/* hw state is inconsistent with the pipe quirk */
		if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
		    (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
			active = crtc->active;

		for_each_intel_encoder(dev, encoder) {
			enum pipe pipe;
			if (encoder->base.crtc != &crtc->base)
				continue;
			if (encoder->get_hw_state(encoder, &pipe))
				encoder->get_config(encoder, &pipe_config);
		}

		WARN(crtc->active != active,
		     "crtc active state doesn't match with hw state "
		     "(expected %i, found %i)\n", crtc->active, active);

		if (active &&
		    !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
			WARN(1, "pipe state doesn't match!\n");
			intel_dump_pipe_config(crtc, &pipe_config,
					       "[hw state]");
			intel_dump_pipe_config(crtc, &crtc->config,
					       "[sw state]");
		}
	}
}

static void
check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_crtc_state *pipe_config, *sw_config;
		bool active;

		if (!needs_modeset(crtc->state) &&
		    !to_intel_crtc_state(crtc->state)->update_pipe)
			continue;

		__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
		pipe_config = to_intel_crtc_state(old_crtc_state);
		memset(pipe_config, 0, sizeof(*pipe_config));
		pipe_config->base.crtc = crtc;
		pipe_config->base.state = old_state;

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.id);

		active = dev_priv->display.get_pipe_config(intel_crtc,
							   pipe_config);

		/* hw state is inconsistent with the pipe quirk */
		if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
		    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
			active = crtc->state->active;

		I915_STATE_WARN(crtc->state->active != active,
		     "crtc active state doesn't match with hw state "
		     "(expected %i, found %i)\n", crtc->state->active, active);

		I915_STATE_WARN(intel_crtc->active != crtc->state->active,
		     "transitional active state does not match atomic hw state "
		     "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);

		for_each_encoder_on_crtc(dev, crtc, encoder) {
			enum pipe pipe;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active != crtc->state->active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active, crtc->state->active);

			I915_STATE_WARN(active && intel_crtc->pipe != pipe,
					"Encoder connected to wrong pipe %c\n",
					pipe_name(pipe));

			if (active)
				encoder->get_config(encoder, pipe_config);
		}

		if (!crtc->state->active)
			continue;

		sw_config = to_intel_crtc_state(crtc->state);
		if (!intel_pipe_config_compare(dev, sw_config,
					       pipe_config, false)) {
			I915_STATE_WARN(1, "pipe state doesn't match!\n");
			intel_dump_pipe_config(intel_crtc, pipe_config,
					       "[hw state]");
			intel_dump_pipe_config(intel_crtc, sw_config,
					       "[sw state]");
		}
	}
}
10659,47 → 12812,48
 
active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
 
WARN(pll->active > hweight32(pll->config.crtc_mask),
I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
"more active pll users than references: %i vs %i\n",
pll->active, hweight32(pll->config.crtc_mask));
WARN(pll->active && !pll->on,
I915_STATE_WARN(pll->active && !pll->on,
"pll in active use but not on in sw tracking\n");
WARN(pll->on && !pll->active,
I915_STATE_WARN(pll->on && !pll->active,
"pll in on but not on in use in sw tracking\n");
WARN(pll->on != active,
I915_STATE_WARN(pll->on != active,
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);
 
for_each_intel_crtc(dev, crtc) {
if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
enabled_crtcs++;
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
active_crtcs++;
}
WARN(pll->active != active_crtcs,
I915_STATE_WARN(pll->active != active_crtcs,
"pll active crtcs mismatch (expected %i, found %i)\n",
pll->active, active_crtcs);
WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
"pll enabled crtcs mismatch (expected %i, found %i)\n",
hweight32(pll->config.crtc_mask), enabled_crtcs);
 
WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
"pll hw state mismatch\n");
}
}
 
void
intel_modeset_check_state(struct drm_device *dev)
static void
intel_modeset_check_state(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
check_wm_state(dev);
check_connector_state(dev);
check_connector_state(dev, old_state);
check_encoder_state(dev);
check_crtc_state(dev);
check_crtc_state(dev, old_state);
check_shared_dpll_state(dev);
}
 
void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
int dotclock)
{
/*
10706,9 → 12860,9
* FDI already provided one idea for the dotclock.
* Yell if the encoder disagrees.
*/
WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
"FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
pipe_config->adjusted_mode.crtc_clock, dotclock);
pipe_config->base.adjusted_mode.crtc_clock, dotclock);
}
 
static void update_scanline_offset(struct intel_crtc *crtc)
10734,11 → 12888,11
* one to the value.
*/
if (IS_GEN2(dev)) {
const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
int vtotal;
 
vtotal = mode->crtc_vtotal;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal = adjusted_mode->crtc_vtotal;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
 
crtc->scanline_offset = vtotal - 1;
10749,679 → 12903,412
crtc->scanline_offset = 1;
}
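/*
 * Worked through the gen2 branch above: a mode with crtc_vtotal = 806 yields
 * scanline_offset = 805, and the same timings with DRM_MODE_FLAG_INTERLACE
 * set halve vtotal to 403 first, giving 402; either way the offset folds the
 * gen2 scanline-counter quirk described above into a constant that later
 * scanline queries can apply.
 */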
 
static struct intel_crtc_config *
intel_modeset_compute_config(struct drm_crtc *crtc,
			     struct drm_display_mode *mode,
			     struct drm_framebuffer *fb,
			     unsigned *modeset_pipes,
			     unsigned *prepare_pipes,
			     unsigned *disable_pipes)
{
	struct intel_crtc_config *pipe_config = NULL;

	intel_modeset_affected_pipes(crtc, modeset_pipes,
				     prepare_pipes, disable_pipes);

	if ((*modeset_pipes) == 0)
		goto out;

	/*
	 * Note this needs changes when we start tracking multiple modes
	 * and crtcs. At that point we'll need to compute the whole config
	 * (i.e. one pipe_config for each crtc) rather than just the one
	 * for this crtc.
	 */
	pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
	if (IS_ERR(pipe_config)) {
		goto out;
	}
	intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
			       "[modeset]");

out:
	return pipe_config;
}

static int __intel_set_mode(struct drm_crtc *crtc,
			    struct drm_display_mode *mode,
			    int x, int y, struct drm_framebuffer *fb,
			    struct intel_crtc_config *pipe_config,
			    unsigned modeset_pipes,
			    unsigned prepare_pipes,
			    unsigned disable_pipes)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *saved_mode;
	struct intel_crtc *intel_crtc;
	int ret = 0;

	saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
	if (!saved_mode)
		return -ENOMEM;

	*saved_mode = crtc->mode;

	if (modeset_pipes)
		to_intel_crtc(crtc)->new_config = pipe_config;

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off. We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc. For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (IS_VALLEYVIEW(dev)) {
		valleyview_modeset_global_pipes(dev, &prepare_pipes);

		/* may have added more to prepare_pipes than we should */
		prepare_pipes &= ~disable_pipes;
	}

	if (dev_priv->display.crtc_compute_clock) {
		unsigned clear_pipes = modeset_pipes | disable_pipes;

		ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
		if (ret)
			goto done;

		for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
			ret = dev_priv->display.crtc_compute_clock(intel_crtc);
			if (ret) {
				intel_shared_dpll_abort_config(dev_priv);
				goto done;
			}
		}
	}

	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
		intel_crtc_disable(&intel_crtc->base);

	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
		if (intel_crtc->base.enabled)
			dev_priv->display.crtc_disable(&intel_crtc->base);
	}

	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
	 * to set it here already despite that we pass it down the callchain.
	 *
	 * Note we'll need to fix this up when we start tracking multiple
	 * pipes; here we assume a single modeset_pipe and only track the
	 * single crtc and mode.
	 */
	if (modeset_pipes) {
		crtc->mode = *mode;
		/* mode_set/enable/disable functions rely on a correct pipe
		 * config. */
		to_intel_crtc(crtc)->config = *pipe_config;
		to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;

		/*
		 * Calculate and store various constants which
		 * are later needed by vblank and swap-completion
		 * timestamping. They are derived from true hwmode.
		 */
		drm_calc_timestamping_constants(crtc,
						&pipe_config->adjusted_mode);
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_state(dev, prepare_pipes);

	modeset_update_crtc_power_domains(dev);

	/* Set up the DPLL and any encoders state that needs to adjust or depend
	 * on the DPLL.
	 */
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
		struct drm_framebuffer *old_fb = crtc->primary->fb;
		struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
		if (ret != 0) {
			DRM_ERROR("pin & fence failed\n");
			mutex_unlock(&dev->struct_mutex);
			goto done;
		}
		if (old_fb)
			intel_unpin_fb_obj(old_obj);
		i915_gem_track_fb(old_obj, obj,
				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
		mutex_unlock(&dev->struct_mutex);

		crtc->primary->fb = fb;
		crtc->x = x;
		crtc->y = y;
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
		update_scanline_offset(intel_crtc);

		dev_priv->display.crtc_enable(&intel_crtc->base);
	}

	/* FIXME: add subpixel order */
done:
	if (ret && crtc->enabled)
		crtc->mode = *saved_mode;

	kfree(pipe_config);
	kfree(saved_mode);
	return ret;
}

static int intel_set_mode_pipes(struct drm_crtc *crtc,
				struct drm_display_mode *mode,
				int x, int y, struct drm_framebuffer *fb,
				struct intel_crtc_config *pipe_config,
				unsigned modeset_pipes,
				unsigned prepare_pipes,
				unsigned disable_pipes)
{
	int ret;

	ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
			       prepare_pipes, disable_pipes);

	if (ret == 0)
		intel_modeset_check_state(crtc->dev);

	return ret;
}

static int intel_set_mode(struct drm_crtc *crtc,
			  struct drm_display_mode *mode,
			  int x, int y, struct drm_framebuffer *fb)
{
	struct intel_crtc_config *pipe_config;
	unsigned modeset_pipes, prepare_pipes, disable_pipes;

	pipe_config = intel_modeset_compute_config(crtc, mode, fb,
						   &modeset_pipes,
						   &prepare_pipes,
						   &disable_pipes);

	if (IS_ERR(pipe_config))
		return PTR_ERR(pipe_config);

	return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
				    modeset_pipes, prepare_pipes,
				    disable_pipes);
}

static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll_config *shared_dpll = NULL;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *intel_crtc_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		int dpll;

		intel_crtc = to_intel_crtc(crtc);
		intel_crtc_state = to_intel_crtc_state(crtc_state);
		dpll = intel_crtc_state->shared_dpll;

		if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
			continue;

		intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;

		if (!shared_dpll)
			shared_dpll = intel_atomic_get_shared_dpll_state(state);

		shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
	}
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled during this modeset */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
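/*
 * intel_modeset_clear_plls above drops a crtc's claim on its shared DPLL by
 * clearing the crtc's bit in that pll's crtc_mask — and it does so in the
 * copy obtained from intel_atomic_get_shared_dpll_state, so the release only
 * takes effect if the commit succeeds. The bit manipulation itself is plain
 * mask arithmetic; a self-contained sketch with illustrative values:
 */
#include <stdio.h>

int main(void)
{
	unsigned int crtc_mask = 0x5;	/* pipes A (bit 0) and C (bit 2) hold the pll */
	int pipe = 2;			/* pipe C gives up its reference */

	crtc_mask &= ~(1u << pipe);
	printf("crtc_mask = 0x%x\n", crtc_mask);	/* 0x1: only pipe A left */
	return 0;
}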
 
void intel_crtc_restore_mode(struct drm_crtc *crtc)
static int intel_modeset_all_pipes(struct drm_atomic_state *state)
{
intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
}
 
#undef for_each_intel_crtc_masked
 
static void intel_set_config_free(struct intel_set_config *config)
{
if (!config)
return;
 
kfree(config->save_connector_encoders);
kfree(config->save_encoder_crtcs);
kfree(config->save_crtc_enabled);
kfree(config);
}
 
static int intel_set_config_save_state(struct drm_device *dev,
struct intel_set_config *config)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
int count;
struct drm_crtc_state *crtc_state;
int ret = 0;
 
config->save_crtc_enabled =
kcalloc(dev->mode_config.num_crtc,
sizeof(bool), GFP_KERNEL);
if (!config->save_crtc_enabled)
return -ENOMEM;
/* add all active pipes to the state */
for_each_crtc(state->dev, crtc) {
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
 
config->save_encoder_crtcs =
kcalloc(dev->mode_config.num_encoder,
sizeof(struct drm_crtc *), GFP_KERNEL);
if (!config->save_encoder_crtcs)
return -ENOMEM;
if (!crtc_state->active || needs_modeset(crtc_state))
continue;
 
config->save_connector_encoders =
kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_encoder *), GFP_KERNEL);
if (!config->save_connector_encoders)
return -ENOMEM;
crtc_state->mode_changed = true;
 
/* Copy data. Note that driver private data is not affected.
* Should anything bad happen only the expected state is
* restored, not the drivers personal bookkeeping.
*/
count = 0;
for_each_crtc(dev, crtc) {
config->save_crtc_enabled[count++] = crtc->enabled;
}
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret)
break;
 
count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
config->save_encoder_crtcs[count++] = encoder->crtc;
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
break;
}
 
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
config->save_connector_encoders[count++] = connector->encoder;
return ret;
}
 
return 0;
}
 
static void intel_set_config_restore_state(struct drm_device *dev,
struct intel_set_config *config)
static int intel_modeset_checks(struct drm_atomic_state *state)
{
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
int count;
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
count = 0;
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = config->save_crtc_enabled[count++];
 
if (crtc->new_enabled)
crtc->new_config = &crtc->config;
else
crtc->new_config = NULL;
if (!check_digital_port_conflicts(state)) {
DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
return -EINVAL;
}
 
count = 0;
for_each_intel_encoder(dev, encoder) {
encoder->new_crtc =
to_intel_crtc(config->save_encoder_crtcs[count++]);
}
/*
* See if the config requires any additional preparation, e.g.
* to adjust global state with pipes off. We need to do this
* here so we can get the modeset_pipe updated config for the new
* mode set on this crtc. For other crtcs we need to use the
* adjusted_mode bits in the crtc directly.
*/
if (dev_priv->display.modeset_calc_cdclk) {
unsigned int cdclk;
 
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
connector->new_encoder =
to_intel_encoder(config->save_connector_encoders[count++]);
}
}
ret = dev_priv->display.modeset_calc_cdclk(state);
 
static bool
is_crtc_connector_off(struct drm_mode_set *set)
{
int i;
cdclk = to_intel_atomic_state(state)->cdclk;
if (!ret && cdclk != dev_priv->cdclk_freq)
ret = intel_modeset_all_pipes(state);
 
if (set->num_connectors == 0)
return false;
if (ret < 0)
return ret;
} else
to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
 
if (WARN_ON(set->connectors == NULL))
return false;
intel_modeset_clear_plls(state);
 
for (i = 0; i < set->num_connectors; i++)
if (set->connectors[i]->encoder &&
set->connectors[i]->encoder->crtc == set->crtc &&
set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
return true;
if (IS_HASWELL(dev))
return haswell_mode_set_planes_workaround(state);
 
return false;
return 0;
}
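/*
 * In intel_modeset_checks above, when modeset_calc_cdclk reports that the new
 * configuration needs a different core display clock than the current
 * dev_priv->cdclk_freq, intel_modeset_all_pipes pulls every active crtc into
 * the transaction, since retuning the shared clock disturbs all of them. A
 * minimal sketch of that "global resource change forces everyone into the
 * update" rule, under simplified types; nothing below is i915 API.
 */
#include <stdbool.h>

struct head { bool active; bool mode_changed; };

/* if the shared clock must change, every active head needs a full
 * modeset so it can be disabled and re-enabled around the change */
void mark_heads_for_clock_change(struct head *heads, int n,
				 int cur_clock, int new_clock)
{
	int i;

	if (new_clock == cur_clock)
		return;

	for (i = 0; i < n; i++)
		if (heads[i].active)
			heads[i].mode_changed = true;
}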
 
static void
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
struct intel_set_config *config)
/**
* intel_atomic_check - validate state object
* @dev: drm device
* @state: state to validate
*/
static int intel_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int ret, i;
bool any_ms = false;
 
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
if (is_crtc_connector_off(set)) {
config->mode_changed = true;
} else if (set->crtc->primary->fb != set->fb) {
/*
* If we have no fb, we can only flip as long as the crtc is
* active, otherwise we need a full mode set. The crtc may
* be active if we've only disabled the primary plane, or
* in fastboot situations.
*/
if (set->crtc->primary->fb == NULL) {
struct intel_crtc *intel_crtc =
to_intel_crtc(set->crtc);
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
 
if (intel_crtc->active) {
DRM_DEBUG_KMS("crtc has no fb, will flip\n");
config->fb_changed = true;
} else {
DRM_DEBUG_KMS("inactive crtc, full mode set\n");
config->mode_changed = true;
}
} else if (set->fb == NULL) {
config->mode_changed = true;
} else if (set->fb->pixel_format !=
set->crtc->primary->fb->pixel_format) {
config->mode_changed = true;
} else {
config->fb_changed = true;
}
}
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc_state);
 
if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
config->fb_changed = true;
memset(&to_intel_crtc(crtc)->atomic, 0,
sizeof(struct intel_crtc_atomic_commit));
 
if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
DRM_DEBUG_KMS("modes are different, full mode set\n");
drm_mode_debug_printmodeline(&set->crtc->mode);
drm_mode_debug_printmodeline(set->mode);
config->mode_changed = true;
}
/* Catch I915_MODE_FLAG_INHERITED */
if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
crtc_state->mode_changed = true;
 
DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
set->crtc->base.id, config->mode_changed, config->fb_changed);
if (!crtc_state->enable) {
if (needs_modeset(crtc_state))
any_ms = true;
continue;
}
 
static int
intel_modeset_stage_output_state(struct drm_device *dev,
struct drm_mode_set *set,
struct intel_set_config *config)
{
struct intel_connector *connector;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
int ro;
if (!needs_modeset(crtc_state))
continue;
 
/* The upper layers ensure that we either disable a crtc or have a list
* of connectors. For paranoia, double-check this. */
WARN_ON(!set->fb && (set->num_connectors != 0));
WARN_ON(set->fb && (set->num_connectors == 0));
/* FIXME: For only active_changed we shouldn't need to do any
* state recomputation at all. */
 
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
/* Otherwise traverse passed in connector list and get encoders
* for them. */
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == &connector->base) {
connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
break;
}
}
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret)
return ret;
 
/* If we disable the crtc, disable all its connectors. Also, if
* the connector is on the changing crtc but not on the new
* connector list, disable it. */
if ((!set->fb || ro == set->num_connectors) &&
connector->base.encoder &&
connector->base.encoder->crtc == set->crtc) {
connector->new_encoder = NULL;
ret = intel_modeset_pipe_config(crtc, pipe_config);
if (ret)
return ret;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
connector->base.base.id,
connector->base.name);
if (i915.fastboot &&
intel_pipe_config_compare(state->dev,
to_intel_crtc_state(crtc->state),
pipe_config, true)) {
crtc_state->mode_changed = false;
to_intel_crtc_state(crtc_state)->update_pipe = true;
}
 
if (needs_modeset(crtc_state)) {
any_ms = true;
 
if (&connector->new_encoder->base != connector->base.encoder) {
DRM_DEBUG_KMS("encoder changed, full mode switch\n");
config->mode_changed = true;
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
}
 
intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
needs_modeset(crtc_state) ?
"[modeset]" : "[fastset]");
}
/* connector->new_encoder is now updated for all connectors. */
 
/* Update crtc of enabled connectors. */
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
struct drm_crtc *new_crtc;
if (any_ms) {
ret = intel_modeset_checks(state);
 
if (!connector->new_encoder)
continue;
if (ret)
return ret;
} else
to_intel_atomic_state(state)->cdclk =
to_i915(state->dev)->cdclk_freq;
 
new_crtc = connector->new_encoder->base.crtc;
 
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == &connector->base)
new_crtc = set->crtc;
return drm_atomic_helper_check_planes(state->dev, state);
}
 
/* Make sure the new CRTC will work with the encoder */
if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
new_crtc)) {
/**
* intel_atomic_commit - commit validated state object
* @dev: DRM device
* @state: the top-level driver state object
* @async: asynchronous commit
*
* This function commits a top-level state object that has been validated
* with drm_atomic_helper_check().
*
* FIXME: Atomic modeset support for i915 is not yet complete. At the moment
* we can only handle plane-related operations and do not yet support
* asynchronous commit.
*
* RETURNS
* Zero for success or -errno.
*/
static int intel_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool async)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int ret = 0;
int i;
bool any_ms = false;
 
if (async) {
DRM_DEBUG_KMS("i915 does not yet support async commit\n");
return -EINVAL;
}
connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.base.id,
connector->base.name,
new_crtc->base.id);
}
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
return ret;
 
/* Check for any encoders that needs to be disabled. */
for_each_intel_encoder(dev, encoder) {
int num_connectors = 0;
list_for_each_entry(connector,
&dev->mode_config.connector_list,
base.head) {
if (connector->new_encoder == encoder) {
WARN_ON(!connector->new_encoder->new_crtc);
num_connectors++;
}
}
drm_atomic_helper_swap_state(dev, state);
 
if (num_connectors == 0)
encoder->new_crtc = NULL;
else if (num_connectors > 1)
return -EINVAL;
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
/* Only now check for crtc changes so we don't miss encoders
* that will be disabled. */
if (&encoder->new_crtc->base != encoder->base.crtc) {
DRM_DEBUG_KMS("crtc changed, full mode switch\n");
config->mode_changed = true;
}
}
/* Now we've also updated encoder->new_crtc for all encoders. */
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (connector->new_encoder)
if (connector->new_encoder != connector->encoder)
connector->encoder = connector->new_encoder;
}
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = false;
if (!needs_modeset(crtc->state))
continue;
 
for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc == crtc) {
crtc->new_enabled = true;
break;
any_ms = true;
intel_pre_plane_update(intel_crtc);
 
if (crtc_state->active) {
intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
dev_priv->display.crtc_disable(crtc);
intel_crtc->active = false;
intel_disable_shared_dpll(intel_crtc);
}
}
 
if (crtc->new_enabled != crtc->base.enabled) {
DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
crtc->new_enabled ? "en" : "dis");
config->mode_changed = true;
}
/* Only after disabling all output pipelines that will be changed can we
* update the output configuration. */
intel_modeset_update_crtc_state(state);
 
if (crtc->new_enabled)
crtc->new_config = &crtc->config;
else
crtc->new_config = NULL;
}
if (any_ms) {
intel_shared_dpll_commit(state);
 
return 0;
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
modeset_update_crtc_power_domains(state);
}
 
static void disable_crtc_nofb(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
struct intel_connector *connector;
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool modeset = needs_modeset(crtc->state);
bool update_pipe = !modeset &&
to_intel_crtc_state(crtc->state)->update_pipe;
unsigned long put_domains = 0;
 
DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
pipe_name(crtc->pipe));
 
list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
if (connector->new_encoder &&
connector->new_encoder->new_crtc == crtc)
connector->new_encoder = NULL;
if (modeset && crtc->state->active) {
update_scanline_offset(to_intel_crtc(crtc));
dev_priv->display.crtc_enable(crtc);
}
 
for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc == crtc)
encoder->new_crtc = NULL;
}
if (update_pipe) {
put_domains = modeset_get_crtc_power_domains(crtc);
 
crtc->new_enabled = false;
crtc->new_config = NULL;
/* make sure intel_modeset_check_state runs */
any_ms = true;
}
 
static int intel_crtc_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
struct drm_mode_set save_set;
struct intel_set_config *config;
struct intel_crtc_config *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
int ret;
if (!modeset)
intel_pre_plane_update(intel_crtc);
 
BUG_ON(!set);
BUG_ON(!set->crtc);
BUG_ON(!set->crtc->helper_private);
drm_atomic_helper_commit_planes_on_crtc(crtc_state);
 
/* Enforce sane interface api - has been abused by the fb helper. */
BUG_ON(!set->mode && set->fb);
BUG_ON(set->fb && set->num_connectors == 0);
if (put_domains)
modeset_put_power_domains(dev_priv, put_domains);
 
if (set->fb) {
DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
set->crtc->base.id, set->fb->base.id,
(int)set->num_connectors, set->x, set->y);
} else {
DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
intel_post_plane_update(intel_crtc);
}
 
dev = set->crtc->dev;
/* FIXME: add subpixel order */
 
ret = -ENOMEM;
config = kzalloc(sizeof(*config), GFP_KERNEL);
if (!config)
goto out_config;
// drm_atomic_helper_wait_for_vblanks(dev, state);
 
ret = intel_set_config_save_state(dev, config);
if (ret)
goto out_config;
drm_atomic_helper_cleanup_planes(dev, state);
 
save_set.crtc = set->crtc;
save_set.mode = &set->crtc->mode;
save_set.x = set->crtc->x;
save_set.y = set->crtc->y;
save_set.fb = set->crtc->primary->fb;
if (any_ms)
intel_modeset_check_state(dev, state);
 
/* Compute whether we need a full modeset, only an fb base update or no
* change at all. In the future we might also check whether only the
* mode changed, e.g. for LVDS where we only change the panel fitter in
* such cases. */
intel_set_config_compute_mode_changes(set, config);
drm_atomic_state_free(state);
 
ret = intel_modeset_stage_output_state(dev, set, config);
if (ret)
goto fail;
return 0;
}
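/*
 * intel_atomic_commit above walks the standard helper sequence: prepare (pin)
 * the new framebuffers, swap the new state in as current, disable the crtcs
 * that need a full modeset, refresh the legacy state pointers, enable and
 * flush everything, then clean up the old framebuffers. A skeletal
 * restatement using only the helpers already visible in that function; error
 * paths and the i915-specific steps are elided, so treat it as a reading aid
 * rather than a working commit implementation.
 */
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

static int commit_outline(struct drm_device *dev,
			  struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);	/* pin new fbs */
	if (ret)
		return ret;

	drm_atomic_helper_swap_state(dev, state);	/* @state now holds the old state */

	/* ... disable modeset crtcs, update legacy pointers,
	 *     enable crtcs and commit planes per crtc ... */

	drm_atomic_helper_cleanup_planes(dev, state);	/* unpin old fbs */
	drm_atomic_state_free(state);
	return 0;
}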
 
pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
set->fb,
&modeset_pipes,
&prepare_pipes,
&disable_pipes);
if (IS_ERR(pipe_config)) {
ret = PTR_ERR(pipe_config);
goto fail;
} else if (pipe_config) {
if (pipe_config->has_audio !=
to_intel_crtc(set->crtc)->config.has_audio)
config->mode_changed = true;
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_atomic_state *state;
struct drm_crtc_state *crtc_state;
int ret;
 
/*
* Note we have an issue here with infoframes: current code
* only updates them on the full mode set path per hw
* requirements. So here we should be checking for any
* required changes and forcing a mode set.
*/
state = drm_atomic_state_alloc(dev);
if (!state) {
DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
crtc->base.id);
return;
}
 
/* set_mode will free it in the mode_changed case */
if (!config->mode_changed)
kfree(pipe_config);
state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
 
intel_update_pipe_size(to_intel_crtc(set->crtc));
retry:
crtc_state = drm_atomic_get_crtc_state(state, crtc);
ret = PTR_ERR_OR_ZERO(crtc_state);
if (!ret) {
if (!crtc_state->active)
goto out;
 
if (config->mode_changed) {
ret = intel_set_mode_pipes(set->crtc, set->mode,
set->x, set->y, set->fb, pipe_config,
modeset_pipes, prepare_pipes,
disable_pipes);
} else if (config->fb_changed) {
struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
 
// intel_crtc_wait_for_pending_flips(set->crtc);
 
ret = intel_pipe_set_base(set->crtc,
set->x, set->y, set->fb);
 
/*
* We need to make sure the primary plane is re-enabled if it
* has previously been turned off.
*/
if (!intel_crtc->primary_enabled && ret == 0) {
WARN_ON(!intel_crtc->active);
intel_enable_primary_hw_plane(set->crtc->primary, set->crtc);
crtc_state->mode_changed = true;
ret = drm_atomic_commit(state);
}
 
/*
* In the fastboot case this may be our only check of the
* state after boot. It would be better to only do it on
* the first update, but we don't have a nice way of doing that
* (and really, set_config isn't used much for high freq page
* flipping, so increasing its cost here shouldn't be a big
* deal).
*/
if (i915.fastboot && ret == 0)
intel_modeset_check_state(set->crtc->dev);
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
drm_modeset_backoff(state->acquire_ctx);
goto retry;
}
 
if (ret) {
DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
set->crtc->base.id, ret);
fail:
intel_set_config_restore_state(dev, config);
 
/*
* HACK: if the pipe was on, but we didn't have a framebuffer,
* force the pipe off to avoid oopsing in the modeset code
* due to fb==NULL. This should only happen during boot since
* we don't yet reconstruct the FB from the hardware state.
*/
if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
disable_crtc_nofb(to_intel_crtc(save_set.crtc));
 
/* Try to restore the config */
if (config->mode_changed &&
intel_set_mode(save_set.crtc, save_set.mode,
save_set.x, save_set.y, save_set.fb))
DRM_ERROR("failed to restore config after modeset failure\n");
if (ret)
out:
drm_atomic_state_free(state);
}
 
out_config:
intel_set_config_free(config);
return ret;
}
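/*
 * The retry loop in the new intel_crtc_restore_mode is the canonical
 * legacy-on-atomic shape: attach the crtc's legacy acquire context, build up
 * the state, and on -EDEADLK clear the state and back off all held locks
 * before retrying. Reduced to a self-contained helper using only calls that
 * appear above; the function name is illustrative.
 */
#include <linux/err.h>
#include <drm/drm_atomic.h>
#include <drm/drm_modeset_lock.h>

/* assumes state->acquire_ctx was set up, as intel_crtc_restore_mode does */
static void force_modeset_sketch(struct drm_crtc *crtc,
				 struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	int ret;

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return;

	crtc_state->mode_changed = true;	/* request a full modeset */
	ret = drm_atomic_commit(state);
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(state->acquire_ctx);
		goto retry;
	}
}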
#undef for_each_intel_crtc_masked
 
static const struct drm_crtc_funcs intel_crtc_funcs = {
.gamma_set = intel_crtc_gamma_set,
.set_config = intel_crtc_set_config,
.set_config = drm_atomic_helper_set_config,
.destroy = intel_crtc_destroy,
// .page_flip = intel_crtc_page_flip,
.atomic_duplicate_state = intel_crtc_duplicate_state,
.atomic_destroy_state = intel_crtc_destroy_state,
};
 
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
11524,91 → 13411,131
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}
 
static int
intel_primary_plane_disable(struct drm_plane *plane)
{
	struct drm_device *dev = plane->dev;
	struct intel_crtc *intel_crtc;

	if (!plane->fb)
		return 0;

	BUG_ON(!plane->crtc);

	intel_crtc = to_intel_crtc(plane->crtc);

	/*
	 * Even though we checked plane->fb above, it's still possible that
	 * the primary plane has been implicitly disabled because the crtc
	 * coordinates given weren't visible, or because we detected
	 * that it was 100% covered by a sprite plane. Or, the CRTC may be
	 * off and we've set a fb, but haven't actually turned on the CRTC yet.
	 * In either case, we need to unpin the FB and let the fb pointer get
	 * updated, but otherwise we don't need to touch the hardware.
	 */
	if (!intel_crtc->primary_enabled)
		goto disable_unpin;

	// intel_crtc_wait_for_pending_flips(plane->crtc);
	intel_disable_primary_hw_plane(plane, plane->crtc);

disable_unpin:
	mutex_lock(&dev->struct_mutex);
	i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
			  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
	intel_unpin_fb_obj(intel_fb_obj(plane->fb));
	mutex_unlock(&dev->struct_mutex);
	plane->fb = NULL;

	return 0;
}

/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane. Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits. Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       const struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_framebuffer *fb = new_state->fb;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
	int ret = 0;

	if (!obj)
		return 0;

	mutex_lock(&dev->struct_mutex);

	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
	    INTEL_INFO(dev)->cursor_needs_physical) {
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = 1;
		if (ret)
			DRM_DEBUG_KMS("failed to attach phys object\n");
	} else {
		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL);
	}

	if (ret == 0)
		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the plane state being torn down
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       const struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_gem_object *obj = intel_fb_obj(old_state->fb);

	if (!obj)
		return;

	if (plane->type != DRM_PLANE_TYPE_CURSOR ||
	    !INTEL_INFO(dev)->cursor_needs_physical) {
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(old_state->fb, old_state);
		mutex_unlock(&dev->struct_mutex);
	}
}
 
static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_framebuffer *fb = state->fb;
	struct drm_rect *dest = &state->dst;
	struct drm_rect *src = &state->src;
	const struct drm_rect *clip = &state->clip;

	return drm_plane_helper_check_update(plane, crtc, fb,
					     src, dest, clip,
					     DRM_PLANE_HELPER_NO_SCALING,
					     DRM_PLANE_HELPER_NO_SCALING,
					     false, true, &state->visible);
}

static int
intel_prepare_primary_plane(struct drm_plane *plane,
			    struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_framebuffer *fb = state->fb;
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
	int ret;

	if (old_obj != obj) {
		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
		if (ret == 0)
			i915_gem_track_fb(old_obj, obj,
					  INTEL_FRONTBUFFER_PRIMARY(pipe));
		mutex_unlock(&dev->struct_mutex);
		if (ret != 0) {
			DRM_DEBUG_KMS("pin & fence failed\n");
			return ret;
		}
	}

	return 0;
}

int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
{
	int max_scale;
	struct drm_device *dev;
	struct drm_i915_private *dev_priv;
	int crtc_clock, cdclk;

	if (!intel_crtc || !crtc_state)
		return DRM_PLANE_HELPER_NO_SCALING;

	dev = intel_crtc->base.dev;
	dev_priv = dev->dev_private;
	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;

	if (!crtc_clock || !cdclk)
		return DRM_PLANE_HELPER_NO_SCALING;

	/*
	 * skl max scale is lower of:
	 *    close to 3 but not 3, -1 is for that purpose
	 *            or
	 *    cdclk/crtc_clock
	 */
	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));

	return max_scale;
}

static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
	bool can_position = false;

	/* use scaler when colorkey is not required */
	if (INTEL_INFO(plane->dev)->gen >= 9 &&
	    state->ckey.flags == I915_SET_COLORKEY_NONE) {
		min_scale = 1;
		max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
		can_position = true;
	}

	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
					     &state->dst, &state->clip,
					     min_scale, max_scale,
					     can_position, true,
					     &state->visible);
}
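/*
 * Worked numbers for skl_max_scale, which operates in 16.16 fixed point:
 * (1 << 16) * 3 - 1 is just under 3.0, and (1 << 8) * ((cdclk << 8) /
 * crtc_clock) is cdclk/crtc_clock in the same format. For cdclk = 540000 and
 * crtc_clock = 300000, (540000 << 8) / 300000 = 460 by integer division and
 * 460 << 8 = 117760, about 1.80 in 16.16 — so the plane may be downscaled by
 * at most roughly 1.8x here, well under the 3x ceiling.
 */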
 
static void
11615,145 → 13542,80
intel_commit_primary_plane(struct drm_plane *plane,
			   struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_framebuffer *fb = state->fb;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_framebuffer *old_fb = plane->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_rect *src = &state->src;

	crtc->primary->fb = fb;
	crtc->x = src->x1 >> 16;
	crtc->y = src->y1 >> 16;

	intel_plane->crtc_x = state->orig_dst.x1;
	intel_plane->crtc_y = state->orig_dst.y1;
	intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
	intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
	intel_plane->src_x = state->orig_src.x1;
	intel_plane->src_y = state->orig_src.y1;
	intel_plane->src_w = drm_rect_width(&state->orig_src);
	intel_plane->src_h = drm_rect_height(&state->orig_src);
	intel_plane->obj = obj;

	if (intel_crtc->active) {
		/*
		 * FBC does not work on some platforms for rotated
		 * planes, so disable it when rotation is not 0 and
		 * update it when rotation is set back to 0.
		 *
		 * FIXME: This is redundant with the fbc update done in
		 * the primary plane enable function except that that
		 * one is done too late. We eventually need to unify
		 * this.
		 */
		if (intel_crtc->primary_enabled &&
		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
		    dev_priv->fbc.plane == intel_crtc->plane &&
		    intel_plane->rotation != BIT(DRM_ROTATE_0)) {
			intel_disable_fbc(dev);
		}

		if (state->visible) {
			bool was_enabled = intel_crtc->primary_enabled;

			/* FIXME: kill this fastboot hack */
			intel_update_pipe_size(intel_crtc);

			intel_crtc->primary_enabled = true;

			dev_priv->display.update_primary_plane(crtc, plane->fb,
					crtc->x, crtc->y);

			/*
			 * BDW signals flip done immediately if the plane
			 * is disabled, even if the plane enable is already
			 * armed to occur at the next vblank :(
			 */
			if (IS_BROADWELL(dev) && !was_enabled)
				intel_wait_for_vblank(dev, intel_crtc->pipe);
		} else {
			/*
			 * If clipping results in a non-visible primary plane,
			 * we'll disable the primary plane. Note that this is
			 * a bit different than what happens if userspace
			 * explicitly disables the plane by passing fb=0
			 * because plane->fb still gets set and pinned.
			 */
			intel_disable_primary_hw_plane(plane, crtc);
		}

		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));

		mutex_lock(&dev->struct_mutex);
		intel_update_fbc(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	if (old_fb && old_fb != fb) {
		if (intel_crtc->active)
			intel_wait_for_vblank(dev, intel_crtc->pipe);

		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(old_obj);
		mutex_unlock(&dev->struct_mutex);
	}
}

static void
intel_commit_primary_plane(struct drm_plane *plane,
			   struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct drm_rect *src = &state->src;

	crtc = crtc ? crtc : plane->crtc;
	intel_crtc = to_intel_crtc(crtc);

	plane->fb = fb;
	crtc->x = src->x1 >> 16;
	crtc->y = src->y1 >> 16;

	if (!crtc->state->active)
		return;

	dev_priv->display.update_primary_plane(crtc, fb,
					       state->src.x1 >> 16,
					       state->src.y1 >> 16);
}

static void
intel_disable_primary_plane(struct drm_plane *plane,
			    struct drm_crtc *crtc)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
}

static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_state =
		to_intel_crtc_state(old_crtc_state);
	bool modeset = needs_modeset(crtc->state);

	if (intel_crtc->atomic.update_wm_pre)
		intel_update_watermarks(crtc);

	/* Perform vblank evasion around commit operation */
	if (crtc->state->active)
		intel_pipe_update_start(intel_crtc);

	if (modeset)
		return;

	if (to_intel_crtc_state(crtc->state)->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_state);
	else if (INTEL_INFO(dev)->gen >= 9)
		skl_detach_scalers(intel_crtc);
}
 
static int
intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct intel_plane_state state;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int ret;
 
state.crtc = crtc;
state.fb = fb;
 
/* sample coordinates in 16.16 fixed point */
state.src.x1 = src_x;
state.src.x2 = src_x + src_w;
state.src.y1 = src_y;
state.src.y2 = src_y + src_h;
 
/* integer pixels */
state.dst.x1 = crtc_x;
state.dst.x2 = crtc_x + crtc_w;
state.dst.y1 = crtc_y;
state.dst.y2 = crtc_y + crtc_h;
 
state.clip.x1 = 0;
state.clip.y1 = 0;
state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
 
state.orig_src = state.src;
state.orig_dst = state.dst;
 
ret = intel_check_primary_plane(plane, &state);
if (ret)
return ret;
 
ret = intel_prepare_primary_plane(plane, &state);
if (ret)
return ret;
 
intel_commit_primary_plane(plane, &state);
 
return 0;
if (crtc->state->active)
intel_pipe_update_end(intel_crtc);
}
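/*
 * Illustrative sketch (hypothetical helpers, not from the driver):
 * the source rectangle above is sampled in 16.16 fixed point, so
 * the high 16 bits are the integer pixel and the low 16 bits the
 * fraction, which is why the commit path shifts by 16.
 */
static inline uint32_t px_to_fixed16(uint32_t px)
{
	return px << 16;		/* 100 -> 0x00640000 */
}

static inline uint32_t fixed16_to_px(uint32_t fx)
{
	return fx >> 16;		/* 0x00640000 -> 100 */
}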
 
/* Common destruction function for both primary and cursor planes */
static void intel_plane_destroy(struct drm_plane *plane)
/**
* intel_plane_destroy - destroy a plane
* @plane: plane to destroy
*
* Common destruction function for all types of planes (primary, cursor,
* sprite).
*/
void intel_plane_destroy(struct drm_plane *plane)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
drm_plane_cleanup(plane);
11760,11 → 13622,16
kfree(intel_plane);
}
 
static const struct drm_plane_funcs intel_primary_plane_funcs = {
.update_plane = intel_primary_plane_setplane,
.disable_plane = intel_primary_plane_disable,
const struct drm_plane_funcs intel_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
.set_property = intel_plane_set_property
.set_property = drm_atomic_helper_plane_set_property,
.atomic_get_property = intel_plane_atomic_get_property,
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
 
};
 
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
11771,77 → 13638,92
int pipe)
{
struct intel_plane *primary;
struct intel_plane_state *state;
const uint32_t *intel_primary_formats;
int num_formats;
unsigned int num_formats;
 
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
if (primary == NULL)
return NULL;
 
state = intel_create_plane_state(&primary->base);
if (!state) {
kfree(primary);
return NULL;
}
primary->base.state = &state->base;
 
primary->can_scale = false;
primary->max_downscale = 1;
if (INTEL_INFO(dev)->gen >= 9) {
primary->can_scale = true;
state->scaler_id = -1;
}
primary->pipe = pipe;
primary->plane = pipe;
primary->rotation = BIT(DRM_ROTATE_0);
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
primary->commit_plane = intel_commit_primary_plane;
primary->disable_plane = intel_disable_primary_plane;
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
primary->plane = !pipe;
 
if (INTEL_INFO(dev)->gen <= 3) {
intel_primary_formats = intel_primary_formats_gen2;
num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
if (INTEL_INFO(dev)->gen >= 9) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
} else if (INTEL_INFO(dev)->gen >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
} else {
intel_primary_formats = intel_primary_formats_gen4;
num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
intel_primary_formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
}
 
drm_universal_plane_init(dev, &primary->base, 0,
&intel_primary_plane_funcs,
&intel_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY);
 
if (INTEL_INFO(dev)->gen >= 4) {
if (!dev->mode_config.rotation_property)
dev->mode_config.rotation_property =
drm_mode_create_rotation_property(dev,
BIT(DRM_ROTATE_0) |
BIT(DRM_ROTATE_180));
if (dev->mode_config.rotation_property)
drm_object_attach_property(&primary->base.base,
dev->mode_config.rotation_property,
primary->rotation);
}
if (INTEL_INFO(dev)->gen >= 4)
intel_create_rotation_property(dev, primary);
 
drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
 
return &primary->base;
}
 
static int
intel_cursor_plane_disable(struct drm_plane *plane)
void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
{
if (!plane->fb)
return 0;
if (!dev->mode_config.rotation_property) {
unsigned long flags = BIT(DRM_ROTATE_0) |
BIT(DRM_ROTATE_180);
 
BUG_ON(!plane->crtc);
if (INTEL_INFO(dev)->gen >= 9)
flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
 
return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
dev->mode_config.rotation_property =
drm_mode_create_rotation_property(dev, flags);
}
if (dev->mode_config.rotation_property)
drm_object_attach_property(&plane->base.base,
dev->mode_config.rotation_property,
plane->base.state->rotation);
}
 
static int
intel_check_cursor_plane(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev;
struct drm_framebuffer *fb = state->fb;
struct drm_rect *dest = &state->dst;
struct drm_rect *src = &state->src;
const struct drm_rect *clip = &state->clip;
struct drm_crtc *crtc = crtc_state->base.crtc;
struct drm_framebuffer *fb = state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int crtc_w, crtc_h;
enum pipe pipe = to_intel_plane(plane)->pipe;
unsigned stride;
int ret;
 
ret = drm_plane_helper_check_update(plane, crtc, fb,
src, dest, clip,
ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
&state->dst, &state->clip,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true, &state->visible);
11848,141 → 13730,108
if (ret)
return ret;
 
 
/* if we want to turn off the cursor, ignore width and height */
if (!obj)
return 0;
 
/* Check for which cursor types we support */
crtc_w = drm_rect_width(&state->orig_dst);
crtc_h = drm_rect_height(&state->orig_dst);
if (!cursor_size_ok(dev, crtc_w, crtc_h)) {
DRM_DEBUG("Cursor dimension not supported\n");
if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
state->base.crtc_w, state->base.crtc_h);
return -EINVAL;
}
 
stride = roundup_pow_of_two(crtc_w) * 4;
if (obj->base.size < stride * crtc_h) {
stride = roundup_pow_of_two(state->base.crtc_w) * 4;
if (obj->base.size < stride * state->base.crtc_h) {
DRM_DEBUG_KMS("buffer is too small\n");
return -ENOMEM;
}
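/*
 * Worked example of the check above: a 100x50 ARGB cursor gives
 * stride = roundup_pow_of_two(100) * 4 = 128 * 4 = 512 bytes, so
 * the backing object must hold at least 512 * 50 = 25600 bytes.
 */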
 
if (fb == crtc->cursor->fb)
return 0;
 
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
DRM_DEBUG_KMS("cursor cannot be tiled\n");
ret = -EINVAL;
return -EINVAL;
}
mutex_unlock(&dev->struct_mutex);
 
return ret;
/*
 * There's something wrong with the cursor on CHV pipe C.
 * If it straddles the left edge of the screen then
 * moving it away from the edge or disabling it often
 * results in a pipe underrun, and often that can lead to a
 * dead pipe (constant underrun reported, and it scans
 * out just a solid color). To recover from that, the
 * display power well must be turned off and on again.
 * Refuse to put the cursor into that compromised position.
 */
if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
state->visible && state->base.crtc_x < 0) {
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
}
 
static int
intel_commit_cursor_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_framebuffer *fb = state->fb;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
int crtc_w, crtc_h;
 
crtc->cursor_x = state->orig_dst.x1;
crtc->cursor_y = state->orig_dst.y1;
 
intel_plane->crtc_x = state->orig_dst.x1;
intel_plane->crtc_y = state->orig_dst.y1;
intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
intel_plane->src_x = state->orig_src.x1;
intel_plane->src_y = state->orig_src.y1;
intel_plane->src_w = drm_rect_width(&state->orig_src);
intel_plane->src_h = drm_rect_height(&state->orig_src);
intel_plane->obj = obj;
 
if (fb != crtc->cursor->fb) {
crtc_w = drm_rect_width(&state->orig_dst);
crtc_h = drm_rect_height(&state->orig_dst);
return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
} else {
intel_crtc_update_cursor(crtc, state->visible);
 
 
return 0;
}
 
static void
intel_disable_cursor_plane(struct drm_plane *plane,
struct drm_crtc *crtc)
{
intel_crtc_update_cursor(crtc, false);
}
 
static int
intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
static void
intel_commit_cursor_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane_state state;
int ret;
struct drm_crtc *crtc = state->base.crtc;
struct drm_device *dev = plane->dev;
struct intel_crtc *intel_crtc;
struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
uint32_t addr;
 
state.crtc = crtc;
state.fb = fb;
crtc = crtc ? crtc : plane->crtc;
intel_crtc = to_intel_crtc(crtc);
 
/* sample coordinates in 16.16 fixed point */
state.src.x1 = src_x;
state.src.x2 = src_x + src_w;
state.src.y1 = src_y;
state.src.y2 = src_y + src_h;
if (!obj)
addr = 0;
else if (!INTEL_INFO(dev)->cursor_needs_physical)
addr = i915_gem_obj_ggtt_offset(obj);
else
addr = obj->phys_handle->busaddr;
 
/* integer pixels */
state.dst.x1 = crtc_x;
state.dst.x2 = crtc_x + crtc_w;
state.dst.y1 = crtc_y;
state.dst.y2 = crtc_y + crtc_h;
intel_crtc->cursor_addr = addr;
 
state.clip.x1 = 0;
state.clip.y1 = 0;
state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
 
state.orig_src = state.src;
state.orig_dst = state.dst;
 
ret = intel_check_cursor_plane(plane, &state);
if (ret)
return ret;
 
return intel_commit_cursor_plane(plane, &state);
if (crtc->state->active)
intel_crtc_update_cursor(crtc, state->visible);
}
 
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.update_plane = intel_cursor_plane_update,
.disable_plane = intel_cursor_plane_disable,
.destroy = intel_plane_destroy,
.set_property = intel_plane_set_property,
};
 
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
int pipe)
{
struct intel_plane *cursor;
struct intel_plane_state *state;
 
cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
if (cursor == NULL)
return NULL;
 
state = intel_create_plane_state(&cursor->base);
if (!state) {
kfree(cursor);
return NULL;
}
cursor->base.state = &state->base;
 
cursor->can_scale = false;
cursor->max_downscale = 1;
cursor->pipe = pipe;
cursor->plane = pipe;
cursor->rotation = BIT(DRM_ROTATE_0);
cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
cursor->check_plane = intel_check_cursor_plane;
cursor->commit_plane = intel_commit_cursor_plane;
cursor->disable_plane = intel_disable_cursor_plane;
 
drm_universal_plane_init(dev, &cursor->base, 0,
&intel_cursor_plane_funcs,
&intel_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR);
11996,16 → 13845,38
if (dev->mode_config.rotation_property)
drm_object_attach_property(&cursor->base.base,
dev->mode_config.rotation_property,
cursor->rotation);
state->base.rotation);
}
 
if (INTEL_INFO(dev)->gen >= 9)
state->scaler_id = -1;
 
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
 
return &cursor->base;
}
 
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state)
{
int i;
struct intel_scaler *intel_scaler;
struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
 
for (i = 0; i < intel_crtc->num_scalers; i++) {
intel_scaler = &scaler_state->scalers[i];
intel_scaler->in_use = 0;
intel_scaler->mode = PS_SCALER_MODE_DYN;
}
 
scaler_state->scaler_id = -1;
}
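/*
 * Illustrative sketch (hypothetical helper, not from the driver)
 * of how the bookkeeping initialized above is meant to be used:
 * pick the first scaler whose in_use flag is clear, falling back
 * to the same -1 "no scaler" sentinel stored in scaler_id.
 */
static int skl_find_unused_scaler(struct intel_crtc_scaler_state *scaler_state,
				  int num_scalers)
{
	int i;

	for (i = 0; i < num_scalers; i++)
		if (!scaler_state->scalers[i].in_use)
			return i;

	return -1;
}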
 
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state = NULL;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
int i, ret;
12014,6 → 13885,23
if (intel_crtc == NULL)
return;
 
crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
if (!crtc_state)
goto fail;
intel_crtc->config = crtc_state;
intel_crtc->base.state = &crtc_state->base;
crtc_state->base.crtc = &intel_crtc->base;
 
/* initialize shared scalers */
if (INTEL_INFO(dev)->gen >= 9) {
if (pipe == PIPE_C)
intel_crtc->num_scalers = 1;
else
intel_crtc->num_scalers = SKL_NUM_SCALERS;
 
skl_init_scalers(dev, intel_crtc, crtc_state);
}
 
primary = intel_primary_plane_create(dev, pipe);
if (!primary)
goto fail;
12049,6 → 13937,8
intel_crtc->cursor_cntl = ~0;
intel_crtc->cursor_size = ~0;
 
intel_crtc->wm.cxsr_allowed = true;
 
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
12064,6 → 13954,7
drm_plane_cleanup(primary);
if (cursor)
drm_plane_cleanup(cursor);
kfree(crtc_state);
kfree(intel_crtc);
}
 
12087,9 → 13978,6
struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
 
if (!drmmode_crtc) {
12136,28 → 14024,6
return true;
}
 
const char *intel_output_name(int output)
{
static const char *names[] = {
[INTEL_OUTPUT_UNUSED] = "Unused",
[INTEL_OUTPUT_ANALOG] = "Analog",
[INTEL_OUTPUT_DVO] = "DVO",
[INTEL_OUTPUT_SDVO] = "SDVO",
[INTEL_OUTPUT_LVDS] = "LVDS",
[INTEL_OUTPUT_TVOUT] = "TV",
[INTEL_OUTPUT_HDMI] = "HDMI",
[INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
[INTEL_OUTPUT_EDP] = "eDP",
[INTEL_OUTPUT_DSI] = "DSI",
[INTEL_OUTPUT_UNKNOWN] = "Unknown",
};
 
if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
return "Invalid";
 
return names[output];
}
 
static bool intel_crt_present(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
12188,13 → 14054,26
if (intel_crt_present(dev))
intel_crt_init(dev);
 
if (HAS_DDI(dev)) {
if (IS_BROXTON(dev)) {
/*
* FIXME: Broxton doesn't support port detection via the
* DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
* detect the ports.
*/
intel_ddi_init(dev, PORT_A);
intel_ddi_init(dev, PORT_B);
intel_ddi_init(dev, PORT_C);
} else if (HAS_DDI(dev)) {
int found;
 
/* Haswell uses DDI functions to detect digital outputs */
found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
/* DDI A only supports eDP */
if (found)
/*
* Haswell uses DDI functions to detect digital outputs.
* On SKL pre-D0 the strap isn't connected, so we assume
* it's there.
*/
found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
if (found || IS_SKYLAKE(dev))
intel_ddi_init(dev, PORT_A);
 
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
12207,6 → 14086,15
intel_ddi_init(dev, PORT_C);
if (found & SFUSE_STRAP_DDID_DETECTED)
intel_ddi_init(dev, PORT_D);
/*
* On SKL we don't have a way to detect DDI-E so we rely on VBT.
*/
if (IS_SKYLAKE(dev) &&
(dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
intel_ddi_init(dev, PORT_E);
 
} else if (HAS_PCH_SPLIT(dev)) {
int found;
dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
12244,42 → 14132,41
* eDP ports. Consult the VBT as well as DP_DETECTED to
* detect eDP ports.
*/
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED)
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
PORT_B);
if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
!intel_dp_is_edp(dev, PORT_B))
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED ||
intel_dp_is_edp(dev, PORT_B))
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
intel_dp_init(dev, VLV_DP_B, PORT_B);
 
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED)
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
!intel_dp_is_edp(dev, PORT_C))
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED ||
intel_dp_is_edp(dev, PORT_C))
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
intel_dp_init(dev, VLV_DP_C, PORT_C);
 
if (IS_CHERRYVIEW(dev)) {
if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
PORT_D);
/* eDP not supported on port D, so don't check VBT */
if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
intel_hdmi_init(dev, CHV_HDMID, PORT_D);
if (I915_READ(CHV_DP_D) & DP_DETECTED)
intel_dp_init(dev, CHV_DP_D, PORT_D);
}
 
intel_dsi_init(dev);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
bool found = false;
 
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, GEN3_SDVOB, true);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
if (!found && IS_G4X(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
}
 
if (!found && SUPPORTS_INTEGRATED_DP(dev))
if (!found && IS_G4X(dev))
intel_dp_init(dev, DP_B, PORT_B);
}
 
12292,21 → 14179,20
 
if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
 
if (SUPPORTS_INTEGRATED_HDMI(dev)) {
if (IS_G4X(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
}
if (SUPPORTS_INTEGRATED_DP(dev))
if (IS_G4X(dev))
intel_dp_init(dev, DP_C, PORT_C);
}
 
if (SUPPORTS_INTEGRATED_DP(dev) &&
if (IS_G4X(dev) &&
(I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev, DP_D, PORT_D);
} else if (IS_GEN2(dev))
intel_dvo_init(dev);
 
 
intel_psr_init(dev);
 
for_each_intel_encoder(dev, encoder) {
12320,59 → 14206,151
drm_helper_move_panel_connectors_to_head(dev);
}
 
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 
drm_framebuffer_cleanup(fb);
mutex_lock(&dev->struct_mutex);
WARN_ON(!intel_fb->obj->framebuffer_references--);
drm_gem_object_unreference(&intel_fb->obj->base);
mutex_unlock(&dev->struct_mutex);
kfree(intel_fb);
}
 
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file,
unsigned int *handle)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
 
if (obj->userptr.mm) {
DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
return -EINVAL;
}
 
return drm_gem_handle_create(file, &obj->base, handle);
}
 
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_file *file,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips)
{
struct drm_device *dev = fb->dev;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
 
mutex_lock(&dev->struct_mutex);
intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
mutex_unlock(&dev->struct_mutex);
 
return 0;
}
 
static const struct drm_framebuffer_funcs intel_fb_funcs = {
// .destroy = intel_user_framebuffer_destroy,
// .create_handle = intel_user_framebuffer_create_handle,
.destroy = intel_user_framebuffer_destroy,
.create_handle = intel_user_framebuffer_create_handle,
.dirty = intel_user_framebuffer_dirty,
};
 
static
u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
uint32_t pixel_format)
{
u32 gen = INTEL_INFO(dev)->gen;
 
if (gen >= 9) {
/* "The stride in bytes must not exceed the of the size of 8K
* pixels and 32K bytes."
*/
return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
} else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
return 32*1024;
} else if (gen >= 4) {
if (fb_modifier == I915_FORMAT_MOD_X_TILED)
return 16*1024;
else
return 32*1024;
} else if (gen >= 3) {
if (fb_modifier == I915_FORMAT_MOD_X_TILED)
return 8*1024;
else
return 16*1024;
} else {
/* XXX DSPC is limited to 4k tiled */
return 8*1024;
}
}
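/*
 * Worked example for the gen9 branch above: XRGB8888 has a cpp of
 * 4, so min(8192 * 4, 32768) = 32768 bytes, while for RGB565
 * (cpp 2) the 8K-pixel bound dominates: min(8192 * 2, 32768) =
 * 16384 bytes.
 */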
 
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
int aligned_height;
int pitch_limit;
unsigned int aligned_height;
int ret;
u32 pitch_limit, stride_alignment;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
if (obj->tiling_mode == I915_TILING_Y) {
DRM_DEBUG("hardware does not support tiling Y\n");
if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
/* Enforce that fb modifier and tiling mode match, but only for
* X-tiled. This is needed for FBC. */
if (!!(obj->tiling_mode == I915_TILING_X) !=
!!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
return -EINVAL;
}
} else {
if (obj->tiling_mode == I915_TILING_X)
mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
else if (obj->tiling_mode == I915_TILING_Y) {
DRM_DEBUG("No Y tiling for legacy addfb\n");
return -EINVAL;
}
}
 
if (mode_cmd->pitches[0] & 63) {
DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
mode_cmd->pitches[0]);
/* Sanity check the passed-in modifier. */
switch (mode_cmd->modifier[0]) {
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
if (INTEL_INFO(dev)->gen < 9) {
DRM_DEBUG("Unsupported tiling 0x%llx!\n",
mode_cmd->modifier[0]);
return -EINVAL;
}
case DRM_FORMAT_MOD_NONE:
case I915_FORMAT_MOD_X_TILED:
break;
default:
DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
mode_cmd->modifier[0]);
return -EINVAL;
}
 
if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
pitch_limit = 32*1024;
} else if (INTEL_INFO(dev)->gen >= 4) {
if (obj->tiling_mode)
pitch_limit = 16*1024;
else
pitch_limit = 32*1024;
} else if (INTEL_INFO(dev)->gen >= 3) {
if (obj->tiling_mode)
pitch_limit = 8*1024;
else
pitch_limit = 16*1024;
} else
/* XXX DSPC is limited to 4k tiled */
pitch_limit = 8*1024;
stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
mode_cmd->pixel_format);
if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
mode_cmd->pitches[0], stride_alignment);
return -EINVAL;
}
 
pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
mode_cmd->pixel_format);
if (mode_cmd->pitches[0] > pitch_limit) {
DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
obj->tiling_mode ? "tiled" : "linear",
DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
"tiled" : "linear",
mode_cmd->pitches[0], pitch_limit);
return -EINVAL;
}
 
if (obj->tiling_mode != I915_TILING_NONE &&
if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
mode_cmd->pitches[0] != obj->stride) {
DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
mode_cmd->pitches[0], obj->stride);
12387,7 → 14365,6
case DRM_FORMAT_ARGB8888:
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
if (INTEL_INFO(dev)->gen > 3) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
12394,12 → 14371,16
return -EINVAL;
}
break;
case DRM_FORMAT_ABGR8888:
if (!IS_VALLEYVIEW(dev) && INTEL_INFO(dev)->gen < 9) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
return -EINVAL;
}
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
if (INTEL_INFO(dev)->gen < 4) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
12406,6 → 14387,13
return -EINVAL;
}
break;
case DRM_FORMAT_ABGR2101010:
if (!IS_VALLEYVIEW(dev)) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
return -EINVAL;
}
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
12426,8 → 14414,9
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
 
aligned_height = intel_align_height(dev, mode_cmd->height,
obj->tiling_mode);
aligned_height = intel_fb_align_height(dev, mode_cmd->height,
mode_cmd->pixel_format,
mode_cmd->modifier[0]);
/* FIXME drm helper for size checks (especially planar formats)? */
if (obj->base.size < aligned_height * mode_cmd->pitches[0])
return -EINVAL;
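/*
 * Illustrative numbers for the size check above: a linear
 * 1920x1080 XRGB8888 fb has pitches[0] = 1920 * 4 = 7680 bytes,
 * so the object must hold at least 1080 * 7680 = 8294400 bytes.
 */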
12445,7 → 14434,23
return 0;
}
 
#ifndef CONFIG_DRM_I915_FBDEV
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
struct drm_mode_fb_cmd2 *user_mode_cmd)
{
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
 
obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
mode_cmd.handles[0]));
if (&obj->base == NULL)
return ERR_PTR(-ENOENT);
 
return intel_framebuffer_create(dev, &mode_cmd, obj);
}
 
#ifndef CONFIG_DRM_FBDEV_EMULATION
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
12452,8 → 14457,12
#endif
 
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = NULL,
.fb_create = intel_user_framebuffer_create,
.output_poll_changed = intel_fbdev_output_poll_changed,
.atomic_check = intel_atomic_check,
.atomic_commit = intel_atomic_commit,
.atomic_state_alloc = intel_atomic_state_alloc,
.atomic_state_clear = intel_atomic_state_clear,
};
 
/* Set up chip specific display functions */
12472,57 → 14481,91
else
dev_priv->display.find_dpll = i9xx_find_best_dpll;
 
if (HAS_DDI(dev)) {
if (INTEL_INFO(dev)->gen >= 9) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_plane_config = ironlake_get_plane_config;
dev_priv->display.get_initial_plane_config =
skylake_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
if (INTEL_INFO(dev)->gen >= 9)
dev_priv->display.update_primary_plane =
skylake_update_primary_plane;
else
} else if (HAS_DDI(dev)) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_initial_plane_config =
ironlake_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.get_plane_config = ironlake_get_plane_config;
dev_priv->display.get_initial_plane_config =
ironlake_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_plane_config = i9xx_get_plane_config;
dev_priv->display.get_initial_plane_config =
i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_primary_plane =
i9xx_update_primary_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_plane_config = i9xx_get_plane_config;
dev_priv->display.get_initial_plane_config =
i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_primary_plane =
i9xx_update_primary_plane;
}
 
/* Returns the core display clock speed */
if (IS_VALLEYVIEW(dev))
if (IS_SKYLAKE(dev))
dev_priv->display.get_display_clock_speed =
skylake_get_display_clock_speed;
else if (IS_BROXTON(dev))
dev_priv->display.get_display_clock_speed =
broxton_get_display_clock_speed;
else if (IS_BROADWELL(dev))
dev_priv->display.get_display_clock_speed =
broadwell_get_display_clock_speed;
else if (IS_HASWELL(dev))
dev_priv->display.get_display_clock_speed =
haswell_get_display_clock_speed;
else if (IS_VALLEYVIEW(dev))
dev_priv->display.get_display_clock_speed =
valleyview_get_display_clock_speed;
else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
else if (IS_GEN5(dev))
dev_priv->display.get_display_clock_speed =
ilk_get_display_clock_speed;
else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
IS_GEN6(dev) || IS_IVYBRIDGE(dev))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_GM45(dev))
dev_priv->display.get_display_clock_speed =
gm45_get_display_clock_speed;
else if (IS_CRESTLINE(dev))
dev_priv->display.get_display_clock_speed =
i965gm_get_display_clock_speed;
else if (IS_PINEVIEW(dev))
dev_priv->display.get_display_clock_speed =
pnv_get_display_clock_speed;
else if (IS_G33(dev) || IS_G4X(dev))
dev_priv->display.get_display_clock_speed =
g33_get_display_clock_speed;
else if (IS_I915G(dev))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
12540,10 → 14583,12
i865_get_display_clock_speed;
else if (IS_I85X(dev))
dev_priv->display.get_display_clock_speed =
i855_get_display_clock_speed;
else /* 852, 830 */
i85x_get_display_clock_speed;
else { /* 830 */
WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
}
 
if (IS_GEN5(dev)) {
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
12552,22 → 14597,30
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
dev_priv->display.modeset_global_resources =
ivb_modeset_global_resources;
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
if (IS_BROADWELL(dev)) {
dev_priv->display.modeset_commit_cdclk =
broadwell_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
broadwell_modeset_calc_cdclk;
}
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.modeset_global_resources =
valleyview_modeset_global_resources;
dev_priv->display.modeset_commit_cdclk =
valleyview_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
valleyview_modeset_calc_cdclk;
} else if (IS_BROXTON(dev)) {
dev_priv->display.modeset_commit_cdclk =
broxton_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
broxton_modeset_calc_cdclk;
}
 
/* Default just returns -ENODEV to indicate unsupported */
// dev_priv->display.queue_flip = intel_default_queue_flip;
 
intel_panel_init_backlight_funcs(dev);
 
mutex_init(&dev_priv->pps_mutex);
}
12658,9 → 14711,6
};
 
static struct intel_quirk intel_quirks[] = {
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
 
/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
 
12706,11 → 14756,20
/* Apple Macbook 2,1 (Core 2 T7400) */
{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
 
/* Apple Macbook 4,1 */
{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
 
/* Toshiba CB35 Chromebook (Celeron 2955U) */
{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
 
/* HP Chromebook 14 (Celeron 2955U) */
{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
 
/* Dell Chromebook 11 */
{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
 
/* Dell Chromebook 11 (2015 version) */
{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};
 
static void intel_init_quirks(struct drm_device *dev)
12741,6 → 14800,7
u8 sr1;
u32 vga_reg = i915_vgacntrl_reg(dev);
 
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
// vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
outb(SR01, VGA_SR_INDEX);
sr1 = inb(VGA_SR_DATA);
12748,23 → 14808,15
// vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
udelay(300);
 
/*
* Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
* from S3 without preserving (some of?) the other bits.
*/
I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
I915_WRITE(vga_reg, VGA_DISP_DISABLE);
POSTING_READ(vga_reg);
}
 
void intel_modeset_init_hw(struct drm_device *dev)
{
intel_update_cdclk(dev);
intel_prepare_ddi(dev);
 
if (IS_VALLEYVIEW(dev))
vlv_update_cdclk(dev);
 
intel_init_clock_gating(dev);
 
intel_enable_gt_powersave(dev);
}
 
12774,7 → 14826,7
int sprite, ret;
enum pipe pipe;
struct intel_crtc *crtc;
 
ENTER();
drm_mode_config_init(dev);
 
dev->mode_config.min_width = 0;
12783,6 → 14835,8
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
 
dev->mode_config.allow_fb_modifiers = true;
 
dev->mode_config.funcs = &intel_mode_funcs;
 
intel_init_quirks(dev);
12792,6 → 14846,24
if (INTEL_INFO(dev)->num_pipes == 0)
return;
 
/*
* There may be no VBT; and if the BIOS enabled SSC we can
* just keep using it to avoid unnecessary flicker. Whereas if the
* BIOS isn't using it, don't assume it will work even if the VBT
* indicates as much.
*/
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE);
 
if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
bios_lvds_use_ssc ? "en" : "dis",
dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
}
}
 
intel_init_display(dev);
 
if (IS_GEN2(dev)) {
12821,7 → 14893,7
 
for_each_pipe(dev_priv, pipe) {
intel_crtc_init(dev, pipe);
for_each_sprite(pipe, sprite) {
for_each_sprite(dev_priv, pipe, sprite) {
ret = intel_plane_init(dev, pipe, sprite);
if (ret)
DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
12829,24 → 14901,25
}
}
 
intel_init_dpio(dev);
intel_update_czclk(dev_priv);
intel_update_cdclk(dev);
 
intel_shared_dpll_init(dev);
 
/* save the BIOS value before clobbering it */
dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
 
/* Just in case the BIOS is doing something questionable. */
intel_disable_fbc(dev);
intel_fbc_disable(dev_priv);
 
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, false);
intel_modeset_setup_hw_state(dev);
drm_modeset_unlock_all(dev);
 
for_each_intel_crtc(dev, crtc) {
struct intel_initial_plane_config plane_config = {};
 
if (!crtc->active)
continue;
 
12857,17 → 14930,28
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
if (dev_priv->display.get_plane_config) {
dev_priv->display.get_plane_config(crtc,
&crtc->plane_config);
dev_priv->display.get_initial_plane_config(crtc,
&plane_config);
 
/*
* If the fb is shared between multiple heads, we'll
* just get the first one.
*/
crtc->plane_config.size = 16*1024*1024;
intel_find_plane_obj(crtc, &crtc->plane_config);
intel_find_initial_plane_obj(crtc, &plane_config);
if (!main_fb_obj) {
	struct drm_framebuffer *fb;

	fb = crtc->base.primary->fb;
	main_fb_obj = intel_fb_obj(fb);
	main_fb_obj->map_and_fenceable = true;
	DRM_DEBUG_KMS("main_fb_obj %p gtt_offset 0x%08lx\n",
		      main_fb_obj, i915_gem_obj_ggtt_offset(main_fb_obj));
}
 
}
 
 
LEAVE();
}
 
static void intel_enable_pipe_a(struct drm_device *dev)
12880,9 → 14964,7
/* We can't just switch on the pipe A, we need to set things up with a
* proper mode and output configuration. As a gross hack, enable pipe A
* by enabling the load detect pipe once. */
list_for_each_entry(connector,
&dev->mode_config.connector_list,
base.head) {
for_each_intel_connector(dev, connector) {
if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
crt = &connector->base;
break;
12893,7 → 14975,7
return;
 
if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
intel_release_load_detect_pipe(crt, &load_detect_temp);
intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}
 
static bool
12901,13 → 14983,12
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg, val;
u32 val;
 
if (INTEL_INFO(dev)->num_pipes == 1)
return true;
 
reg = DSPCNTR(!crtc->plane);
val = I915_READ(reg);
val = I915_READ(DSPCNTR(!crtc->plane));
 
if ((val & DISPLAY_PLANE_ENABLE) &&
(!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
12916,6 → 14997,17
return true;
}
 
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
 
for_each_encoder_on_crtc(dev, &crtc->base, encoder)
return true;
 
return false;
}
 
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
12923,21 → 15015,29
u32 reg;
 
/* Clear any frame start delays used for debugging left by the BIOS */
reg = PIPECONF(crtc->config.cpu_transcoder);
reg = PIPECONF(crtc->config->cpu_transcoder);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 
/* restore vblank interrupts to correct state */
drm_crtc_vblank_reset(&crtc->base);
if (crtc->active) {
update_scanline_offset(crtc);
drm_vblank_on(dev, crtc->pipe);
} else
drm_vblank_off(dev, crtc->pipe);
struct intel_plane *plane;
 
drm_crtc_vblank_on(&crtc->base);
 
/* Disable everything but the primary plane */
for_each_intel_plane_on_crtc(dev, crtc, plane) {
if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
continue;
 
plane->disable_plane(&plane->base, &crtc->base);
}
}
 
/* We need to sanitize the plane -> pipe mapping first because this will
* disable the crtc (and hence change the state) if it is wrong. Note
* that gen4+ has a fixed plane -> pipe mapping. */
if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
struct intel_connector *connector;
bool plane;
 
DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
12947,33 → 15047,12
* Temporarily change the plane mapping and disable everything
* ... */
plane = crtc->plane;
to_intel_plane_state(crtc->base.primary->state)->visible = true;
crtc->plane = !plane;
crtc->primary_enabled = true;
dev_priv->display.crtc_disable(&crtc->base);
intel_crtc_disable_noatomic(&crtc->base);
crtc->plane = plane;
 
/* ... and break all links. */
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (connector->encoder->base.crtc != &crtc->base)
continue;
 
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
/* multiple connectors may have the same encoder:
* handle them and break crtc link separately */
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head)
if (connector->encoder->base.crtc == &crtc->base) {
connector->encoder->base.crtc = NULL;
connector->encoder->connectors_active = false;
}
 
WARN_ON(crtc->active);
crtc->base.enabled = false;
}
 
if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
crtc->pipe == PIPE_A && !crtc->active) {
/* BIOS forgot to enable pipe A, this mostly happens after
12985,19 → 15064,23
 
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
intel_crtc_update_dpms(&crtc->base);
if (!intel_crtc_has_encoders(crtc))
intel_crtc_disable_noatomic(&crtc->base);
 
if (crtc->active != crtc->base.enabled) {
if (crtc->active != crtc->base.state->active) {
struct intel_encoder *encoder;
 
/* This can happen either due to bugs in the get_hw_state
* functions or because the pipe is force-enabled due to the
* functions or because of calls to intel_crtc_disable_noatomic,
* or because the pipe is force-enabled due to the
* pipe A quirk. */
DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
crtc->base.base.id,
crtc->base.enabled ? "enabled" : "disabled",
crtc->base.state->enable ? "enabled" : "disabled",
crtc->active ? "enabled" : "disabled");
 
WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
crtc->base.state->active = crtc->active;
crtc->base.enabled = crtc->active;
 
/* Because we only establish the connector -> encoder ->
13007,11 → 15090,9
* actually up, hence no need to break them. */
WARN_ON(crtc->active);
 
for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
WARN_ON(encoder->connectors_active);
for_each_encoder_on_crtc(dev, &crtc->base, encoder)
encoder->base.crtc = NULL;
}
}
 
if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
/*
13036,6 → 15117,7
{
struct intel_connector *connector;
struct drm_device *dev = encoder->base.dev;
bool active = false;
 
/* We need to check both for a crtc link (meaning that the
* encoder is active and trying to read from a pipe) and the
13043,7 → 15125,15
bool has_active_crtc = encoder->base.crtc &&
to_intel_crtc(encoder->base.crtc)->active;
 
if (encoder->connectors_active && !has_active_crtc) {
for_each_intel_connector(dev, connector) {
if (connector->base.encoder != &encoder->base)
continue;
 
active = true;
break;
}
 
if (active && !has_active_crtc) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
encoder->base.base.id,
encoder->base.name);
13060,15 → 15150,12
encoder->post_disable(encoder);
}
encoder->base.crtc = NULL;
encoder->connectors_active = false;
 
/* Inconsistent output/port/pipe state happens presumably due to
* a bug in one of the get_hw_state functions. Or someplace else
* in our code, like the register restore mess on resume. Clamp
* things to off as a safer default. */
list_for_each_entry(connector,
&dev->mode_config.connector_list,
base.head) {
for_each_intel_connector(dev, connector) {
if (connector->encoder != encoder)
continue;
connector->base.dpms = DRM_MODE_DPMS_OFF;
13107,14 → 15194,25
i915_redisable_vga_power_on(dev);
}
 
static bool primary_get_hw_state(struct intel_crtc *crtc)
static bool primary_get_hw_state(struct intel_plane *plane)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 
if (!crtc->active)
return false;
return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
}
 
return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct intel_crtc *crtc)
{
struct drm_plane *primary = crtc->base.primary;
struct intel_plane_state *plane_state =
to_intel_plane_state(primary->state);
 
plane_state->visible =
primary_get_hw_state(to_intel_plane(primary));
 
if (plane_state->visible)
crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
}
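/*
 * plane_mask is a plain bitmask keyed by drm_plane_index(), so an
 * illustrative "is the primary plane active" test would read:
 *
 *	if (crtc->base.state->plane_mask &
 *	    (1 << drm_plane_index(crtc->base.primary)))
 *		...
 */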
 
static void intel_modeset_readout_hw_state(struct drm_device *dev)
13127,16 → 15225,18
int i;
 
for_each_intel_crtc(dev, crtc) {
memset(&crtc->config, 0, sizeof(crtc->config));
__drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
memset(crtc->config, 0, sizeof(*crtc->config));
crtc->config->base.crtc = &crtc->base;
 
crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
 
crtc->active = dev_priv->display.get_pipe_config(crtc,
&crtc->config);
crtc->config);
 
crtc->base.state->active = crtc->active;
crtc->base.enabled = crtc->active;
crtc->primary_enabled = primary_get_hw_state(crtc);
 
readout_plane_state(crtc);
 
DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
crtc->base.base.id,
crtc->active ? "enabled" : "disabled");
13169,12 → 15269,11
if (encoder->get_hw_state(encoder, &pipe)) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
encoder->base.crtc = &crtc->base;
encoder->get_config(encoder, &crtc->config);
encoder->get_config(encoder, crtc->config);
} else {
encoder->base.crtc = NULL;
}
 
encoder->connectors_active = false;
DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id,
encoder->base.name,
13182,11 → 15281,9
pipe_name(pipe));
}
 
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
for_each_intel_connector(dev, connector) {
if (connector->get_hw_state(connector)) {
connector->base.dpms = DRM_MODE_DPMS_ON;
connector->encoder->connectors_active = true;
connector->base.encoder = &connector->encoder->base;
} else {
connector->base.dpms = DRM_MODE_DPMS_OFF;
13197,12 → 15294,46
connector->base.name,
connector->base.encoder ? "enabled" : "disabled");
}
 
for_each_intel_crtc(dev, crtc) {
crtc->base.hwmode = crtc->config->base.adjusted_mode;
 
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
if (crtc->base.state->active) {
intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
 
/*
* The initial mode needs to be set in order to keep
* the atomic core happy. It wants a valid mode if the
* crtc's enabled, so we do the above call.
*
* At this point some state updated by the connectors
* in their ->detect() callback has not run yet, so
* no recalculation can be done yet.
*
* Even if we could do a recalculation and modeset
* right now it would cause a double modeset if
* fbdev or userspace chooses a different initial mode.
*
* If that happens, someone indicated they wanted a
* mode change, which means it's safe to do a full
* recalculation.
*/
crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
 
drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
update_scanline_offset(crtc);
}
}
}
 
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
* and i915 state tracking structures. */
void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore)
/* Scan out the current hw modeset state,
* and sanitizes it to the current state
*/
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
13212,20 → 15343,6
 
intel_modeset_readout_hw_state(dev);
 
/*
* Now that we have the config, copy it to each CRTC struct
* Note that this could go away if we move to using crtc_config
* checking everywhere.
*/
for_each_intel_crtc(dev, crtc) {
if (crtc->active && i915.fastboot) {
intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
crtc->base.base.id);
drm_mode_debug_printmodeline(&crtc->base.mode);
}
}
 
/* HW state is read out, now we need to sanitize this mess. */
for_each_intel_encoder(dev, encoder) {
intel_sanitize_encoder(encoder);
13234,9 → 15351,12
for_each_pipe(dev_priv, pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
intel_sanitize_crtc(crtc);
intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
intel_dump_pipe_config(crtc, crtc->config,
"[setup_hw_state]");
}
 
intel_modeset_update_connector_atomic_state(dev);
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
13249,52 → 15369,85
pll->on = false;
}
 
if (IS_GEN9(dev))
if (IS_VALLEYVIEW(dev))
vlv_wm_get_hw_state(dev);
else if (IS_GEN9(dev))
skl_wm_get_hw_state(dev);
else if (HAS_PCH_SPLIT(dev))
ilk_wm_get_hw_state(dev);
 
if (force_restore) {
i915_redisable_vga(dev);
for_each_intel_crtc(dev, crtc) {
unsigned long put_domains;
 
/*
* We need to use raw interfaces for restoring state to avoid
* checking (bogus) intermediate states.
*/
for_each_pipe(dev_priv, pipe) {
struct drm_crtc *crtc =
dev_priv->pipe_to_crtc_mapping[pipe];
put_domains = modeset_get_crtc_power_domains(&crtc->base);
if (WARN_ON(put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
intel_display_set_init_power(dev_priv, false);
}
 
intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
crtc->primary->fb);
void intel_display_resume(struct drm_device *dev)
{
struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
struct intel_connector *conn;
struct intel_plane *plane;
struct drm_crtc *crtc;
int ret;
 
if (!state)
return;
 
state->acquire_ctx = dev->mode_config.acquire_ctx;
 
/* preserve complete old state, including dpll */
intel_atomic_get_shared_dpll_state(state);
 
for_each_crtc(dev, crtc) {
struct drm_crtc_state *crtc_state =
drm_atomic_get_crtc_state(state, crtc);
 
ret = PTR_ERR_OR_ZERO(crtc_state);
if (ret)
goto err;
 
/* force a restore */
crtc_state->mode_changed = true;
}
} else {
intel_modeset_update_staged_output_state(dev);
 
for_each_intel_plane(dev, plane) {
ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
if (ret)
goto err;
}
 
intel_modeset_check_state(dev);
for_each_intel_connector(dev, conn) {
ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
if (ret)
goto err;
}
 
intel_modeset_setup_hw_state(dev);
 
i915_redisable_vga(dev);
ret = drm_atomic_commit(state);
if (!ret)
return;
 
err:
DRM_ERROR("Restoring old state failed with %i\n", ret);
drm_atomic_state_free(state);
}
 
void intel_modeset_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
int ret;
 
mutex_lock(&dev->struct_mutex);
intel_init_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
 
/*
* There may be no VBT; and if the BIOS enabled SSC we can
* just keep using it to avoid unnecessary flicker. Whereas if the
* BIOS isn't using it, don't assume it will work even if the VBT
* indicates as much.
*/
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE);
 
intel_modeset_init_hw(dev);
 
// intel_setup_overlay(dev);
13304,22 → 15457,29
* pinned & fenced. When we do the allocation it's too early
* for this.
*/
mutex_lock(&dev->struct_mutex);
for_each_crtc(dev, c) {
obj = intel_fb_obj(c->primary->fb);
if (obj == NULL)
continue;
 
if (intel_pin_and_fence_fb_obj(c->primary,
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(c->primary,
c->primary->fb,
NULL)) {
c->primary->state,
NULL, NULL);
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
c->primary->fb = NULL;
c->primary->crtc = c->primary->state->crtc = NULL;
update_state_fb(c->primary);
c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
}
}
mutex_unlock(&dev->struct_mutex);
 
intel_backlight_register(dev);
}
 
void intel_connector_unregister(struct intel_connector *intel_connector)
13353,16 → 15513,10
*/
drm_kms_helper_poll_fini(dev);
 
mutex_lock(&dev->struct_mutex);
 
intel_unregister_dsm_handler();
 
intel_disable_fbc(dev);
intel_fbc_disable(dev_priv);
 
ironlake_teardown_rc6(dev);
 
mutex_unlock(&dev->struct_mutex);
 
/* flush any delayed tasks or pending work */
flush_scheduled_work();
 
/drivers/video/drm/i915/intel_dp.c
29,6 → 29,7
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
38,29 → 39,35
 
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
 
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
 
struct dp_link_dpll {
int link_bw;
int clock;
struct dpll dpll;
};
 
static const struct dp_link_dpll gen4_dpll[] = {
{ DP_LINK_BW_1_62,
{ 162000,
{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
{ DP_LINK_BW_2_7,
{ 270000,
{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
 
static const struct dp_link_dpll pch_dpll[] = {
{ DP_LINK_BW_1_62,
{ 162000,
{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
{ DP_LINK_BW_2_7,
{ 270000,
{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
 
static const struct dp_link_dpll vlv_dpll[] = {
{ DP_LINK_BW_1_62,
{ 162000,
{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
{ DP_LINK_BW_2_7,
{ 270000,
{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
 
74,14 → 81,20
* m2 is stored in fixed point format using formula below
* (m2_int << 22) | m2_fraction
*/
{ DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
{ 162000, /* m2_int = 32, m2_fraction = 1677722 */
{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
{ DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
{ 270000, /* m2_int = 27, m2_fraction = 0 */
{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
{ DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
{ 540000, /* m2_int = 27, m2_fraction = 0 */
{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
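/*
 * Checking the fixed-point comment above against the table: for
 * the 162000 kHz entry, (32 << 22) | 1677722 =
 * 0x8000000 | 0x19999a = 0x819999a, exactly the .m2 value used.
 */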
 
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
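/*
 * Illustrative sketch (hypothetical helper, not from the driver)
 * relating these kHz rate tables to the driver's bandwidth math:
 * with 8b/10b coding each 10 line bits carry 8 data bits, which
 * is the 8/10 factor intel_dp_max_data_rate() applies per lane.
 */
static int dp_lane_payload(int link_rate_khz)
{
	return link_rate_khz * 8 / 10;	/* e.g. 540000 -> 432000 */
}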
 
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
115,24 → 128,21
static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe);
 
int
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
return ~((1 << lane_count) - 1) & 0xf;
}
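/*
 * Worked example: for lane_count = 2, (1 << 2) - 1 = 0x3 marks
 * the used lanes, so ~0x3 & 0xf = 0xc reports lanes 2 and 3 as
 * the unused ones.
 */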
 
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
struct drm_device *dev = intel_dp->attached_connector->base.dev;
 
switch (max_link_bw) {
case DP_LINK_BW_1_62:
case DP_LINK_BW_2_7:
case DP_LINK_BW_5_4:
break;
case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
INTEL_INFO(dev)->gen >= 8) &&
intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
max_link_bw = DP_LINK_BW_5_4;
else
max_link_bw = DP_LINK_BW_2_7;
break;
default:
WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
max_link_bw);
207,7 → 217,7
target_clock = fixed_mode->clock;
}
 
max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
 
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
237,7 → 247,7
return v;
}
 
void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
int i;
if (dst_bytes > 4)
246,40 → 256,6
dst[i] = src >> ((3-i) * 8);
}
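/*
 * Illustrative behaviour of the unpack above: src = 0x11223344
 * with dst_bytes = 4 yields dst[] = { 0x11, 0x22, 0x33, 0x44 },
 * i.e. the most significant register byte lands first, mirroring
 * the pack helper that builds these words for the AUX channel.
 */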
 
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t clkcfg;
 
/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
if (IS_VALLEYVIEW(dev))
return 200;
 
clkcfg = I915_READ(CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
return 100;
case CLKCFG_FSB_533:
return 133;
case CLKCFG_FSB_667:
return 166;
case CLKCFG_FSB_800:
return 200;
case CLKCFG_FSB_1067:
return 266;
case CLKCFG_FSB_1333:
return 333;
/* these two are just a guess; one of them might be right */
case CLKCFG_FSB_1600:
case CLKCFG_FSB_1600_ALT:
return 400;
default:
return 133;
}
}
 
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct intel_dp *intel_dp);
299,7 → 275,7
* See vlv_power_sequencer_reset() why we need
* a power domain reference here.
*/
power_domain = intel_display_port_power_domain(encoder);
power_domain = intel_display_port_aux_power_domain(encoder);
intel_display_power_get(dev_priv, power_domain);
 
mutex_lock(&dev_priv->pps_mutex);
315,7 → 291,7
 
mutex_unlock(&dev_priv->pps_mutex);
 
power_domain = intel_display_port_power_domain(encoder);
power_domain = intel_display_port_aux_power_domain(encoder);
intel_display_power_put(dev_priv, power_domain);
}
 
326,7 → 302,9
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_dp->pps_pipe;
bool pll_enabled;
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = DPIO_PHY(pipe);
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
uint32_t DP;
 
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
356,9 → 334,13
* The DPLL for the pipe must be enabled for this to work.
* So enable temporarily it if it's not already enabled.
*/
if (!pll_enabled)
if (!pll_enabled) {
release_cl_override = IS_CHERRYVIEW(dev) &&
!chv_phy_powergate_ch(dev_priv, phy, ch, true);
 
vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll);
}
 
/*
* Similar magic as in intel_dp_enable_port().
375,9 → 357,13
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
 
if (!pll_enabled)
if (!pll_enabled) {
vlv_force_pll_off(dev, pipe);
 
if (release_cl_override)
chv_phy_powergate_ch(dev_priv, phy, ch, false);
}
}
 
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
557,7 → 543,9
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
if (HAS_PCH_SPLIT(dev))
if (IS_BROXTON(dev))
return BXT_PP_CONTROL(0);
else if (HAS_PCH_SPLIT(dev))
return PCH_PP_CONTROL;
else
return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
567,7 → 555,9
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
if (HAS_PCH_SPLIT(dev))
if (IS_BROXTON(dev))
return BXT_PP_STATUS(0);
else if (HAS_PCH_SPLIT(dev))
return PCH_PP_STATUS;
else
return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
583,8 → 573,6
edp_notifier);
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_div;
u32 pp_ctrl_reg, pp_div_reg;
 
if (!is_edp(intel_dp) || code != SYS_RESTART)
return 0;
593,6 → 581,8
 
if (IS_VALLEYVIEW(dev)) {
enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
u32 pp_ctrl_reg, pp_div_reg;
u32 pp_div;
 
pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
696,15 → 686,14
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (index)
return 0;
 
if (intel_dig_port->port == PORT_A) {
if (IS_GEN6(dev) || IS_GEN7(dev))
return 200; /* SNB & IVB eDP input clock at 400Mhz */
else
return 225; /* eDP input clock at 450Mhz */
return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
 
} else {
return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
}
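The switch to dev_priv->cdclk_freq replaces the hard-coded eDP constants, but the arithmetic behind them is unchanged: the AUX channel wants roughly a 2 MHz clock, so the divider is the source clock divided by 2 MHz, rounded up so the result never overshoots 2 MHz. With the clock in kHz that is DIV_ROUND_UP(clk, 2000), which reproduces the removed constants. A quick standalone check:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* AUX wants ~2 MHz; divider = source clock (kHz) / 2000, rounded up. */
static int aux_clock_divider(int src_khz)
{
	return DIV_ROUND_UP(src_khz, 2000);
}

int main(void)
{
	printf("%d\n", aux_clock_divider(400000)); /* 200: the old SNB/IVB value */
	printf("%d\n", aux_clock_divider(450000)); /* 225: the old 450 MHz value */
	return 0;
}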
719,7 → 708,7
if (intel_dig_port->port == PORT_A) {
if (index)
return 0;
return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
/* Workaround for non-ULT HSW */
switch (index) {
826,8 → 815,6
 
intel_dp_check_edp(intel_dp);
 
intel_aux_display_runtime_get(dev_priv);
 
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
status = I915_READ_NOTRACE(ch_ctl);
837,8 → 824,15
}
 
if (try == 3) {
static u32 last_status = -1;
const u32 status = I915_READ(ch_ctl);
 
if (status != last_status) {
WARN(1, "dp_aux_ch not started status 0x%08x\n",
I915_READ(ch_ctl));
status);
last_status = status;
}
 
ret = -EBUSY;
goto out;
}
875,15 → 869,22
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
 
if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR))
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
continue;
if (status & DP_AUX_CH_CTL_DONE)
break;
 
/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
* 400us delay required for errors and timeouts
* Timeout errors from the HW already meet this
* requirement so skip to next iteration
*/
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
usleep_range(400, 500);
continue;
}
if (status & DP_AUX_CH_CTL_DONE)
break;
goto done;
}
}
 
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
891,6 → 892,7
goto out;
}
 
done:
/* Check for timeout or receive error.
* Timeouts occur when the sink is not connected
*/
920,8 → 922,6
 
ret = recv_bytes;
out:
// pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
intel_aux_display_runtime_put(dev_priv);
 
if (vdd)
edp_panel_vdd_off(intel_dp, false);
941,8 → 941,9
size_t txsize, rxsize;
int ret;
 
txbuf[0] = msg->request << 4;
txbuf[1] = msg->address >> 8;
txbuf[0] = (msg->request << 4) |
((msg->address >> 16) & 0xf);
txbuf[1] = (msg->address >> 8) & 0xff;
txbuf[2] = msg->address & 0xff;
txbuf[3] = msg->size - 1;
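The rewritten packing above implements the full 20-bit AUX address layout: the 4-bit request sits in the upper nibble of byte 0 with address bits 19:16 in the lower nibble, bytes 1 and 2 carry address bits 15:8 and 7:0, and byte 3 holds the transfer size minus one. A standalone sketch of the same layout (function name illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Pack a DP AUX header: 4-bit request, 20-bit address, size 1..16. */
static void aux_pack_header(uint8_t buf[4], uint8_t request,
			    uint32_t address, uint8_t size)
{
	buf[0] = (request << 4) | ((address >> 16) & 0xf);
	buf[1] = (address >> 8) & 0xff;
	buf[2] = address & 0xff;
	buf[3] = size - 1;
}

int main(void)
{
	uint8_t hdr[4];

	/* Native read (0x9) of one byte at DPCD 0x00202 (lane status). */
	aux_pack_header(hdr, 0x9, 0x00202, 1);
	printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
	/* -> 90 02 02 00 */
	return 0;
}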
 
949,8 → 950,9
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_WRITE:
case DP_AUX_I2C_WRITE:
case DP_AUX_I2C_WRITE_STATUS_UPDATE:
txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
rxsize = 1;
rxsize = 2; /* 0 or 1 data bytes */
 
if (WARN_ON(txsize > 20))
return -E2BIG;
961,9 → 963,14
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
 
if (ret > 1) {
/* Number of bytes written in a short write. */
ret = clamp_t(int, rxbuf[1], 0, msg->size);
} else {
/* Return payload size. */
ret = msg->size;
}
}
break;
 
case DP_AUX_NATIVE_READ:
1000,11 → 1007,34
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
const char *name = NULL;
uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
int ret;
 
/* On SKL we don't have Aux for port E so we rely on VBT to set
* a proper alternate aux channel.
*/
if (IS_SKYLAKE(dev) && port == PORT_E) {
switch (info->alternate_aux_channel) {
case DP_AUX_B:
porte_aux_ctl_reg = DPB_AUX_CH_CTL;
break;
case DP_AUX_C:
porte_aux_ctl_reg = DPC_AUX_CH_CTL;
break;
case DP_AUX_D:
porte_aux_ctl_reg = DPD_AUX_CH_CTL;
break;
case DP_AUX_A:
default:
porte_aux_ctl_reg = DPA_AUX_CH_CTL;
}
}
 
switch (port) {
case PORT_A:
intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1022,6 → 1052,10
intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
name = "DPDDC-D";
break;
case PORT_E:
intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
name = "DPDDC-E";
break;
default:
BUG();
}
1035,7 → 1069,7
*
* Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
*/
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
 
intel_dp->aux.name = name;
1042,7 → 1076,8
intel_dp->aux.dev = dev->dev;
intel_dp->aux.transfer = intel_dp_aux_transfer;
 
DRM_DEBUG_KMS("registering %s bus\n", name);
DRM_DEBUG_KMS("registering %s bus for %s\n", name,
"");
 
ret = drm_dp_aux_register(&intel_dp->aux);
if (ret < 0) {
1061,51 → 1096,122
}
 
static void
skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
u32 ctrl1;
 
memset(&pipe_config->dpll_hw_state, 0,
sizeof(pipe_config->dpll_hw_state));
 
pipe_config->ddi_pll_sel = SKL_DPLL0;
pipe_config->dpll_hw_state.cfgcr1 = 0;
pipe_config->dpll_hw_state.cfgcr2 = 0;
 
ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
switch (link_bw) {
case DP_LINK_BW_1_62:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
switch (pipe_config->port_clock / 2) {
case 81000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
SKL_DPLL0);
break;
case DP_LINK_BW_2_7:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
case 135000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
SKL_DPLL0);
break;
case DP_LINK_BW_5_4:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
case 270000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
SKL_DPLL0);
break;
case 162000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
SKL_DPLL0);
break;
/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
 * results in CDCLK change. Need to handle the change of CDCLK by
 * disabling pipes and re-enabling them.
 */
case 108000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
SKL_DPLL0);
break;
case 216000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
SKL_DPLL0);
break;
 
}
pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
 
static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
switch (link_bw) {
case DP_LINK_BW_1_62:
memset(&pipe_config->dpll_hw_state, 0,
sizeof(pipe_config->dpll_hw_state));
 
switch (pipe_config->port_clock / 2) {
case 81000:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
break;
case DP_LINK_BW_2_7:
case 135000:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
break;
case DP_LINK_BW_5_4:
case 270000:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
break;
}
}
 
static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
if (intel_dp->num_sink_rates) {
*sink_rates = intel_dp->sink_rates;
return intel_dp->num_sink_rates;
}
 
*sink_rates = default_rates;
 
return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
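The fallback return value exploits how the DPCD max-link-rate codes are spaced: 0x06 (1.62 Gbps), 0x0a (2.7 Gbps) and 0x14 (5.4 Gbps) shift right by three to 0, 1 and 2, so adding one turns the code into a count of valid entries in the ascending default_rates table. A quick check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* DPCD MAX_LINK_RATE codes defined by the DP spec. */
	const int bw_codes[] = { 0x06, 0x0a, 0x14 };
	/* Shape of the driver's default_rates table (symbol clock in kHz). */
	const int default_rates[] = { 162000, 270000, 540000 };
	int i;

	for (i = 0; i < 3; i++) {
		int count = (bw_codes[i] >> 3) + 1;

		printf("code 0x%02x -> %d rate(s), max %d kHz\n",
		       bw_codes[i], count, default_rates[count - 1]);
	}
	return 0;
}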
 
static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
{
/* WaDisableHBR2:skl */
if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
return false;
 
if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
(INTEL_INFO(dev)->gen >= 9))
return true;
else
return false;
}
 
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
int size;
 
if (IS_BROXTON(dev)) {
*source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
} else if (IS_SKYLAKE(dev)) {
*source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
} else {
*source_rates = default_rates;
size = ARRAY_SIZE(default_rates);
}
 
/* This depends on the fact that 5.4 is last value in the array */
if (!intel_dp_source_supports_hbr2(dev))
size--;
 
return size;
}
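As the comment says, dropping HBR2 by decrementing the length only works because every source table is ascending and ends with the 540000 entry. A sketch with a representative table (the real bxt_rates/skl_rates/default_rates arrays are defined earlier in the file; the values here are assumed for illustration):

#include <stdio.h>

int main(void)
{
	/* Representative ascending rate table ending in HBR2 (540000 kHz). */
	const int rates[] = { 162000, 216000, 270000, 324000, 432000, 540000 };
	int size = sizeof(rates) / sizeof(rates[0]);
	int hbr2_ok = 0; /* pretend the source does not support HBR2 */

	if (!hbr2_ok)
		size--; /* valid only because 540000 is the last entry */

	printf("max usable rate: %d kHz\n", rates[size - 1]); /* 432000 */
	return 0;
}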
 
static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config, int link_bw)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
const struct dp_link_dpll *divisor = NULL;
1127,7 → 1233,7
 
if (divisor && count) {
for (i = 0; i < count; i++) {
if (link_bw == divisor[i].link_bw) {
if (pipe_config->port_clock == divisor[i].clock) {
pipe_config->dpll = divisor[i].dpll;
pipe_config->clock_set = true;
break;
1136,16 → 1242,136
}
}
 
static int intersect_rates(const int *source_rates, int source_len,
const int *sink_rates, int sink_len,
int *common_rates)
{
int i = 0, j = 0, k = 0;
 
while (i < source_len && j < sink_len) {
if (source_rates[i] == sink_rates[j]) {
if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
return k;
common_rates[k] = source_rates[i];
++k;
++i;
++j;
} else if (source_rates[i] < sink_rates[j]) {
++i;
} else {
++j;
}
}
return k;
}
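intersect_rates() is a textbook two-pointer merge over two ascending arrays: advance whichever side holds the smaller value, record matches, and stop when either side is exhausted, giving O(source_len + sink_len) time. The same logic as a standalone program:

#include <stdio.h>

static int intersect(const int *a, int an, const int *b, int bn, int *out)
{
	int i = 0, j = 0, k = 0;

	while (i < an && j < bn) {
		if (a[i] == b[j]) {
			out[k++] = a[i]; /* match: take it, advance both */
			i++;
			j++;
		} else if (a[i] < b[j]) {
			i++; /* source value too small, advance source */
		} else {
			j++; /* sink value too small, advance sink */
		}
	}
	return k;
}

int main(void)
{
	const int src[] = { 162000, 216000, 270000, 324000, 432000, 540000 };
	const int snk[] = { 162000, 270000, 540000 };
	int common[8];
	int n = intersect(src, 6, snk, 3, common);
	int i;

	for (i = 0; i < n; i++)
		printf("%d\n", common[i]); /* 162000, 270000, 540000 */
	return 0;
}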
 
static int intel_dp_common_rates(struct intel_dp *intel_dp,
int *common_rates)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
const int *source_rates, *sink_rates;
int source_len, sink_len;
 
sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
source_len = intel_dp_source_rates(dev, &source_rates);
 
return intersect_rates(source_rates, source_len,
sink_rates, sink_len,
common_rates);
}
 
static void snprintf_int_array(char *str, size_t len,
const int *array, int nelem)
{
int i;
 
str[0] = '\0';
 
for (i = 0; i < nelem; i++) {
int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
if (r >= len)
return;
str += r;
len -= r;
}
}
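A detail worth noting in the helper above: snprintf() returns the length it would have needed, so r >= len means the output was truncated and the loop bails out instead of advancing the pointer past the buffer. A standalone run with a deliberately small buffer:

#include <stdio.h>

int main(void)
{
	const int rates[] = { 162000, 270000, 540000 };
	char str[16];
	char *p = str;
	size_t len = sizeof(str);
	int i;

	str[0] = '\0';
	for (i = 0; i < 3; i++) {
		int r = snprintf(p, len, "%s%d", i ? ", " : "", rates[i]);

		if (r < 0 || (size_t)r >= len)
			break; /* truncated: stop, string stays terminated */
		p += r;
		len -= r;
	}
	printf("%s\n", str); /* "162000, 270000" - the third entry didn't fit */
	return 0;
}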
 
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
const int *source_rates, *sink_rates;
int source_len, sink_len, common_len;
int common_rates[DP_MAX_SUPPORTED_RATES];
char str[128]; /* FIXME: too big for stack? */
 
if ((drm_debug & DRM_UT_KMS) == 0)
return;
 
source_len = intel_dp_source_rates(dev, &source_rates);
snprintf_int_array(str, sizeof(str), source_rates, source_len);
DRM_DEBUG_KMS("source rates: %s\n", str);
 
sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
DRM_DEBUG_KMS("sink rates: %s\n", str);
 
common_len = intel_dp_common_rates(intel_dp, common_rates);
snprintf_int_array(str, sizeof(str), common_rates, common_len);
DRM_DEBUG_KMS("common rates: %s\n", str);
}
 
static int rate_to_index(int find, const int *rates)
{
int i = 0;
 
for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
if (find == rates[i])
break;
 
return i;
}
 
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
int rates[DP_MAX_SUPPORTED_RATES] = {};
int len;
 
len = intel_dp_common_rates(intel_dp, rates);
if (WARN_ON(len <= 0))
return 162000;
 
return rates[rate_to_index(0, rates) - 1];
}
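intel_dp_max_link_rate() leans on two invariants of the rates buffer: it is zero-initialized and filled in ascending order, so searching for 0 with rate_to_index() yields the count of valid entries, and the element just before that position is the maximum common rate. A compact illustration (the bound of 8 mirrors what DP_MAX_SUPPORTED_RATES is assumed to be):

#include <stdio.h>

#define MAX_RATES 8 /* stand-in for DP_MAX_SUPPORTED_RATES */

static int rate_to_index(int find, const int *rates)
{
	int i;

	for (i = 0; i < MAX_RATES; i++)
		if (find == rates[i])
			break;
	return i;
}

int main(void)
{
	/* Zero-initialized and ascending, like the driver's common_rates[]. */
	int rates[MAX_RATES] = { 162000, 270000, 540000 };
	int len = rate_to_index(0, rates); /* index of first zero == count */

	printf("len=%d max=%d\n", len, rates[len - 1]); /* len=3 max=540000 */
	return 0;
}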
 
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
return rate_to_index(rate, intel_dp->sink_rates);
}
 
static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
uint8_t *link_bw, uint8_t *rate_select)
{
if (intel_dp->num_sink_rates) {
*link_bw = 0;
*rate_select =
intel_dp_rate_select(intel_dp, port_clock);
} else {
*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
*rate_select = 0;
}
}
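intel_dp_compute_rate() captures the two addressing schemes a sink can use: legacy sinks are given a bandwidth code (0x06/0x0a/0x14) through DP_LINK_BW_SET, while eDP 1.4 sinks that published a SUPPORTED_LINK_RATES table are instead given an index into that table through DP_LINK_RATE_SET, with the bandwidth code forced to zero. A sketch of the split (helper is hypothetical; the BW-code conversion uses the fact that the codes equal the rate divided by 27000):

#include <stdint.h>
#include <stdio.h>

static void compute_rate(int num_sink_rates, const int *sink_rates,
			 int port_clock, uint8_t *link_bw, uint8_t *rate_select)
{
	if (num_sink_rates) {
		int i;

		*link_bw = 0; /* eDP 1.4: rate chosen by table index */
		for (i = 0; i < num_sink_rates; i++)
			if (sink_rates[i] == port_clock)
				break;
		*rate_select = i;
	} else {
		/* 162000 -> 0x06, 270000 -> 0x0a, 540000 -> 0x14 */
		*link_bw = port_clock / 27000;
		*rate_select = 0;
	}
}

int main(void)
{
	const int sink_rates[] = { 162000, 216000, 270000 };
	uint8_t bw, sel;

	compute_rate(3, sink_rates, 216000, &bw, &sel);
	printf("bw=0x%02x sel=%d\n", bw, sel); /* bw=0x00 sel=1 */
	compute_rate(0, NULL, 270000, &bw, &sel);
	printf("bw=0x%02x sel=%d\n", bw, sel); /* bw=0x0a sel=0 */
	return 0;
}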
 
bool
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *intel_crtc = encoder->new_crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
int min_lane_count = 1;
1152,21 → 1378,38
int max_lane_count = intel_dp_max_lane_count(intel_dp);
/* Conveniently, the link BW constants become indices with a shift...*/
int min_clock = 0;
int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
int max_clock;
int bpp, mode_rate;
static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
int link_avail, link_clock;
int common_rates[DP_MAX_SUPPORTED_RATES] = {};
int common_len;
uint8_t link_bw, rate_select;
 
common_len = intel_dp_common_rates(intel_dp, common_rates);
 
/* No common link rates between source and sink */
WARN_ON(common_len <= 0);
 
max_clock = common_len - 1;
 
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
pipe_config->has_pch_encoder = true;
 
pipe_config->has_dp_encoder = true;
pipe_config->has_drrs = false;
pipe_config->has_audio = intel_dp->has_audio;
pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
 
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
 
if (INTEL_INFO(dev)->gen >= 9) {
int ret;
ret = skl_update_scaler_crtc(pipe_config);
if (ret)
return ret;
}
 
if (!HAS_PCH_SPLIT(dev))
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
1179,8 → 1422,8
return false;
 
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %02x pixel clock %iKHz\n",
max_lane_count, bws[max_clock],
"max bw %d pixel clock %iKHz\n",
max_lane_count, common_rates[max_clock],
adjusted_mode->crtc_clock);
 
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
1187,7 → 1430,10
* bpc in between. */
bpp = pipe_config->pipe_bpp;
if (is_edp(intel_dp)) {
if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
 
/* Get bpp from VBT only for panels that don't have bpp in EDID */
if (intel_connector->base.display_info.bpc == 0 &&
(dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
dev_priv->vbt.edp_bpp);
bpp = dev_priv->vbt.edp_bpp;
1209,8 → 1455,11
bpp);
 
for (clock = min_clock; clock <= max_clock; clock++) {
for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
for (lane_count = min_lane_count;
lane_count <= max_lane_count;
lane_count <<= 1) {
 
link_clock = common_rates[clock];
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
 
1230,22 → 1479,23
* CEA-861-E - 5.1 Default Encoding Parameters
* VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
*/
if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
intel_dp->color_range = DP_COLOR_RANGE_16_235;
else
intel_dp->color_range = 0;
pipe_config->limited_color_range =
bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
} else {
pipe_config->limited_color_range =
intel_dp->limited_color_range;
}
 
if (intel_dp->color_range)
pipe_config->limited_color_range = true;
pipe_config->lane_count = lane_count;
 
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
pipe_config->pipe_bpp = bpp;
pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
pipe_config->port_clock = common_rates[clock];
 
DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
&link_bw, &rate_select);
 
DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
link_bw, rate_select, pipe_config->lane_count,
pipe_config->port_clock, bpp);
DRM_DEBUG_KMS("DP link bw required %i available %i\n",
mode_rate, link_avail);
1256,7 → 1506,7
&pipe_config->dp_m_n);
 
if (intel_connector->panel.downclock_mode != NULL &&
intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
pipe_config->has_drrs = true;
intel_link_compute_m_n(bpp, lane_count,
intel_connector->panel.downclock_mode->clock,
1265,11 → 1515,13
}
 
if (IS_SKYLAKE(dev) && is_edp(intel_dp))
skl_edp_set_pll_config(pipe_config, intel_dp->link_bw);
skl_edp_set_pll_config(pipe_config);
else if (IS_BROXTON(dev))
/* handled in ddi */;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
hsw_dp_set_ddi_pll_sel(pipe_config);
else
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
intel_dp_set_clock(encoder, pipe_config);
 
return true;
}
1282,11 → 1534,12
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
 
DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
crtc->config->port_clock);
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_FREQ_MASK;
 
if (crtc->config.port_clock == 162000) {
if (crtc->config->port_clock == 162000) {
/* For a long time we've carried around an ILK-DevA w/a for the
* 160MHz clock. If we're really unlucky, it's still required.
*/
1304,6 → 1557,13
udelay(500);
}
 
void intel_dp_set_link_params(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config)
{
intel_dp->link_rate = pipe_config->port_clock;
intel_dp->lane_count = pipe_config->lane_count;
}
 
static void intel_dp_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
1311,8 → 1571,10
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
 
intel_dp_set_link_params(intel_dp, crtc->config);
 
/*
* There are four kinds of DP registers:
*
1337,14 → 1599,14
 
/* Handle DP bits in common between all three register formats */
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
 
if (crtc->config.has_audio)
if (crtc->config->has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 
/* Split out the IBX/CPU vs CPT settings */
 
if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
if (IS_GEN7(dev) && port == PORT_A) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1355,10 → 1617,22
intel_dp->DP |= DP_ENHANCED_FRAMING;
 
intel_dp->DP |= crtc->pipe << 29;
} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
intel_dp->DP |= intel_dp->color_range;
} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
u32 trans_dp;
 
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
 
trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
trans_dp |= TRANS_DP_ENH_FRAMING;
else
trans_dp &= ~TRANS_DP_ENH_FRAMING;
I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
} else {
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
crtc->config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
 
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1368,16 → 1642,12
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
 
if (!IS_CHERRYVIEW(dev)) {
if (crtc->pipe == 1)
if (IS_CHERRYVIEW(dev))
intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
else if (crtc->pipe == PIPE_B)
intel_dp->DP |= DP_PIPEB_SELECT;
} else {
intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
}
} else {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
}
 
#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1464,8 → 1734,10
lockdep_assert_held(&dev_priv->pps_mutex);
 
control = I915_READ(_pp_ctrl_reg(intel_dp));
if (!IS_BROXTON(dev)) {
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
}
return control;
}
 
1490,12 → 1762,13
if (!is_edp(intel_dp))
return false;
 
cancel_delayed_work(&intel_dp->panel_vdd_work);
intel_dp->want_panel_vdd = true;
 
if (edp_have_panel_vdd(intel_dp))
return need_to_disable;
 
power_domain = intel_display_port_power_domain(intel_encoder);
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1544,7 → 1817,7
vdd = edp_panel_vdd_on(intel_dp);
pps_unlock(intel_dp);
 
WARN(!vdd, "eDP port %c VDD already requested on\n",
I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
port_name(dp_to_dig_port(intel_dp)->port));
}
 
1585,7 → 1858,7
if ((pp & POWER_TARGET_ON) == 0)
intel_dp->last_power_cycle = jiffies;
 
power_domain = intel_display_port_power_domain(intel_encoder);
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
}
 
1628,7 → 1901,7
if (!is_edp(intel_dp))
return;
 
WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
port_name(dp_to_dig_port(intel_dp)->port));
 
intel_dp->want_panel_vdd = false;
1736,7 → 2009,7
wait_panel_off(intel_dp);
 
/* We got a reference when we enabled the VDD. */
power_domain = intel_display_port_power_domain(intel_encoder);
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
}
 
1958,41 → 2231,25
if (!(tmp & DP_PORT_EN))
return false;
 
if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
if (IS_GEN7(dev) && port == PORT_A) {
*pipe = PORT_TO_PIPE_CPT(tmp);
} else if (IS_CHERRYVIEW(dev)) {
*pipe = DP_PORT_TO_PIPE_CHV(tmp);
} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
*pipe = PORT_TO_PIPE(tmp);
} else {
u32 trans_sel;
u32 trans_dp;
int i;
} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
enum pipe p;
 
switch (intel_dp->output_reg) {
case PCH_DP_B:
trans_sel = TRANS_DP_PORT_SEL_B;
break;
case PCH_DP_C:
trans_sel = TRANS_DP_PORT_SEL_C;
break;
case PCH_DP_D:
trans_sel = TRANS_DP_PORT_SEL_D;
break;
default:
for_each_pipe(dev_priv, p) {
u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
*pipe = p;
return true;
}
 
for_each_pipe(dev_priv, i) {
trans_dp = I915_READ(TRANS_DP_CTL(i));
if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
*pipe = i;
return true;
}
}
 
DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
intel_dp->output_reg);
} else if (IS_CHERRYVIEW(dev)) {
*pipe = DP_PORT_TO_PIPE_CHV(tmp);
} else {
*pipe = PORT_TO_PIPE(tmp);
}
 
return true;
1999,7 → 2256,7
}
 
static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
u32 tmp, flags = 0;
2010,33 → 2267,34
int dotclock;
 
tmp = I915_READ(intel_dp->output_reg);
if (tmp & DP_AUDIO_OUTPUT_ENABLE)
pipe_config->has_audio = true;
 
if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
if (tmp & DP_SYNC_HS_HIGH)
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
 
if (HAS_PCH_CPT(dev) && port != PORT_A) {
u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
 
if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
 
if (tmp & DP_SYNC_VS_HIGH)
if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
} else {
tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
if (tmp & DP_SYNC_HS_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
 
if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
if (tmp & DP_SYNC_VS_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
}
 
pipe_config->adjusted_mode.flags |= flags;
pipe_config->base.adjusted_mode.flags |= flags;
 
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
tmp & DP_COLOR_RANGE_16_235)
2044,6 → 2302,9
 
pipe_config->has_dp_encoder = true;
 
pipe_config->lane_count =
((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
 
intel_dp_get_m_n(crtc, pipe_config);
 
if (port == PORT_A) {
2059,7 → 2320,7
if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
ironlake_check_encoder_dotclock(pipe_config, dotclock);
 
pipe_config->adjusted_mode.crtc_clock = dotclock;
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
 
if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2088,9 → 2349,12
struct drm_device *dev = encoder->base.dev;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
if (crtc->config.has_audio)
if (crtc->config->has_audio)
intel_audio_codec_disable(encoder);
 
if (HAS_PSR(dev) && !HAS_DDI(dev))
intel_psr_disable(intel_dp);
 
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
intel_edp_panel_vdd_on(intel_dp);
2120,40 → 2384,64
intel_dp_link_down(intel_dp);
}
 
static void chv_post_disable_dp(struct intel_encoder *encoder)
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
u32 val;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = crtc->pipe;
uint32_t val;
 
intel_dp_link_down(intel_dp);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
 
mutex_lock(&dev_priv->dpio_lock);
if (crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
}
 
/* Propagate soft reset to data lane reset */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
 
if (crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
}
}
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
intel_dp_link_down(intel_dp);
 
mutex_unlock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
 
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, true);
 
mutex_unlock(&dev_priv->sb_lock);
}
 
static void
2192,7 → 2480,8
}
I915_WRITE(DP_TP_CTL(port), temp);
 
} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
} else if ((IS_GEN7(dev) && port == PORT_A) ||
(HAS_PCH_CPT(dev) && port != PORT_A)) {
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
 
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2287,15 → 2576,21
 
pps_unlock(intel_dp);
 
if (IS_VALLEYVIEW(dev))
vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
if (IS_VALLEYVIEW(dev)) {
unsigned int lane_mask = 0x0;
 
if (IS_CHERRYVIEW(dev))
lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
 
vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
lane_mask);
}
 
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
 
if (crtc->config.has_audio) {
if (crtc->config->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_audio_codec_enable(encoder);
2315,6 → 2610,7
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
intel_edp_backlight_on(intel_dp);
intel_psr_enable(intel_dp);
}
 
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2385,7 → 2681,7
DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
pipe_name(pipe), port_name(port));
 
WARN(encoder->connectors_active,
WARN(encoder->base.crtc,
"stealing pipe %c power sequencer from active eDP port %c\n",
pipe_name(pipe), port_name(port));
 
2446,7 → 2742,7
int pipe = intel_crtc->pipe;
u32 val;
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
val = 0;
2459,7 → 2755,7
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
 
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
 
intel_enable_dp(encoder);
}
2477,7 → 2773,7
intel_dp_prepare(encoder);
 
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
2491,7 → 2787,7
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
}
 
static void chv_pre_enable_dp(struct intel_encoder *encoder)
2504,10 → 2800,10
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
int data, i;
int data, i, stagger;
u32 val;
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
 
/* allow hardware to manage TX FIFO reset source */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2514,35 → 2810,18
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
 
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
}
 
/* Deassert soft data lane reset*/
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
 
/* Program Tx lane latency optimal setting*/
for (i = 0; i < 4; i++) {
/* Set the latency optimal bit */
data = (i == 1) ? 0x0 : 0x6;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
data << DPIO_FRC_LATENCY_SHFIT);
 
for (i = 0; i < intel_crtc->config->lane_count; i++) {
/* Set the upar bit */
if (intel_crtc->config->lane_count == 1)
data = 0x0;
else
data = (i == 1) ? 0x0 : 0x1;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
data << DPIO_UPAR_SHIFT);
2549,12 → 2828,56
}
 
/* Data lane stagger programming */
/* FIXME: Fix up value only after power analysis */
if (intel_crtc->config->port_clock > 270000)
stagger = 0x18;
else if (intel_crtc->config->port_clock > 135000)
stagger = 0xd;
else if (intel_crtc->config->port_clock > 67500)
stagger = 0x7;
else if (intel_crtc->config->port_clock > 33750)
stagger = 0x4;
else
stagger = 0x2;
 
mutex_unlock(&dev_priv->dpio_lock);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
 
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
}
 
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
DPIO_TX1_STAGGER_MULT(6) |
DPIO_TX2_STAGGER_MULT(0));
 
if (intel_crtc->config->lane_count > 2) {
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
DPIO_TX1_STAGGER_MULT(7) |
DPIO_TX2_STAGGER_MULT(5));
}
 
/* Deassert data lane reset */
chv_data_lane_soft_reset(encoder, false);
 
mutex_unlock(&dev_priv->sb_lock);
 
intel_enable_dp(encoder);
 
/* Second common lane will stay alive on its own now */
if (dport->release_cl2_override) {
chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
dport->release_cl2_override = false;
}
}
 
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
2565,12 → 2888,27
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
unsigned int lane_mask =
intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
u32 val;
 
intel_dp_prepare(encoder);
 
mutex_lock(&dev_priv->dpio_lock);
/*
* Must trick the second common lane into life.
* Otherwise we can't even access the PLL.
*/
if (ch == DPIO_CH0 && pipe == PIPE_B)
dport->release_cl2_override =
!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
 
chv_phy_powergate_lanes(encoder, true, lane_mask);
 
mutex_lock(&dev_priv->sb_lock);
 
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, true);
 
/* program left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2599,6 → 2937,7
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
 
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
2606,6 → 2945,7
else
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
}
 
/*
* This is a bit weird since generally CL
2619,10 → 2959,43
val |= CHV_CMN_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
 
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
}
 
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
u32 val;
 
mutex_lock(&dev_priv->sb_lock);
 
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
} else {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}
 
mutex_unlock(&dev_priv->sb_lock);
 
/*
* Leave the power down bit cleared for at least one
* lane so that chv_powergate_phy_ch() will power
* on something when the channel is otherwise unused.
* When the port is off and the override is removed
* the lanes power down anyway, so otherwise it doesn't
* really matter what the state of power down bits is
* after this.
*/
chv_phy_powergate_lanes(encoder, false, 0x0);
}
 
/*
* Native read with retry for link status and receiver capability reads for
* cases where the sink may still be asleep.
*
2671,11 → 3044,16
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = dp_to_dig_port(intel_dp)->port;
 
if (INTEL_INFO(dev)->gen >= 9)
if (IS_BROXTON(dev))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (INTEL_INFO(dev)->gen >= 9) {
if (dev_priv->edp_low_vswing && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
else if (IS_VALLEYVIEW(dev))
} else if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2699,6 → 3077,8
return DP_TRAIN_PRE_EMPH_LEVEL_2;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
return DP_TRAIN_PRE_EMPH_LEVEL_1;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
2751,7 → 3131,7
}
}
 
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
2837,7 → 3217,7
return 0;
}
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
2846,13 → 3226,19
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
 
return 0;
}
 
static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
static bool chv_need_uniq_trans_scale(uint8_t train_set)
{
return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
(train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}
 
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2933,7 → 3319,7
return 0;
}
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
 
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2942,11 → 3328,13
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
}
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
2953,13 → 3341,15
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
 
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
}
 
/* Program swing deemph */
for (i = 0; i < 4; i++) {
for (i = 0; i < intel_crtc->config->lane_count; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
2967,25 → 3357,23
}
 
/* Program swing margin */
for (i = 0; i < 4; i++) {
for (i = 0; i < intel_crtc->config->lane_count; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
 
val &= ~DPIO_SWING_MARGIN000_MASK;
val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
 
/*
* Supposedly this value shouldn't matter when unique transition
* scale is disabled, but in fact it does matter. Let's just
* always program the same value and hope it's OK.
*/
val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
 
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
 
/* Disable unique transition scale */
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
}
 
if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
== DP_TRAIN_PRE_EMPH_LEVEL_0) &&
((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
== DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
 
/*
* The document said it needs to set bit 27 for ch0 and bit 26
* for ch1. Might be a typo in the doc.
2992,36 → 3380,28
* For now, for this unique transition scale selection, set bit
* 27 for ch0 and ch1.
*/
for (i = 0; i < 4; i++) {
for (i = 0; i < intel_crtc->config->lane_count; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
if (chv_need_uniq_trans_scale(train_set))
val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
else
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
}
 
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
}
 
/* Start swing calculation */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 
if (intel_crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
}
 
/* LRC Bypass */
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
val |= DPIO_LRC_BYPASS;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
mutex_unlock(&dev_priv->sb_lock);
 
mutex_unlock(&dev_priv->dpio_lock);
 
return 0;
}
 
3058,7 → 3438,7
}
 
static uint32_t
intel_gen4_signal_levels(uint8_t train_set)
gen4_signal_levels(uint8_t train_set)
{
uint32_t signal_levels = 0;
 
3097,7 → 3477,7
 
/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
gen6_edp_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
3125,7 → 3505,7
 
/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
gen7_edp_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
3154,40 → 3534,6
}
}
 
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
intel_hsw_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
return DDI_BUF_TRANS_SELECT(0);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return DDI_BUF_TRANS_SELECT(1);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
return DDI_BUF_TRANS_SELECT(2);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
return DDI_BUF_TRANS_SELECT(3);
 
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
return DDI_BUF_TRANS_SELECT(4);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return DDI_BUF_TRANS_SELECT(5);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
return DDI_BUF_TRANS_SELECT(6);
 
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
return DDI_BUF_TRANS_SELECT(7);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return DDI_BUF_TRANS_SELECT(8);
default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
"0x%x\n", signal_levels);
return DDI_BUF_TRANS_SELECT(0);
}
}
 
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3195,31 → 3541,40
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
struct drm_device *dev = intel_dig_port->base.base.dev;
uint32_t signal_levels, mask;
uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
signal_levels = intel_hsw_signal_levels(train_set);
if (HAS_DDI(dev)) {
signal_levels = ddi_signal_levels(intel_dp);
 
if (IS_BROXTON(dev))
signal_levels = 0;
else
mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev)) {
signal_levels = intel_chv_signal_levels(intel_dp);
mask = 0;
signal_levels = chv_signal_levels(intel_dp);
} else if (IS_VALLEYVIEW(dev)) {
signal_levels = intel_vlv_signal_levels(intel_dp);
mask = 0;
signal_levels = vlv_signal_levels(intel_dp);
} else if (IS_GEN7(dev) && port == PORT_A) {
signal_levels = intel_gen7_edp_signal_levels(train_set);
signal_levels = gen7_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
} else if (IS_GEN6(dev) && port == PORT_A) {
signal_levels = intel_gen6_edp_signal_levels(train_set);
signal_levels = gen6_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
signal_levels = intel_gen4_signal_levels(train_set);
signal_levels = gen4_signal_levels(train_set);
mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
}
 
if (mask)
DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
 
DRM_DEBUG_KMS("Using vswing level %d\n",
train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
DP_TRAIN_PRE_EMPHASIS_SHIFT);
 
*DP = (*DP & ~mask) | signal_levels;
}
 
3229,8 → 3584,8
uint8_t dp_train_pat)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
uint8_t buf[sizeof(intel_dp->train_set) + 1];
int ret, len;
 
3260,6 → 3615,7
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
uint8_t dp_train_pat)
{
if (!intel_dp->train_set_valid)
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp, DP);
return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3270,8 → 3626,8
const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
int ret;
 
intel_get_adjust_train(intel_dp, link_status);
3318,8 → 3674,8
}
 
/* Enable corresponding port and start training pattern 1 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
static void
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
struct drm_device *dev = encoder->dev;
3328,16 → 3684,23
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
uint8_t link_config[2];
uint8_t link_bw, rate_select;
 
if (HAS_DDI(dev))
intel_ddi_prepare_link_retrain(encoder);
 
intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
&link_bw, &rate_select);
 
/* Write the link configuration data */
link_config[0] = intel_dp->link_bw;
link_config[0] = link_bw;
link_config[1] = intel_dp->lane_count;
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
if (intel_dp->num_sink_rates)
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
&rate_select, 1);
 
link_config[0] = 0;
link_config[1] = DP_SET_ANSI_8B10B;
3370,6 → 3733,23
break;
}
 
/*
* if we used previously trained voltage and pre-emphasis values
* and we don't get clock recovery, reset link training values
*/
if (intel_dp->train_set_valid) {
DRM_DEBUG_KMS("clock recovery not ok, reset");
/* clear the flag as we are not reusing train set */
intel_dp->train_set_valid = false;
if (!intel_dp_reset_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to enable link training\n");
return;
}
continue;
}
 
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3408,17 → 3788,30
intel_dp->DP = DP;
}
 
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
static void
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
bool channel_eq = false;
int tries, cr_tries;
uint32_t DP = intel_dp->DP;
uint32_t training_pattern = DP_TRAINING_PATTERN_2;
 
/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
/*
* Training Pattern 3 for HBR2 or 1.2 devices that support it.
*
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
* also mandatory for downstream devices that support HBR2.
*
* Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
* supported but still not enabled.
*/
if (intel_dp_source_supports_hbr2(dev) &&
drm_dp_tps3_supported(intel_dp->dpcd))
training_pattern = DP_TRAINING_PATTERN_3;
else if (intel_dp->link_rate == 540000)
DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
 
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp, &DP,
3446,8 → 3839,10
}
 
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
3455,7 → 3850,8
continue;
}
 
if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
if (drm_dp_channel_eq_ok(link_status,
intel_dp->lane_count)) {
channel_eq = true;
break;
}
3462,7 → 3858,8
 
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
intel_dp_start_link_train(intel_dp);
intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
3483,10 → 3880,11
 
intel_dp->DP = DP;
 
if (channel_eq)
if (channel_eq) {
intel_dp->train_set_valid = true;
DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
 
}
}
 
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
3494,15 → 3892,21
DP_TRAINING_PATTERN_DISABLE);
}
 
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_link_training_channel_equalization(intel_dp);
}
 
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
enum port port = intel_dig_port->port;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(intel_dig_port->base.base.crtc);
uint32_t DP = intel_dp->DP;
 
if (WARN_ON(HAS_DDI(dev)))
3513,49 → 3917,41
 
DRM_DEBUG_KMS("\n");
 
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
if ((IS_GEN7(dev) && port == PORT_A) ||
(HAS_PCH_CPT(dev) && port != PORT_A)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
} else {
if (IS_CHERRYVIEW(dev))
DP &= ~DP_LINK_TRAIN_MASK_CHV;
else
DP &= ~DP_LINK_TRAIN_MASK;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
DP |= DP_LINK_TRAIN_PAT_IDLE;
}
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
 
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
 
/* Hardware workaround: leaving our transcoder select
* set to transcoder B while it's off will prevent the
* corresponding HDMI output on transcoder A.
*
* Combine this with another hardware workaround:
* transcoder select bit can only be cleared while the
* port is enabled.
/*
* HW workaround for IBX, we need to move the port
* to transcoder A after disabling it to allow the
* matching HDMI port to be enabled on transcoder A.
*/
DP &= ~DP_PIPEB_SELECT;
if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
/* always enable with pattern 1 (as per spec) */
DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
 
/* Changes to enable or select take place the vblank
* after being written.
*/
if (WARN_ON(crtc == NULL)) {
/* We should never try to disable a port without a crtc
* attached. For paranoia keep the code around for a
* bit. */
DP &= ~DP_PORT_EN;
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
msleep(50);
} else
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
 
DP &= ~DP_AUDIO_OUTPUT_ENABLE;
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
msleep(intel_dp->panel_power_down_delay);
}
 
3565,6 → 3961,7
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint8_t rev;
 
if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) < 0)
3585,17 → 3982,54
dev_priv->psr.sink_support = true;
DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
}
 
if (INTEL_INFO(dev)->gen >= 9 &&
(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
uint8_t frame_sync_cap;
 
dev_priv->psr.sink_support = true;
intel_dp_dpcd_read_wake(&intel_dp->aux,
DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
&frame_sync_cap, 1);
dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
/* PSR2 needs frame sync as well */
dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
DRM_DEBUG_KMS("PSR2 %s on sink",
dev_priv->psr.psr2_support ? "supported" : "not supported");
}
}
 
/* Training Pattern 3 support, both source and sink */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
(IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
intel_dp->use_tps3 = true;
DRM_DEBUG_KMS("Displayport TPS3 supported\n");
} else
intel_dp->use_tps3 = false;
DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
yesno(intel_dp_source_supports_hbr2(dev)),
yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
 
/* Intermediate frequency support */
if (is_edp(intel_dp) &&
(intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
(intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
(rev >= 0x03)) { /* eDP v1.4 or higher */
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
int i;
 
intel_dp_dpcd_read_wake(&intel_dp->aux,
DP_SUPPORTED_LINK_RATES,
sink_rates,
sizeof(sink_rates));
 
for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
int val = le16_to_cpu(sink_rates[i]);
 
if (val == 0)
break;
 
/* Value read is in kHz while drm clock is saved in deca-kHz */
intel_dp->sink_rates[i] = (val * 200) / 10;
}
intel_dp->num_sink_rates = i;
}
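The conversion in the loop above follows from the eDP 1.4 DPCD encoding: each DP_SUPPORTED_LINK_RATES entry is a little-endian 16-bit value in units of 200 kHz of link bit rate, while the driver tracks the link symbol clock in kHz (bit rate divided by ten under 8b/10b), hence (val * 200) / 10. Worked through for the standard rates:

#include <stdio.h>

int main(void)
{
	/* DPCD values in 200 kHz units: 1.62, 2.16, 2.7 and 5.4 Gbps. */
	const int dpcd_vals[] = { 8100, 10800, 13500, 27000 };
	int i;

	for (i = 0; i < 4; i++)
		printf("dpcd %5d -> link clock %6d kHz\n",
		       dpcd_vals[i], (dpcd_vals[i] * 200) / 10);
	/* 8100 -> 162000, 10800 -> 216000, 13500 -> 270000, 27000 -> 540000 */
	return 0;
}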
 
intel_dp_print_rates(intel_dp);
 
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
3653,16 → 4087,45
return intel_dp->is_mst;
}
 
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct intel_crtc *intel_crtc =
to_intel_crtc(intel_dig_port->base.base.crtc);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int test_crc_count;
int attempts = 6;
int ret = 0;
 
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
ret = -EIO;
goto out;
}
 
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
buf & ~DP_TEST_SINK_START) < 0) {
DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
ret = -EIO;
goto out;
}
 
intel_dp->sink_crc.started = false;
out:
hsw_enable_ips(intel_crtc);
return ret;
}
 
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int ret;
 
if (intel_dp->sink_crc.started) {
ret = intel_dp_sink_crc_stop(intel_dp);
if (ret)
return ret;
}
 
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
return -EIO;
 
3669,41 → 4132,83
if (!(buf & DP_TEST_CRC_SUPPORTED))
return -ENOTTY;
 
intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
 
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
return -EIO;
 
hsw_disable_ips(intel_crtc);
 
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
buf | DP_TEST_SINK_START) < 0)
buf | DP_TEST_SINK_START) < 0) {
hsw_enable_ips(intel_crtc);
return -EIO;
}
 
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
return -EIO;
test_crc_count = buf & DP_TEST_COUNT_MASK;
intel_dp->sink_crc.started = true;
return 0;
}
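/*
* Note: intel_dp_sink_crc() below pairs these helpers -- _start() before
* sampling and _stop() on every exit path -- so DP_TEST_SINK_START is never
* left set and IPS is re-enabled even when a read fails.
*/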
 
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
int count, ret;
int attempts = 6;
bool old_equal_new;
 
ret = intel_dp_sink_crc_start(intel_dp);
if (ret)
return ret;
 
do {
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0)
return -EIO;
intel_wait_for_vblank(dev, intel_crtc->pipe);
} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
 
if (attempts == 0) {
DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
return -ETIMEDOUT;
}
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0) {
ret = -EIO;
goto stop;
}
count = buf & DP_TEST_COUNT_MASK;
 
if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
return -EIO;
/*
* Count might be reset during the loop. In this case
* last known count needs to be reset as well.
*/
if (count == 0)
intel_dp->sink_crc.last_count = 0;
 
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
return -EIO;
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
buf & ~DP_TEST_SINK_START) < 0)
return -EIO;
if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
ret = -EIO;
goto stop;
}
 
return 0;
old_equal_new = (count == intel_dp->sink_crc.last_count &&
!memcmp(intel_dp->sink_crc.last_crc, crc,
6 * sizeof(u8)));
 
} while (--attempts && (count == 0 || old_equal_new));
 
intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
 
if (attempts == 0) {
if (old_equal_new) {
DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
} else {
DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
ret = -ETIMEDOUT;
goto stop;
}
}
 
stop:
intel_dp_sink_crc_stop(intel_dp);
return ret;
}
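/*
* Minimal caller sketch (illustrative only, not part of this diff): a
* debugfs-style consumer, assuming intel_dp has already been resolved and
* the required modeset locks are held. Starting and stopping the sink
* counter is handled inside intel_dp_sink_crc() itself.
*/
static int example_dump_sink_crc(struct intel_dp *intel_dp)
{
u8 crc[6];
int ret;

ret = intel_dp_sink_crc(intel_dp, crc);
if (ret)
return ret;

DRM_DEBUG_KMS("sink CRC: %02x%02x%02x%02x%02x%02x\n",
crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
return 0;
}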
 
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
3726,13 → 4231,123
return true;
}
 
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
/* NAK by default */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
uint8_t test_result = DP_TEST_ACK;
return test_result;
}
 
static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
uint8_t test_result = DP_TEST_NAK;
return test_result;
}
 
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
uint8_t test_result = DP_TEST_NAK;
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_connector *connector = &intel_connector->base;
 
if (intel_connector->detect_edid == NULL ||
connector->edid_corrupt ||
intel_dp->aux.i2c_defer_count > 6) {
/* Check EDID read for NACKs, DEFERs and corruption
* (DP CTS 1.2 Core r1.1)
* 4.2.2.4 : Failed EDID read, I2C_NAK
* 4.2.2.5 : Failed EDID read, I2C_DEFER
* 4.2.2.6 : EDID corruption detected
* Use failsafe mode for all cases
*/
if (intel_dp->aux.i2c_nack_count > 0 ||
intel_dp->aux.i2c_defer_count > 0)
DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
intel_dp->aux.i2c_nack_count,
intel_dp->aux.i2c_defer_count);
intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
} else {
struct edid *block = intel_connector->detect_edid;
 
/* We have to write the checksum
* of the last block read
*/
block += intel_connector->detect_edid->extensions;
 
if (!drm_dp_dpcd_write(&intel_dp->aux,
DP_TEST_EDID_CHECKSUM,
&block->checksum,
1))
DRM_DEBUG_KMS("Failed to write EDID checksum\n");
 
test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
}
 
/* Set test active flag here so userspace doesn't interrupt things */
intel_dp->compliance_test_active = 1;
 
return test_result;
}
 
static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
uint8_t test_result = DP_TEST_NAK;
return test_result;
}
 
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
uint8_t response = DP_TEST_NAK;
uint8_t rxdata = 0;
int status = 0;
 
intel_dp->compliance_test_active = 0;
intel_dp->compliance_test_type = 0;
intel_dp->compliance_test_data = 0;
 
intel_dp->aux.i2c_nack_count = 0;
intel_dp->aux.i2c_defer_count = 0;
 
status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
if (status <= 0) {
DRM_DEBUG_KMS("Could not read test request from sink\n");
goto update_status;
}
 
switch (rxdata) {
case DP_TEST_LINK_TRAINING:
DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
response = intel_dp_autotest_link_training(intel_dp);
break;
case DP_TEST_LINK_VIDEO_PATTERN:
DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
response = intel_dp_autotest_video_pattern(intel_dp);
break;
case DP_TEST_LINK_EDID_READ:
DRM_DEBUG_KMS("EDID test requested\n");
intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
response = intel_dp_autotest_edid(intel_dp);
break;
case DP_TEST_LINK_PHY_TEST_PATTERN:
DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
response = intel_dp_autotest_phy_pattern(intel_dp);
break;
default:
DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
break;
}
 
update_status:
status = drm_dp_dpcd_write(&intel_dp->aux,
DP_TEST_RESPONSE,
&response, 1);
if (status <= 0)
DRM_DEBUG_KMS("Could not write test response to sink\n");
}
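/*
* Flow summary (informative): a sink raises DP_AUTOMATED_TEST_REQUEST in
* DP_DEVICE_SERVICE_IRQ_VECTOR; the detect path (see intel_dp_detect()
* further down) spots the bit and calls intel_dp_handle_test_request(),
* which reads DP_TEST_REQUEST, dispatches to one of the
* intel_dp_autotest_*() handlers above and acks/naks via DP_TEST_RESPONSE.
* Short-pulse test requests are deliberately left unhandled (see
* intel_dp_check_link_status() below).
*/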
 
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
3748,14 → 4363,14
if (bret == true) {
 
/* check link status - esi[10] = 0x200c */
if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
if (intel_dp->active_mst_links &&
!drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
 
DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
DRM_DEBUG_KMS("got esi %3ph\n", esi);
ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
 
if (handled) {
3771,7 → 4386,7
 
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
if (bret == true) {
DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
goto go_again;
}
} else
3798,7 → 4413,7
* 3. Use Link Training from 2.5.3.3 and 3.5.1.3
* 4. Check link status on receipt of hot-plug interrupt
*/
void
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
3808,12 → 4423,9
 
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
if (!intel_encoder->connectors_active)
if (!intel_encoder->base.crtc)
return;
 
if (WARN_ON(!intel_encoder->base.crtc))
return;
 
if (!to_intel_crtc(intel_encoder->base.crtc)->active)
return;
 
3836,7 → 4448,7
sink_irq_vector);
 
if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
intel_dp_handle_test_request(intel_dp);
DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
3845,7 → 4457,6
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
intel_encoder->base.name);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
}
3913,41 → 4524,65
return status;
}
 
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
u32 bit;
 
if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
return connector_status_disconnected;
switch (port->port) {
case PORT_A:
return true;
case PORT_B:
bit = SDE_PORTB_HOTPLUG;
break;
case PORT_C:
bit = SDE_PORTC_HOTPLUG;
break;
case PORT_D:
bit = SDE_PORTD_HOTPLUG;
break;
default:
MISSING_CASE(port->port);
return false;
}
 
return intel_dp_detect_dpcd(intel_dp);
return I915_READ(SDEISR) & bit;
}
 
static int g4x_digital_port_connected(struct drm_device *dev,
struct intel_digital_port *intel_dig_port)
static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit;
u32 bit;
 
if (IS_VALLEYVIEW(dev)) {
switch (intel_dig_port->port) {
switch (port->port) {
case PORT_A:
return true;
case PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
bit = SDE_PORTB_HOTPLUG_CPT;
break;
case PORT_C:
bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
bit = SDE_PORTC_HOTPLUG_CPT;
break;
case PORT_D:
bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
bit = SDE_PORTD_HOTPLUG_CPT;
break;
case PORT_E:
bit = SDE_PORTE_HOTPLUG_SPT;
break;
default:
return -EINVAL;
MISSING_CASE(port->port);
return false;
}
} else {
switch (intel_dig_port->port) {
 
return I915_READ(SDEISR) & bit;
}
 
static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
u32 bit;
 
switch (port->port) {
case PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
break;
3958,21 → 4593,87
bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
break;
default:
return -EINVAL;
MISSING_CASE(port->port);
return false;
}
 
return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
 
if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
return 0;
return 1;
static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
u32 bit;
 
switch (port->port) {
case PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
break;
case PORT_C:
bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
break;
case PORT_D:
bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
break;
default:
MISSING_CASE(port->port);
return false;
}
 
return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
 
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *intel_dig_port)
{
struct intel_encoder *intel_encoder = &intel_dig_port->base;
enum port port;
u32 bit;

intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
switch (port) {
case PORT_A: bit = BXT_DE_PORT_HP_DDIA; break;
case PORT_B: bit = BXT_DE_PORT_HP_DDIB; break;
case PORT_C: bit = BXT_DE_PORT_HP_DDIC; break;
default:
MISSING_CASE(port);
return false;
}
return I915_READ(GEN8_DE_PORT_ISR) & bit;
}
 
/*
* intel_digital_port_connected - is the specified port connected?
* @dev_priv: i915 private structure
* @port: the port to test
*
* Return %true if @port is connected, %false otherwise.
*/
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
if (HAS_PCH_IBX(dev_priv))
return ibx_digital_port_connected(dev_priv, port);
if (HAS_PCH_SPLIT(dev_priv))
return cpt_digital_port_connected(dev_priv, port);
else if (IS_BROXTON(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
else if (IS_VALLEYVIEW(dev_priv))
return vlv_digital_port_connected(dev_priv, port);
else
return g4x_digital_port_connected(dev_priv, port);
}
 
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 
if (!intel_digital_port_connected(dev_priv, intel_dig_port))
return connector_status_disconnected;
 
return intel_dp_detect_dpcd(intel_dp);
}
 
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
int ret;
 
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
3984,10 → 4685,7
return status;
}
 
ret = g4x_digital_port_connected(dev, intel_dig_port);
if (ret == -EINVAL)
return connector_status_unknown;
else if (ret == 0)
if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
return connector_status_disconnected;
 
return intel_dp_detect_dpcd(intel_dp);
4036,26 → 4734,6
intel_dp->has_audio = false;
}
 
static enum intel_display_power_domain
intel_dp_power_get(struct intel_dp *dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
enum intel_display_power_domain power_domain;
 
power_domain = intel_display_port_power_domain(encoder);
intel_display_power_get(to_i915(encoder->base.dev), power_domain);
 
return power_domain;
}
 
static void
intel_dp_power_put(struct intel_dp *dp,
enum intel_display_power_domain power_domain)
{
struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
intel_display_power_put(to_i915(encoder->base.dev), power_domain);
}
 
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
4066,6 → 4744,7
enum drm_connector_status status;
enum intel_display_power_domain power_domain;
bool ret;
u8 sink_irq_vector;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
4078,7 → 4757,8
return connector_status_disconnected;
}
 
power_domain = intel_dp_power_get(intel_dp);
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_get(to_i915(dev), power_domain);
 
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp))
4108,8 → 4788,22
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
status = connector_status_connected;
 
/* Try to read the source of the interrupt */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
/* Clear interrupt source */
drm_dp_dpcd_writeb(&intel_dp->aux,
DP_DEVICE_SERVICE_IRQ_VECTOR,
sink_irq_vector);
 
if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
intel_dp_handle_test_request(intel_dp);
if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
 
out:
intel_dp_power_put(intel_dp, power_domain);
intel_display_power_put(to_i915(dev), power_domain);
return status;
}
 
4118,6 → 4812,7
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
enum intel_display_power_domain power_domain;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4127,11 → 4822,12
if (connector->status != connector_status_connected)
return;
 
power_domain = intel_dp_power_get(intel_dp);
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
intel_dp_set_edid(intel_dp);
 
intel_dp_power_put(intel_dp, power_domain);
intel_display_power_put(dev_priv, power_domain);
 
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4216,7 → 4912,7
 
if (property == dev_priv->broadcast_rgb_property) {
bool old_auto = intel_dp->color_range_auto;
uint32_t old_range = intel_dp->color_range;
bool old_range = intel_dp->limited_color_range;
 
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
4224,11 → 4920,11
break;
case INTEL_BROADCAST_RGB_FULL:
intel_dp->color_range_auto = false;
intel_dp->color_range = 0;
intel_dp->limited_color_range = false;
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_dp->color_range_auto = false;
intel_dp->color_range = DP_COLOR_RANGE_16_235;
intel_dp->limited_color_range = true;
break;
default:
return -EINVAL;
4235,7 → 4931,7
}
 
if (old_auto == intel_dp->color_range_auto &&
old_range == intel_dp->color_range)
old_range == intel_dp->limited_color_range)
return 0;
 
goto done;
4292,7 → 4988,6
 
drm_dp_aux_unregister(&intel_dp->aux);
intel_dp_mst_encoder_cleanup(intel_dig_port);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
4304,6 → 4999,7
pps_unlock(intel_dp);
 
}
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
}
 
4343,7 → 5039,7
* indefinitely.
*/
DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
power_domain = intel_display_port_power_domain(&intel_dig_port->base);
power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
intel_display_power_get(dev_priv, power_domain);
 
edp_panel_vdd_schedule_off(intel_dp);
4373,12 → 5069,15
}
 
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = intel_connector_dpms,
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dp_detect,
.force = intel_dp_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
 
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4392,13 → 5091,7
.destroy = intel_dp_encoder_destroy,
};
 
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
return;
}
 
bool
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
4406,9 → 5099,10
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
bool ret = true;
enum irqreturn ret = IRQ_NONE;
 
if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
 
if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4420,7 → 5114,7
*/
DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
port_name(intel_dig_port->port));
return false;
return IRQ_HANDLED;
}
 
DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4427,18 → 5121,15
port_name(intel_dig_port->port),
long_hpd ? "long" : "short");
 
power_domain = intel_display_port_power_domain(intel_encoder);
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
if (long_hpd) {
/* indicate that we need to restart link training */
intel_dp->train_set_valid = false;
 
if (HAS_PCH_SPLIT(dev)) {
if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
if (!intel_digital_port_connected(dev_priv, intel_dig_port))
goto mst_fail;
} else {
if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
goto mst_fail;
}
 
if (!intel_dp_get_dpcd(intel_dp)) {
goto mst_fail;
4446,9 → 5137,12
 
intel_dp_probe_oui(intel_dp);
 
if (!intel_dp_probe_mst(intel_dp))
if (!intel_dp_probe_mst(intel_dp)) {
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
goto mst_fail;
 
}
} else {
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4456,16 → 5150,14
}
 
if (!intel_dp->is_mst) {
/*
* we'll check the link status via the normal hot plug path later -
* but for short hpds we should check it now
*/
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
}
ret = false;
 
ret = IRQ_HANDLED;
 
goto put_power;
mst_fail:
/* if we were in MST mode, and device is not there get out of MST mode */
4499,7 → 5191,7
return -1;
}
 
/* check the VBT to see whether the eDP is on DP-D port */
/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
4506,11 → 5198,19
union child_device_config *p_child;
int i;
static const short port_mapping[] = {
[PORT_B] = PORT_IDPB,
[PORT_C] = PORT_IDPC,
[PORT_D] = PORT_IDPD,
[PORT_B] = DVO_PORT_DPB,
[PORT_C] = DVO_PORT_DPC,
[PORT_D] = DVO_PORT_DPD,
[PORT_E] = DVO_PORT_DPE,
};
 
/*
* eDP not supported on g4x. So bail out early just
* for a bit of extra safety in case the VBT is bonkers.
*/
if (INTEL_INFO(dev)->gen < 5)
return false;
 
if (port == PORT_A)
return true;
 
4561,8 → 5261,8
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_power_seq cur, vbt, spec,
*final = &intel_dp->pps_delays;
u32 pp_on, pp_off, pp_div, pp;
int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
4570,7 → 5270,16
if (final->t11_t12 != 0)
return;
 
if (HAS_PCH_SPLIT(dev)) {
if (IS_BROXTON(dev)) {
/*
* TODO: BXT has 2 sets of PPS registers.
* The correct register for Broxton needs to be identified
* using the VBT; hardcoded for now.
*/
pp_ctrl_reg = BXT_PP_CONTROL(0);
pp_on_reg = BXT_PP_ON_DELAYS(0);
pp_off_reg = BXT_PP_OFF_DELAYS(0);
} else if (HAS_PCH_SPLIT(dev)) {
pp_ctrl_reg = PCH_PP_CONTROL;
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
4586,12 → 5295,14
 
/* Workaround: Need to write PP_CONTROL with the unlock key as
* the very first thing. */
pp = ironlake_get_pp_control(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
pp_ctl = ironlake_get_pp_control(intel_dp);
 
pp_on = I915_READ(pp_on_reg);
pp_off = I915_READ(pp_off_reg);
if (!IS_BROXTON(dev)) {
I915_WRITE(pp_ctrl_reg, pp_ctl);
pp_div = I915_READ(pp_div_reg);
}
 
/* Pull timing values out of registers */
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4606,8 → 5317,17
cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
PANEL_POWER_DOWN_DELAY_SHIFT;
 
if (IS_BROXTON(dev)) {
u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
BXT_POWER_CYCLE_DELAY_SHIFT;
if (tmp > 0)
cur.t11_t12 = (tmp - 1) * 1000;
else
cur.t11_t12 = 0;
} else {
cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
}
 
DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4664,13 → 5384,23
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_on, pp_off, pp_div, port_sel = 0;
int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
int pp_on_reg, pp_off_reg, pp_div_reg;
int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
enum port port = dp_to_dig_port(intel_dp)->port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
if (HAS_PCH_SPLIT(dev)) {
if (IS_BROXTON(dev)) {
/*
* TODO: BXT has 2 sets of PPS registers.
* The correct register for Broxton needs to be identified
* using the VBT; hardcoded for now.
*/
pp_ctrl_reg = BXT_PP_CONTROL(0);
pp_on_reg = BXT_PP_ON_DELAYS(0);
pp_off_reg = BXT_PP_OFF_DELAYS(0);
 
} else if (HAS_PCH_SPLIT(dev)) {
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_div_reg = PCH_PP_DIVISOR;
4696,9 → 5426,16
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
if (IS_BROXTON(dev)) {
pp_div = I915_READ(pp_ctrl_reg);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
<< BXT_POWER_CYCLE_DELAY_SHIFT);
} else {
pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
<< PANEL_POWER_CYCLE_DELAY_SHIFT);
}
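/*
* Worked example for the BXT branch above: seq->t11_t12 == 5000 encodes
* as DIV_ROUND_UP(5001, 1000) == 6 in the power-cycle-delay field, and
* the readout in intel_dp_init_panel_power_sequencer() decodes it back
* as (6 - 1) * 1000 == 5000.
*/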
 
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
4715,24 → 5452,40
 
I915_WRITE(pp_on_reg, pp_on);
I915_WRITE(pp_off_reg, pp_off);
if (IS_BROXTON(dev))
I915_WRITE(pp_ctrl_reg, pp_div);
else
I915_WRITE(pp_div_reg, pp_div);
 
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(pp_on_reg),
I915_READ(pp_off_reg),
IS_BROXTON(dev) ?
(I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
I915_READ(pp_div_reg));
}
 
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
/**
* intel_dp_set_drrs_state - program registers for RR switch to take effect
* @dev: DRM device
* @refresh_rate: RR to be programmed
*
* This function gets called when refresh rate (RR) has to be changed from
* one frequency to another. Switches can be between high and low RR
* supported by the panel or to any other RR based on media playback (in
* this case, RR value needs to be passed from user space).
*
* The caller of this function needs to take a lock on dev_priv->drrs.
*/
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
struct intel_dp *intel_dp = NULL;
struct intel_crtc_config *config = NULL;
struct intel_digital_port *dig_port = NULL;
struct intel_dp *intel_dp = dev_priv->drrs.dp;
struct intel_crtc_state *config = NULL;
struct intel_crtc *intel_crtc = NULL;
struct intel_connector *intel_connector = dev_priv->drrs.connector;
u32 reg, val;
enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;
enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
 
if (refresh_rate <= 0) {
DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4739,24 → 5492,19
return;
}
 
if (intel_connector == NULL) {
DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
if (intel_dp == NULL) {
DRM_DEBUG_KMS("DRRS not supported.\n");
return;
}
 
/*
* FIXME: This needs proper synchronization with psr state. But really
* hard to tell without seeing the users of this function.
* Check locking and ordering once that lands.
* FIXME: This needs proper synchronization with psr state for some
* platforms that cannot have PSR and DRRS enabled at the same time.
*/
if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
return;
}
 
encoder = intel_attached_encoder(&intel_connector->base);
intel_dp = enc_to_intel_dp(&encoder->base);
intel_crtc = encoder->new_crtc;
dig_port = dp_to_dig_port(intel_dp);
encoder = &dig_port->base;
intel_crtc = to_intel_crtc(encoder->base.crtc);
 
if (!intel_crtc) {
DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4763,17 → 5511,18
return;
}
 
config = &intel_crtc->config;
config = intel_crtc->config;
 
if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
return;
}
 
if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
refresh_rate)
index = DRRS_LOW_RR;
 
if (index == intel_dp->drrs_state.refresh_rate_type) {
if (index == dev_priv->drrs.refresh_rate_type) {
DRM_DEBUG_KMS(
"DRRS requested for previously set RR...ignoring\n");
return;
4784,45 → 5533,288
return;
}
 
if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
reg = PIPECONF(intel_crtc->config.cpu_transcoder);
if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
switch (index) {
case DRRS_HIGH_RR:
intel_dp_set_m_n(intel_crtc, M1_N1);
break;
case DRRS_LOW_RR:
intel_dp_set_m_n(intel_crtc, M2_N2);
break;
case DRRS_MAX_RR:
default:
DRM_ERROR("Unsupported refreshrate type\n");
}
} else if (INTEL_INFO(dev)->gen > 6) {
u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
u32 val;
 
val = I915_READ(reg);
if (index > DRRS_HIGH_RR) {
if (IS_VALLEYVIEW(dev))
val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
else
val |= PIPECONF_EDP_RR_MODE_SWITCH;
intel_dp_set_m_n(intel_crtc);
} else {
if (IS_VALLEYVIEW(dev))
val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
else
val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
}
I915_WRITE(reg, val);
}
 
dev_priv->drrs.refresh_rate_type = index;
 
DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
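/*
* Note: every caller below honours the locking contract from the kerneldoc:
* intel_edp_drrs_disable(), intel_edp_drrs_downclock_work(),
* intel_edp_drrs_invalidate() and intel_edp_drrs_flush() all hold
* dev_priv->drrs.mutex around this call.
*/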
 
/**
* intel_edp_drrs_enable - init drrs struct if supported
* @intel_dp: DP struct
*
* Initializes frontbuffer_bits and drrs.dp
*/
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
if (!intel_crtc->config->has_drrs) {
DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
return;
}
 
mutex_lock(&dev_priv->drrs.mutex);
if (WARN_ON(dev_priv->drrs.dp)) {
DRM_ERROR("DRRS already enabled\n");
goto unlock;
}
 
dev_priv->drrs.busy_frontbuffer_bits = 0;
 
dev_priv->drrs.dp = intel_dp;
 
unlock:
mutex_unlock(&dev_priv->drrs.mutex);
}
 
/**
* intel_edp_drrs_disable - Disable DRRS
* @intel_dp: DP struct
*
*/
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
if (!intel_crtc->config->has_drrs)
return;
 
mutex_lock(&dev_priv->drrs.mutex);
if (!dev_priv->drrs.dp) {
mutex_unlock(&dev_priv->drrs.mutex);
return;
}
 
if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
intel_dp_set_drrs_state(dev_priv->dev,
intel_dp->attached_connector->panel.
fixed_mode->vrefresh);
 
dev_priv->drrs.dp = NULL;
mutex_unlock(&dev_priv->drrs.mutex);
 
cancel_delayed_work_sync(&dev_priv->drrs.work);
}
 
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), drrs.work.work);
struct intel_dp *intel_dp;
 
mutex_lock(&dev_priv->drrs.mutex);
 
intel_dp = dev_priv->drrs.dp;
 
if (!intel_dp)
goto unlock;
 
/*
* mutex taken to ensure that there is no race between different
* drrs calls trying to update refresh rate. This scenario may occur
* in the future when idleness detection based DRRS in the kernel and
* possible calls from user space to set a different RR are made.
* The delayed work can race with an invalidate hence we need to
* recheck.
*/
 
mutex_lock(&intel_dp->drrs_state.mutex);
if (dev_priv->drrs.busy_frontbuffer_bits)
goto unlock;
 
intel_dp->drrs_state.refresh_rate_type = index;
if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
intel_dp_set_drrs_state(dev_priv->dev,
intel_dp->attached_connector->panel.
downclock_mode->vrefresh);
 
mutex_unlock(&intel_dp->drrs_state.mutex);
unlock:
mutex_unlock(&dev_priv->drrs.mutex);
}
 
DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
/**
* intel_edp_drrs_invalidate - Disable Idleness DRRS
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called every time rendering on the given planes starts.
* Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
*
* Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
*/
void intel_edp_drrs_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
 
if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;
 
cancel_delayed_work(&dev_priv->drrs.work);
 
mutex_lock(&dev_priv->drrs.mutex);
if (!dev_priv->drrs.dp) {
mutex_unlock(&dev_priv->drrs.mutex);
return;
}
 
crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
 
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
 
/* invalidate means busy screen hence upclock */
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
intel_dp_set_drrs_state(dev_priv->dev,
dev_priv->drrs.dp->attached_connector->panel.
fixed_mode->vrefresh);
 
mutex_unlock(&dev_priv->drrs.mutex);
}
 
/**
* intel_edp_drrs_flush - Restart Idleness DRRS
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called every time rendering on the given planes has
* completed or a flip on a crtc is completed. So DRRS should be upclocked
* (LOW_RR -> HIGH_RR). Idleness detection should also be restarted,
* if no other planes are dirty.
*
* Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
*/
void intel_edp_drrs_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
 
if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
return;
 
// cancel_delayed_work(&dev_priv->drrs.work);
 
mutex_lock(&dev_priv->drrs.mutex);
if (!dev_priv->drrs.dp) {
mutex_unlock(&dev_priv->drrs.mutex);
return;
}
 
crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
 
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
/* flush means busy screen hence upclock */
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
intel_dp_set_drrs_state(dev_priv->dev,
dev_priv->drrs.dp->attached_connector->panel.
fixed_mode->vrefresh);
 
mutex_unlock(&dev_priv->drrs.mutex);
}
 
/**
* DOC: Display Refresh Rate Switching (DRRS)
*
* Display Refresh Rate Switching (DRRS) is a power conservation feature
* which enables switching between low and high refresh rates,
* dynamically, based on the usage scenario. This feature is applicable
* for internal panels.
*
* Indication that the panel supports DRRS is given by the panel EDID, which
* would list multiple refresh rates for one resolution.
*
* DRRS is of 2 types - static and seamless.
* Static DRRS involves changing refresh rate (RR) by doing a full modeset
* (may appear as a blink on screen) and is used in dock-undock scenarios.
* Seamless DRRS involves changing RR without any visual effect to the user
* and can be used during normal system usage. This is done by programming
* certain registers.
*
* Support for static/seamless DRRS may be indicated in the VBT based on
* inputs from the panel spec.
*
* DRRS saves power by switching to low RR based on usage scenarios.
*
* eDP DRRS:-
* The implementation is based on frontbuffer tracking implementation.
* When there is a disturbance on the screen triggered by user activity or a
* periodic system activity, DRRS is disabled (RR is changed to high RR).
* When there is no movement on screen, after a timeout of 1 second, a switch
* to low RR is made.
* For integration with frontbuffer tracking code,
* intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
*
* DRRS can be further extended to support other internal panels and also
* the scenario of video playback wherein RR is set based on the rate
* requested by userspace.
*/
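/*
* Integration sketch (illustrative only; the real call sites live in the
* frontbuffer tracking code and this helper name is made up):
*/
static void example_frontbuffer_event(struct drm_device *dev,
unsigned frontbuffer_bits, bool flushed)
{
if (flushed)
/* rendering/flip done: upclock, idleness detection rearms */
intel_edp_drrs_flush(dev, frontbuffer_bits);
else
/* rendering started: upclock (LOW_RR -> HIGH_RR) */
intel_edp_drrs_invalidate(dev, frontbuffer_bits);
}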
 
/**
* intel_dp_drrs_init - Init basic DRRS work and mutex.
* @intel_connector: eDP connector
* @fixed_mode: preferred mode of panel
*
* This function is called only once at driver load to initialize basic
* DRRS stuff.
*
* Returns:
* Downclock mode if panel supports it, else return NULL.
* DRRS support is determined by the presence of downclock mode (apart
* from VBT setting).
*/
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector,
intel_dp_drrs_init(struct intel_connector *intel_connector,
struct drm_display_mode *fixed_mode)
{
struct drm_connector *connector = &intel_connector->base;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *downclock_mode = NULL;
 
INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
mutex_init(&dev_priv->drrs.mutex);
 
if (INTEL_INFO(dev)->gen <= 6) {
DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
return NULL;
4837,17 → 5829,13
(dev, fixed_mode, connector);
 
if (!downclock_mode) {
DRM_DEBUG_KMS("DRRS not supported\n");
DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
return NULL;
}
 
dev_priv->drrs.connector = intel_connector;
dev_priv->drrs.type = dev_priv->vbt.drrs_type;
 
mutex_init(&intel_dp->drrs_state.mutex);
 
intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
 
intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
return downclock_mode;
}
4867,8 → 5855,6
struct edid *edid;
enum pipe pipe = INVALID_PIPE;
 
intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
 
if (!is_edp(intel_dp))
return true;
 
4916,7 → 5902,6
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
downclock_mode = intel_dp_drrs_init(
intel_dig_port,
intel_connector, fixed_mode);
break;
}
4954,7 → 5939,7
}
 
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_connector->panel.backlight_power = intel_edp_backlight_power;
intel_connector->panel.backlight.power = intel_edp_backlight_power;
intel_panel_setup_backlight(connector, pipe);
 
return true;
5042,6 → 6027,8
break;
case PORT_B:
intel_encoder->hpd_pin = HPD_PORT_B;
if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
intel_encoder->hpd_pin = HPD_PORT_A;
break;
case PORT_C:
intel_encoder->hpd_pin = HPD_PORT_C;
5049,6 → 6036,9
case PORT_D:
intel_encoder->hpd_pin = HPD_PORT_D;
break;
case PORT_E:
intel_encoder->hpd_pin = HPD_PORT_E;
break;
default:
BUG();
}
5066,12 → 6056,10
intel_dp_aux_init(intel_dp, intel_connector);
 
/* init MST on ports that can support it */
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
if (port == PORT_B || port == PORT_C || port == PORT_D) {
if (HAS_DP_MST(dev) &&
(port == PORT_B || port == PORT_C || port == PORT_D))
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
}
}
 
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
drm_dp_aux_unregister(&intel_dp->aux);
5117,11 → 6105,9
if (!intel_dig_port)
return;
 
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dig_port);
return;
}
intel_connector = intel_connector_alloc();
if (!intel_connector)
goto err_connector_alloc;
 
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
5139,6 → 6125,7
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
} else if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
5164,17 → 6151,23
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
}
intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_dp_hot_plug;
 
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hpd_irq_port[port] = intel_dig_port;
dev_priv->hotplug.irq_port[port] = intel_dig_port;
 
if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;
 
return;
 
err_init_connector:
drm_encoder_cleanup(encoder);
kfree(intel_connector);
err_connector_alloc:
kfree(intel_dig_port);
kfree(intel_connector);
 
return;
}
}
 
void intel_dp_mst_suspend(struct drm_device *dev)
{
5183,7 → 6176,7
 
/* disable MST */
for (i = 0; i < I915_MAX_PORTS; i++) {
struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
if (!intel_dig_port)
continue;
 
5202,7 → 6195,7
int i;
 
for (i = 0; i < I915_MAX_PORTS; i++) {
struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
if (!intel_dig_port)
continue;
if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
/drivers/video/drm/i915/intel_dp_mst.c
26,20 → 26,24
#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
 
static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = encoder->base.dev;
int bpp;
struct drm_atomic_state *state;
int bpp, i;
int lane_count, slots;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
struct intel_connector *found = NULL, *intel_connector;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct drm_connector *drm_connector;
struct intel_connector *connector, *found = NULL;
struct drm_connector_state *connector_state;
int mst_pbn;
 
pipe_config->dp_encoder_is_mst = true;
51,15 → 55,20
* seem to suggest we should do otherwise.
*/
lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
intel_dp->link_bw = intel_dp_max_link_bw(intel_dp);
intel_dp->lane_count = lane_count;
 
 
pipe_config->lane_count = lane_count;
 
pipe_config->pipe_bpp = 24;
pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
 
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
if (intel_connector->new_encoder == encoder) {
found = intel_connector;
state = pipe_config->base.state;
 
for_each_connector_in_state(state, drm_connector, connector_state, i) {
connector = to_intel_connector(drm_connector);
 
if (connector_state->best_encoder == &encoder->base) {
found = connector;
break;
}
}
69,7 → 78,7
return false;
}
 
mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 
pipe_config->pbn = mst_pbn;
slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn);
80,6 → 89,10
&pipe_config->dp_m_n);
 
pipe_config->dp_m_n.tu = slots;
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_dp_set_ddi_pll_sel(pipe_config);
 
return true;
 
}
134,14 → 147,14
enum port port = intel_dig_port->port;
int ret;
uint32_t temp;
struct intel_connector *found = NULL, *intel_connector;
struct intel_connector *found = NULL, *connector;
int slots;
struct drm_crtc *crtc = encoder->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
if (intel_connector->new_encoder == encoder) {
found = intel_connector;
for_each_intel_connector(dev, connector) {
if (connector->base.state->best_encoder == &encoder->base) {
found = connector;
break;
}
}
151,6 → 164,11
return;
}
 
/* MST encoders are bound to a crtc, not to a connector,
* force the mapping here for get_hw_state.
*/
found->encoder = encoder;
 
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
intel_mst->port = found->port;
 
157,8 → 175,13
if (intel_dp->active_mst_links == 0) {
enum port port = intel_ddi_get_encoder_port(encoder);
 
I915_WRITE(PORT_CLK_SEL(port), intel_crtc->config.ddi_pll_sel);
intel_dp_set_link_params(intel_dp, intel_crtc->config);
 
/* FIXME: add support for SKL */
if (INTEL_INFO(dev)->gen < 9)
I915_WRITE(PORT_CLK_SEL(port),
intel_crtc->config->ddi_pll_sel);
 
intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
 
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
165,12 → 188,12
 
 
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
 
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
intel_mst->port, intel_crtc->config.pbn, &slots);
intel_mst->port,
intel_crtc->config->pbn, &slots);
if (ret == false) {
DRM_ERROR("failed to allocate vcpi\n");
return;
216,7 → 239,7
}
 
static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
223,7 → 246,7
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 temp, flags = 0;
 
pipe_config->has_dp_encoder = true;
254,7 → 277,11
default:
break;
}
pipe_config->adjusted_mode.flags |= flags;
pipe_config->base.adjusted_mode.flags |= flags;
 
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
 
intel_dp_get_m_n(crtc, pipe_config);
 
intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
307,11 → 334,14
}
 
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.dpms = intel_connector_dpms,
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_mst_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_dp_mst_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
 
static int intel_dp_mst_get_modes(struct drm_connector *connector)
333,6 → 363,16
return MODE_OK;
}
 
static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
struct drm_connector_state *state)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
struct intel_crtc *crtc = to_intel_crtc(state->crtc);
 
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
 
static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
343,6 → 383,7
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
.mode_valid = intel_dp_mst_mode_valid,
.atomic_best_encoder = intel_mst_atomic_best_encoder,
.best_encoder = intel_mst_best_encoder,
};
 
360,7 → 401,7
 
static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
{
if (connector->encoder) {
if (connector->encoder && connector->base.state->crtc) {
enum pipe pipe;
if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
return false;
371,7 → 412,7
 
static void intel_connector_add_to_fbdev(struct intel_connector *connector)
{
#ifdef CONFIG_DRM_I915_FBDEV
#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
#endif
379,7 → 420,7
 
static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
{
#ifdef CONFIG_DRM_I915_FBDEV
#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
#endif
394,7 → 435,7
struct drm_connector *connector;
int i;
 
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
intel_connector = intel_connector_alloc();
if (!intel_connector)
return NULL;
 
417,12 → 458,17
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
 
drm_mode_connector_set_path_property(connector, pathprop);
drm_reinit_primary_mode_group(dev);
mutex_lock(&dev->mode_config.mutex);
return connector;
}
 
static void intel_dp_register_mst_connector(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
drm_modeset_lock_all(dev);
intel_connector_add_to_fbdev(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
drm_connector_register(&intel_connector->base);
return connector;
}
 
static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
430,20 → 476,29
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
 
/* need to nuke the connector */
mutex_lock(&dev->mode_config.mutex);
intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
if (connector->state->crtc) {
struct drm_mode_set set;
int ret;
 
memset(&set, 0, sizeof(set));
set.crtc = connector->state->crtc,
 
ret = drm_atomic_helper_set_config(&set);
 
WARN(ret, "Disabling mst crtc failed with %i\n", ret);
}
drm_modeset_unlock_all(dev);
 
intel_connector->unregister(intel_connector);
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
intel_connector_remove_from_fbdev(intel_connector);
drm_connector_cleanup(connector);
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
 
drm_reinit_primary_mode_group(dev);
 
kfree(intel_connector);
DRM_DEBUG_KMS("\n");
}
459,6 → 514,7
 
static struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
.register_connector = intel_dp_register_mst_connector,
.destroy_connector = intel_dp_destroy_mst_connector,
.hotplug = intel_dp_mst_hotplug,
};
/drivers/video/drm/i915/intel_drv.h
35,10 → 35,8
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
 
#define DIV_ROUND_CLOSEST_ULL(ll, d) \
({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
 
/**
* _wait_for - magic (register) wait macro
*
132,18 → 130,12
 
struct intel_encoder {
struct drm_encoder base;
/*
* The new crtc this encoder will be driven from. Only differs from
* base->crtc while a modeset is in progress.
*/
struct intel_crtc *new_crtc;
 
enum intel_output_type type;
unsigned int cloneable;
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
bool (*compute_config)(struct intel_encoder *,
struct intel_crtc_config *);
struct intel_crtc_state *);
void (*pre_pll_enable)(struct intel_encoder *);
void (*pre_enable)(struct intel_encoder *);
void (*enable)(struct intel_encoder *);
150,6 → 142,7
void (*mode_set)(struct intel_encoder *intel_encoder);
void (*disable)(struct intel_encoder *);
void (*post_disable)(struct intel_encoder *);
void (*post_pll_disable)(struct intel_encoder *);
/* Read out the current hw state of this connector, returning true if
* the encoder is active. If the encoder is enabled it also set the pipe
* it is connected to in the pipe parameter. */
159,7 → 152,7
* pre-filled the pipe config. Note that intel_encoder->base.crtc must
* be set correctly before calling this function. */
void (*get_config)(struct intel_encoder *,
struct intel_crtc_config *pipe_config);
struct intel_crtc_state *pipe_config);
/*
* Called during system suspend after all pending requests for the
* encoder are flushed (for example for DP AUX transactions) and
184,10 → 177,24
bool enabled;
bool combination_mode; /* gen 2/4 only */
bool active_low_pwm;
 
/* PWM chip */
bool util_pin_active_low; /* bxt+ */
u8 controller; /* bxt+ only */
struct pwm_device *pwm;
 
struct backlight_device *device;
 
/* Connector and platform specific backlight functions */
int (*setup)(struct intel_connector *connector, enum pipe pipe);
uint32_t (*get)(struct intel_connector *connector);
void (*set)(struct intel_connector *connector, uint32_t level);
void (*disable)(struct intel_connector *connector);
void (*enable)(struct intel_connector *connector);
uint32_t (*hz_to_pwm)(struct intel_connector *connector,
uint32_t hz);
void (*power)(struct intel_connector *, bool enable);
} backlight;
 
void (*backlight_power)(struct intel_connector *, bool enable);
};
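/*
* Dispatch sketch (illustrative; helper name made up): platform-agnostic
* code is expected to go through the hooks above rather than poke
* backlight registers directly.
*/
static inline void example_panel_set_backlight(struct intel_connector *connector,
uint32_t level)
{
struct intel_panel *panel = &connector->panel;

if (panel->backlight.set)
panel->backlight.set(connector, level);
}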
 
struct intel_connector {
197,12 → 204,6
*/
struct intel_encoder *encoder;
 
/*
* The new encoder this connector will be driven. Only differs from
* encoder while a modeset is in progress.
*/
struct intel_encoder *new_encoder;
 
/* Reads out the current hw, returning true if the connector is enabled
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
243,24 → 244,99
int p;
} intel_clock_t;
 
struct intel_atomic_state {
struct drm_atomic_state base;
 
unsigned int cdclk;
bool dpll_set;
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
};
 
struct intel_plane_state {
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
struct drm_plane_state base;
struct drm_rect src;
struct drm_rect dst;
struct drm_rect clip;
struct drm_rect orig_src;
struct drm_rect orig_dst;
bool visible;
 
/*
* scaler_id
* = -1 : not using a scaler
* >= 0 : using a scaler
*
* plane requiring a scaler:
* - During check_plane, its bit is set in
* crtc_state->scaler_state.scaler_users by calling helper function
* update_scaler_plane.
* - scaler_id indicates the scaler it got assigned.
*
* plane doesn't require a scaler:
* - this can happen when scaling is no longer required or the plane simply
* got disabled.
* - During check_plane, corresponding bit is reset in
* crtc_state->scaler_state.scaler_users by calling helper function
* update_scaler_plane.
*/
int scaler_id;
 
struct drm_intel_sprite_colorkey ckey;
};
 
struct intel_plane_config {
bool tiled;
struct intel_initial_plane_config {
struct intel_framebuffer *fb;
unsigned int tiling;
int size;
u32 base;
};
 
struct intel_crtc_config {
#define SKL_MIN_SRC_W 8
#define SKL_MAX_SRC_W 4096
#define SKL_MIN_SRC_H 8
#define SKL_MAX_SRC_H 4096
#define SKL_MIN_DST_W 8
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
#define SKL_MAX_DST_H 4096
 
struct intel_scaler {
int in_use;
uint32_t mode;
};
 
struct intel_crtc_scaler_state {
#define SKL_NUM_SCALERS 2
struct intel_scaler scalers[SKL_NUM_SCALERS];
 
/*
* scaler_users: keeps track of users requesting scalers on this crtc.
*
* If a bit is set, a user is using a scaler.
* Here user can be a plane or crtc as defined below:
* bits 0-30 - plane (bit position is index from drm_plane_index)
* bit 31 - crtc
*
* Instead of creating a new index to cover planes and crtc, using
* existing drm_plane_index for planes which is well less than 31
* planes and bit 31 for crtc. This should be fine to cover all
* our platforms.
*
* intel_atomic_setup_scalers will set up available scalers for users
* requesting scalers. It will gracefully fail if the request exceeds
* availability.
*/
#define SKL_CRTC_INDEX 31
unsigned scaler_users;
 
/* scaler used by crtc for panel fitting purpose */
int scaler_id;
};
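/*
* Bookkeeping sketch (illustrative helper, not part of this diff): a user
* claims or releases its bit in scaler_users as described above, where
* @index is drm_plane_index() for planes or SKL_CRTC_INDEX for the crtc.
*/
static inline void example_mark_scaler_user(struct intel_crtc_scaler_state *state,
int index, bool needs_scaler)
{
if (needs_scaler)
state->scaler_users |= (1u << index);
else
state->scaler_users &= ~(1u << index);
}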
 
/* drm_mode->private_flags */
#define I915_MODE_FLAG_INHERITED 1
 
struct intel_crtc_state {
struct drm_crtc_state base;
 
/**
* quirks - bitfield with hw state readout quirks
*
270,18 → 346,9
* accordingly.
*/
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
unsigned long quirks;
 
/* User requested mode, only valid as a starting point to
* compute adjusted_mode, except in the case of (S)DVO where
* it's also for the output timings of the (S)DVO chip.
* adjusted_mode will then correspond to the S(DVO) chip's
* preferred input timings. */
struct drm_display_mode requested_mode;
/* Actual pipe timings ie. what we program into the pipe timing
* registers. adjusted_mode.crtc_clock is the pipe pixel clock. */
struct drm_display_mode adjusted_mode;
bool update_pipe;
 
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
369,6 → 436,8
/* Used by SDVO (and if we ever fix it, HDMI). */
unsigned pixel_multiplier;
 
uint8_t lane_count;
 
/* Panel fitter controls for gen2-gen4 + VLV */
struct {
u32 control;
394,8 → 463,22
 
bool dp_encoder_is_mst;
int pbn;
 
struct intel_crtc_scaler_state scaler_state;
 
/* w/a for waiting 2 vblanks during crtc enable */
enum pipe hsw_workaround_pipe;
};
 
struct vlv_wm_state {
struct vlv_pipe_wm wm[3];
struct vlv_sr_wm sr[3];
uint8_t num_active_planes;
uint8_t num_levels;
uint8_t level;
bool cxsr;
};
 
struct intel_pipe_wm {
struct intel_wm_level wm[5];
uint32_t linetime;
406,9 → 489,10
};
 
struct intel_mmio_flip {
u32 seqno;
struct intel_engine_cs *ring;
struct work_struct work;
struct drm_i915_private *i915;
struct drm_i915_gem_request *req;
struct intel_crtc *crtc;
};
 
struct skl_pipe_wm {
417,6 → 501,30
uint32_t linetime;
};
 
/*
* Tracking of operations that need to be performed at the beginning/end of an
* atomic commit, outside the atomic section where interrupts are disabled.
* These are generally operations that grab mutexes or might otherwise sleep
* and thus can't be run with interrupts disabled.
*/
struct intel_crtc_atomic_commit {
/* Sleepable operations to perform before commit */
bool wait_for_flips;
bool disable_fbc;
bool disable_ips;
bool disable_cxsr;
bool pre_disable_primary;
bool update_wm_pre, update_wm_post;
unsigned disabled_planes;
 
/* Sleepable operations to perform after commit */
unsigned fb_bits;
bool wait_vblank;
bool update_fbc;
bool post_enable_primary;
unsigned update_sprite_watermarks;
};
 
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
429,7 → 537,6
*/
bool active;
unsigned long enabled_power_domains;
bool primary_enabled; /* is the primary plane (partially) visible? */
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
440,18 → 547,15
* gen4+ this only adjusts up to a tile, offsets within a tile are
* handled in the hw itself (with the TILEOFF register). */
unsigned long dspaddr_offset;
int adjusted_x;
int adjusted_y;
 
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
int16_t cursor_width, cursor_height;
uint32_t cursor_cntl;
uint32_t cursor_size;
uint32_t cursor_base;
 
struct intel_plane_config plane_config;
struct intel_crtc_config config;
struct intel_crtc_config *new_config;
bool new_enabled;
struct intel_crtc_state *config;
 
/* reset counter value when the last flip was submitted */
unsigned int reset_counter;
466,18 → 570,44
struct intel_pipe_wm active;
/* SKL wm values currently in use */
struct skl_pipe_wm skl_active;
/* allow CxSR on this pipe */
bool cxsr_allowed;
} wm;
 
int scanline_offset;
struct intel_mmio_flip mmio_flip;
 
struct {
unsigned start_vbl_count;
ktime_t start_vbl_time;
int min_vbl, max_vbl;
int scanline_start;
} debug;
 
struct intel_crtc_atomic_commit atomic;
 
/* scalers available on this crtc */
int num_scalers;
 
struct vlv_wm_state wm_state;
};
 
struct intel_plane_wm_parameters {
uint32_t horiz_pixels;
uint32_t vert_pixels;
/*
* For packed pixel formats:
* bytes_per_pixel - holds bytes per pixel
* For planar pixel formats:
* bytes_per_pixel - holds bytes per pixel for uv-plane
* y_bytes_per_pixel - holds bytes per pixel for y-plane
*/
uint8_t bytes_per_pixel;
uint8_t y_bytes_per_pixel;
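/*
* Example (illustrative): for planar NV12 the uv-plane carries two
* bytes per pixel and the y-plane one, so bytes_per_pixel = 2 and
* y_bytes_per_pixel = 1; for packed XRGB8888 only bytes_per_pixel
* (= 4) is meaningful.
*/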
bool enabled;
bool scaled;
u64 tiling;
unsigned int rotation;
uint16_t fifo_size;
};
 
struct intel_plane {
484,14 → 614,9
struct drm_plane base;
int plane;
enum pipe pipe;
struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
uint32_t src_w, src_h;
unsigned int rotation;
uint32_t frontbuffer_bit;
 
/* Since we need to change the watermarks before/after
* enabling/disabling the planes, we need to store the parameters here
500,10 → 625,15
*/
struct intel_plane_wm_parameters wm;
 
/*
* NOTE: Do not place new plane state fields here (e.g., when adding
* new plane properties). New runtime state should now be placed in
* the intel_plane_state structure and accessed via drm_plane->state.
*/
 
void (*update_plane)(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
510,10 → 640,11
uint32_t src_w, uint32_t src_h);
void (*disable_plane)(struct drm_plane *plane,
struct drm_crtc *crtc);
int (*update_colorkey)(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key);
void (*get_colorkey)(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key);
int (*check_plane)(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
void (*commit_plane)(struct drm_plane *plane,
struct intel_plane_state *state);
};
 
struct intel_watermark_params {
535,17 → 666,20
unsigned long cursor_hpll_disable;
};
 
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
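/*
* The wrappers above are plain container_of() upcasts; typical use
* (illustrative):
*
*	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
*	struct drm_i915_gem_object *obj = intel_fb_obj(crtc->primary->fb);
*/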
 
struct intel_hdmi {
u32 hdmi_reg;
int ddc_bus;
uint32_t color_range;
bool limited_color_range;
bool color_range_auto;
bool has_hdmi_sink;
bool has_audio;
552,12 → 686,13
enum hdmi_force_audio force_audio;
bool rgb_quant_range_selectable;
enum hdmi_picture_aspect aspect_ratio;
struct intel_connector *attached_connector;
void (*write_infoframe)(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode);
const struct drm_display_mode *adjusted_mode);
bool (*infoframe_enabled)(struct drm_encoder *encoder);
};
 
564,30 → 699,49
struct intel_dp_mst_encoder;
#define DP_MAX_DOWNSTREAM_PORTS 0x10
 
/**
* HIGH_RR is the highest eDP panel refresh rate read from EDID
* LOW_RR is the lowest eDP panel refresh rate found from EDID
* parsing for the same resolution.
/*
* enum link_m_n_set:
* When the platform provides two sets of M_N registers for DP, we can
* program them and switch between them in case of DRRS.
* But when only one such register set is provided, we have to program
* the required divider value on that register itself, based on the
* DRRS state.
*
* M1_N1 : Program dp_m_n on M1_N1 registers
* dp_m2_n2 on M2_N2 registers (If supported)
*
* M2_N2 : Program dp_m2_n2 on M1_N1 registers
* M2_N2 registers are not supported
*/
enum edp_drrs_refresh_rate_type {
DRRS_HIGH_RR,
DRRS_LOW_RR,
DRRS_MAX_RR, /* RR count */
 
enum link_m_n_set {
/* Sets the m1_n1 and m2_n2 */
M1_N1 = 0,
M2_N2
};
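/*
* Illustrative use of the enum above (a sketch, not the driver's
* actual DRRS path): when dropping to the low refresh rate the
* divider set is swapped, roughly
*
*	intel_dp_set_m_n(crtc, drrs_low_rr ? M2_N2 : M1_N1);
*
* where drrs_low_rr is a hypothetical flag for the current DRRS state.
*/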
 
struct sink_crc {
bool started;
u8 last_crc[6];
int last_count;
};
 
struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
uint32_t DP;
int link_rate;
uint8_t lane_count;
bool has_audio;
enum hdmi_force_audio force_audio;
uint32_t color_range;
bool limited_color_range;
bool color_range_auto;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
struct sink_crc sink_crc;
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
608,7 → 762,6
enum pipe pps_pipe;
struct edp_power_seq pps_delays;
 
bool use_tps3;
bool can_mst; /* this port supports mst */
bool is_mst;
int active_mst_links;
628,12 → 781,12
bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider);
struct {
enum drrs_support_type type;
enum edp_drrs_refresh_rate_type refresh_rate_type;
struct mutex mutex;
} drrs_state;
bool train_set_valid;
 
/* Displayport compliance testing */
unsigned long compliance_test_type;
unsigned long compliance_test_data;
bool compliance_test_active;
};
 
struct intel_digital_port {
642,7 → 795,8
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
bool (*hpd_pulse)(struct intel_digital_port *, bool);
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
};
 
struct intel_dp_mst_encoder {
652,7 → 806,7
void *port; /* kept opaque since it's illegal to dereference it */
};
 
static inline int
static inline enum dpio_channel
vlv_dport_to_channel(struct intel_digital_port *dport)
{
switch (dport->port) {
666,7 → 820,21
}
}
 
static inline int
static inline enum dpio_phy
vlv_dport_to_phy(struct intel_digital_port *dport)
{
switch (dport->port) {
case PORT_B:
case PORT_C:
return DPIO_PHY0;
case PORT_D:
return DPIO_PHY1;
default:
BUG();
}
}
 
static inline enum dpio_channel
vlv_pipe_to_channel(enum pipe pipe)
{
switch (pipe) {
697,7 → 865,7
struct intel_unpin_work {
struct work_struct work;
struct drm_crtc *crtc;
struct drm_i915_gem_object *old_fb_obj;
struct drm_framebuffer *old_fb;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
atomic_t pending;
706,22 → 874,12
#define INTEL_FLIP_COMPLETE 2
u32 flip_count;
u32 gtt_offset;
struct intel_engine_cs *flip_queued_ring;
u32 flip_queued_seqno;
int flip_queued_vblank;
int flip_ready_vblank;
struct drm_i915_gem_request *flip_queued_req;
u32 flip_queued_vblank;
u32 flip_ready_vblank;
bool enable_stall_check;
};
 
struct intel_set_config {
struct drm_encoder **save_connector_encoders;
struct drm_crtc **save_encoder_crtcs;
bool *save_crtc_enabled;
 
bool fb_changed;
bool mode_changed;
};
 
struct intel_load_detect_pipe {
struct drm_framebuffer *release_fb;
bool load_detect_temp;
792,6 → 950,7
void gen6_reset_rps_interrupts(struct drm_device *dev);
void gen6_enable_rps_interrupts(struct drm_device *dev);
void gen6_disable_rps_interrupts(struct drm_device *dev);
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
804,7 → 963,8
}
 
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);
 
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
816,7 → 976,6
void intel_ddi_init(struct drm_device *dev, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
void intel_ddi_pll_init(struct drm_device *dev);
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
823,69 → 982,61
enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
bool intel_ddi_pll_select(struct intel_crtc *crtc);
bool intel_ddi_pll_select(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_fdi_disable(struct drm_crtc *crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
struct intel_crtc_state *pipe_config);
struct intel_encoder *
intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
 
void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
 
/* intel_frontbuffer.c */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring);
enum fb_op_origin origin);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flush(struct drm_device *dev,
void intel_frontbuffer_flip(struct drm_device *dev,
unsigned frontbuffer_bits);
/**
* intel_frontbuffer_flip - synchronous frontbuffer flip
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. This is for
* synchronous plane updates which will happen on the next vblank and which will
* not get delayed by pending gpu rendering.
*
* Can be called without any locks held.
*/
static inline
void intel_frontbuffer_flip(struct drm_device *dev,
unsigned frontbuffer_bits)
{
intel_frontbuffer_flush(dev, frontbuffer_bits);
}
unsigned int intel_fb_align_height(struct drm_device *dev,
unsigned int height,
uint32_t pixel_format,
uint64_t fb_format_modifier);
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
enum fb_op_origin origin);
u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
uint32_t pixel_format);
 
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
 
 
/* intel_audio.c */
void intel_init_audio(struct drm_device *dev);
void intel_audio_codec_enable(struct intel_encoder *encoder);
void intel_audio_codec_disable(struct intel_encoder *encoder);
void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
 
/* intel_display.c */
const char *intel_output_name(int output);
extern const struct drm_plane_funcs intel_plane_funcs;
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
int intel_hrawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_control(struct drm_crtc *crtc, bool enable);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
int intel_display_suspend(struct drm_device *dev);
void intel_encoder_destroy(struct drm_encoder *encoder);
void intel_connector_dpms(struct drm_connector *, int mode);
int intel_connector_init(struct intel_connector *);
struct intel_connector *intel_connector_alloc(void);
bool intel_connector_get_hw_state(struct intel_connector *connector);
void intel_modeset_check_state(struct drm_device *dev);
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port);
void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
904,17 → 1055,20
}
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport);
struct intel_digital_port *dport,
unsigned int expected_mask);
bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
struct intel_engine_cs *pipelined);
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
const struct drm_plane_state *plane_state,
struct intel_engine_cs *pipelined,
struct drm_i915_gem_request **pipelined_request);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
923,7 → 1077,34
void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
void intel_check_page_flip(struct drm_device *dev, int pipe);
int intel_prepare_plane_fb(struct drm_plane *plane,
const struct drm_plane_state *new_state);
void intel_cleanup_plane_fb(struct drm_plane *plane,
const struct drm_plane_state *old_state);
int intel_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
uint64_t *val);
int intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val);
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state);
 
unsigned int
intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
uint64_t fb_format_modifier, unsigned int plane);
 
static inline bool
intel_rotation_90_or_270(unsigned int rotation)
{
return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270));
}
 
void intel_create_rotation_property(struct drm_device *dev,
struct intel_plane *plane);
 
/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
931,8 → 1112,8
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
void intel_put_shared_dpll(struct intel_crtc *crtc);
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *state);
 
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll);
952,7 → 1133,8
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
int *x, int *y,
unsigned int tiling_mode,
unsigned int bpp,
unsigned int pitch);
960,39 → 1142,72
void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void broxton_init_cdclk(struct drm_device *dev);
void broxton_uninit_cdclk(struct drm_device *dev);
void broxton_ddi_phy_init(struct drm_device *dev);
void broxton_ddi_phy_uninit(struct drm_device *dev);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc);
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
void
ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
int dotclock);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
intel_clock_t *best_clock);
int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
 
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config);
int intel_format_to_fourcc(int format);
struct intel_crtc_state *pipe_config);
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
 
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
struct drm_i915_gem_object *obj,
unsigned int plane);
 
u32 skl_plane_ctl_format(uint32_t pixel_format);
u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
u32 skl_plane_ctl_rotation(unsigned int rotation);
 
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_device *dev);
enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv);
void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
enum csr_state state);
void intel_csr_load_program(struct drm_device *dev);
void intel_csr_ucode_fini(struct drm_device *dev);
void assert_csr_loaded(struct drm_i915_private *dev_priv);
 
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_complete_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
void intel_dp_check_link_status(struct intel_dp *intel_dp);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
struct intel_crtc_state *pipe_config);
bool intel_dp_is_edp(struct drm_device *dev, enum port port);
bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
999,15 → 1214,23
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
int intel_dp_max_link_bw(struct intel_dp *intel_dp);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
void intel_plane_destroy(struct drm_plane *plane);
void intel_edp_drrs_enable(struct intel_dp *intel_dp);
void intel_edp_drrs_disable(struct intel_dp *intel_dp);
void intel_edp_drrs_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port);
void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
 
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
1021,7 → 1244,7
 
 
/* legacy fbdev emulation in intel_fbdev.c */
#ifdef CONFIG_DRM_I915_FBDEV
#ifdef CONFIG_DRM_FBDEV_EMULATION
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
extern void intel_fbdev_fini(struct drm_device *dev);
1051,6 → 1274,20
}
#endif
 
/* intel_fbc.c */
bool intel_fbc_enabled(struct drm_i915_private *dev_priv);
void intel_fbc_update(struct drm_i915_private *dev_priv);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_disable(struct drm_i915_private *dev_priv);
void intel_fbc_disable_crtc(struct intel_crtc *crtc);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin);
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
const char *intel_no_fbc_reason_str(enum no_fbc_reason reason);
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
 
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1057,7 → 1294,7
struct intel_connector *intel_connector);
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
struct intel_crtc_state *pipe_config);
 
 
/* intel_lvds.c */
1071,6 → 1308,7
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
void intel_attach_force_audio_property(struct drm_connector *connector);
void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
void intel_attach_aspect_ratio_property(struct drm_connector *connector);
 
 
/* intel_overlay.c */
1081,6 → 1319,7
struct drm_file *file_priv);
int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_overlay_reset(struct drm_i915_private *dev_priv);
 
 
/* intel_panel.c */
1091,10 → 1330,10
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
void intel_pch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
struct intel_crtc_state *pipe_config,
int fitting_mode);
void intel_gmch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
struct intel_crtc_state *pipe_config,
int fitting_mode);
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
u32 level, u32 max);
1102,7 → 1341,6
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
void intel_panel_destroy_backlight(struct drm_connector *connector);
void intel_panel_init_backlight_funcs(struct drm_device *dev);
enum drm_connector_status intel_panel_detect(struct drm_device *dev);
extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_device *dev,
1113,14 → 1351,16
 
 
/* intel_psr.c */
bool intel_psr_is_enabled(struct drm_device *dev);
void intel_psr_enable(struct intel_dp *intel_dp);
void intel_psr_disable(struct intel_dp *intel_dp);
void intel_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
void intel_psr_init(struct drm_device *dev);
void intel_psr_single_frame_update(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_psr_init(struct drm_device *dev);
 
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
1136,8 → 1376,6
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
1144,6 → 1382,12
 
void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
 
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask);
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
enum dpio_channel ch, bool override);
 
 
/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
void intel_suspend_hw(struct drm_device *dev);
1157,8 → 1401,6
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
bool intel_fbc_enabled(struct drm_device *dev);
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
void intel_init_gt_powersave(struct drm_device *dev);
1167,16 → 1409,22
void intel_disable_gt_powersave(struct drm_device *dev);
void intel_suspend_gt_powersave(struct drm_device *dev);
void intel_reset_gt_powersave(struct drm_device *dev);
void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv,
struct intel_rps_client *rps,
unsigned long submitted);
void intel_queue_rps_boost_for_request(struct drm_device *dev,
struct drm_i915_gem_request *req);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 
 
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
 
1183,22 → 1431,47
 
/* intel_sprite.c */
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
enum plane plane);
int intel_plane_set_property(struct drm_plane *plane,
struct drm_property *prop,
uint64_t val);
int intel_plane_restore(struct drm_plane *plane);
void intel_plane_disable(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
bool intel_pipe_update_start(struct intel_crtc *crtc,
uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
void intel_pipe_update_start(struct intel_crtc *crtc);
void intel_pipe_update_end(struct intel_crtc *crtc);
 
/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);
 
/* intel_atomic.c */
int intel_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val);
struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
void intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
void intel_atomic_state_clear(struct drm_atomic_state *);
struct intel_shared_dpll_config *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s);
 
static inline struct intel_crtc_state *
intel_atomic_get_crtc_state(struct drm_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_crtc_state *crtc_state;
crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
if (IS_ERR(crtc_state))
return ERR_CAST(crtc_state);
 
return to_intel_crtc_state(crtc_state);
}
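/*
* The helper above is typically called from an atomic check hook
* (illustrative sketch):
*
*	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
*	if (IS_ERR(crtc_state))
*		return PTR_ERR(crtc_state);
*/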
int intel_atomic_setup_scalers(struct drm_device *dev,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
 
/* intel_atomic_plane.c */
struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
 
#endif /* __INTEL_DRV_H__ */
/drivers/video/drm/i915/intel_dsi.c
24,27 → 24,223
*/
 
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
#include <drm/drm_panel.h>
#include <drm/drm_mipi_dsi.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dsi_cmd.h"
 
/* the sub-encoders aka panel drivers */
static const struct intel_dsi_device intel_dsi_devices[] = {
static const struct {
u16 panel_id;
struct drm_panel * (*init)(struct intel_dsi *intel_dsi, u16 panel_id);
} intel_dsi_drivers[] = {
{
.panel_id = MIPI_DSI_GENERIC_PANEL_ID,
.name = "vbt-generic-dsi-vid-mode-display",
.dev_ops = &vbt_generic_dsi_display_ops,
.init = vbt_panel_init,
},
};
 
static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 mask;
 
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
 
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100))
DRM_ERROR("DPI FIFOs are not empty\n");
}
 
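/*
* write_data()/read_data() below marshal between byte buffers and the
* 32-bit generic FIFO registers: each register word carries up to
* four payload bytes, least significant byte first.
*/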
static void write_data(struct drm_i915_private *dev_priv, u32 reg,
const u8 *data, u32 len)
{
u32 i, j;
 
for (i = 0; i < len; i += 4) {
u32 val = 0;
 
for (j = 0; j < min_t(u32, len - i, 4); j++)
val |= *data++ << 8 * j;
 
I915_WRITE(reg, val);
}
}
 
static void read_data(struct drm_i915_private *dev_priv, u32 reg,
u8 *data, u32 len)
{
u32 i, j;
 
for (i = 0; i < len; i += 4) {
u32 val = I915_READ(reg);
 
for (j = 0; j < min_t(u32, len - i, 4); j++)
*data++ = val >> 8 * j;
}
}
 
static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dsi_host->port;
struct mipi_dsi_packet packet;
ssize_t ret;
const u8 *header, *data;
u32 data_reg, data_mask, ctrl_reg, ctrl_mask;
 
ret = mipi_dsi_create_packet(&packet, msg);
if (ret < 0)
return ret;
 
header = packet.header;
data = packet.payload;
 
if (msg->flags & MIPI_DSI_MSG_USE_LPM) {
data_reg = MIPI_LP_GEN_DATA(port);
data_mask = LP_DATA_FIFO_FULL;
ctrl_reg = MIPI_LP_GEN_CTRL(port);
ctrl_mask = LP_CTRL_FIFO_FULL;
} else {
data_reg = MIPI_HS_GEN_DATA(port);
data_mask = HS_DATA_FIFO_FULL;
ctrl_reg = MIPI_HS_GEN_CTRL(port);
ctrl_mask = HS_CTRL_FIFO_FULL;
}
 
/* note: this is never true for reads */
if (packet.payload_length) {
 
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & data_mask) == 0, 50))
DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
 
write_data(dev_priv, data_reg, packet.payload,
packet.payload_length);
}
 
if (msg->rx_len) {
I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
}
 
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & ctrl_mask) == 0, 50)) {
DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
}
 
I915_WRITE(ctrl_reg, header[2] << 16 | header[1] << 8 | header[0]);
 
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & data_mask) == data_mask, 50))
DRM_ERROR("Timeout waiting for read data.\n");
 
read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
}
 
/* XXX: fix for reads and writes */
return 4 + packet.payload_length;
}
 
static int intel_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *dsi)
{
return 0;
}
 
static int intel_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *dsi)
{
return 0;
}
 
static const struct mipi_dsi_host_ops intel_dsi_host_ops = {
.attach = intel_dsi_host_attach,
.detach = intel_dsi_host_detach,
.transfer = intel_dsi_host_transfer,
};
 
static struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
enum port port)
{
struct intel_dsi_host *host;
struct mipi_dsi_device *device;
 
host = kzalloc(sizeof(*host), GFP_KERNEL);
if (!host)
return NULL;
 
host->base.ops = &intel_dsi_host_ops;
host->intel_dsi = intel_dsi;
host->port = port;
 
/*
* We should call mipi_dsi_host_register(&host->base) here, but we don't
* have a host->dev, and we don't have OF stuff either. So just use the
* dsi framework as a library and hope for the best. Create the dsi
* devices by ourselves here too. Need to be careful though, because we
* don't initialize any of the driver model devices here.
*/
device = kzalloc(sizeof(*device), GFP_KERNEL);
if (!device) {
kfree(host);
return NULL;
}
 
device->host = &host->base;
host->device = device;
 
return host;
}
 
/*
* send a video mode command
*
* XXX: commands with data in MIPI_DPI_DATA?
*/
static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 mask;
 
/* XXX: pipe, hs */
if (hs)
cmd &= ~DPI_LP_MODE;
else
cmd |= DPI_LP_MODE;
 
/* clear bit */
I915_WRITE(MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
 
/* XXX: old code skips write if control unchanged */
if (cmd == I915_READ(MIPI_DPI_CONTROL(port)))
DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
 
I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
 
mask = SPL_PKT_SENT_INTERRUPT;
if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 100))
DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
 
return 0;
}
 
static void band_gap_reset(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
 
vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
53,15 → 249,9
vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
 
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
}
 
static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_dsi, base);
}
 
static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
{
return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
72,20 → 262,14
return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
}
 
static void intel_dsi_hot_plug(struct intel_encoder *encoder)
{
DRM_DEBUG_KMS("\n");
}
 
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *config)
struct intel_crtc_state *config)
{
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
struct drm_display_mode *mode = &config->requested_mode;
struct drm_display_mode *adjusted_mode = &config->base.adjusted_mode;
 
DRM_DEBUG_KMS("\n");
 
95,74 → 279,183
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
 
if (intel_dsi->dev.dev_ops->mode_fixup)
return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
mode, adjusted_mode);
 
return true;
}
 
static void intel_dsi_device_ready(struct intel_encoder *encoder)
static void bxt_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
int pipe = intel_crtc->pipe;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
 
DRM_DEBUG_KMS("\n");
 
mutex_lock(&dev_priv->dpio_lock);
/* Exit Low power state in 4 steps*/
for_each_dsi_port(port, intel_dsi->ports) {
 
/* 1. Enable MIPI PHY transparent latch */
val = I915_READ(BXT_MIPI_PORT_CTRL(port));
I915_WRITE(BXT_MIPI_PORT_CTRL(port), val | LP_OUTPUT_HOLD);
usleep_range(2000, 2500);
 
/* 2. Enter ULPS */
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_ENTER | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
usleep_range(2, 3);
 
/* 3. Exit ULPS */
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_EXIT | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
usleep_range(1000, 1500);
 
/* Clear ULPS and set device ready */
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= DEVICE_READY;
I915_WRITE(MIPI_DEVICE_READY(port), val);
}
}
 
static void vlv_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
 
DRM_DEBUG_KMS("\n");
 
mutex_lock(&dev_priv->sb_lock);
/* program rcomp for compliance, reduce from 50 ohms to 45 ohms;
* needed every time after power gating */
vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
 
/* band gap reset is needed every time we power gate */
band_gap_reset(dev_priv);
 
I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
for_each_dsi_port(port, intel_dsi->ports) {
 
I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_ENTER);
usleep_range(2500, 3000);
 
val = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
/* Enable MIPI PHY transparent latch.
* The control bit lives in the MIPI Port A register and is common
* to both MIPI Port A & MIPI Port C; there is no similar bit in
* the MIPI Port C reg.
*/
val = I915_READ(MIPI_PORT_CTRL(PORT_A));
I915_WRITE(MIPI_PORT_CTRL(PORT_A), val | LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
 
I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_EXIT);
usleep_range(2500, 3000);
 
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY);
usleep_range(2500, 3000);
}
}
 
static void intel_dsi_enable(struct intel_encoder *encoder)
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
 
if (IS_VALLEYVIEW(dev))
vlv_dsi_device_ready(encoder);
else if (IS_BROXTON(dev))
bxt_dsi_device_ready(encoder);
}
 
static void intel_dsi_port_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int pipe = intel_crtc->pipe;
enum port port;
u32 temp;
u32 port_ctrl;
 
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
temp = I915_READ(VLV_CHICKEN_3);
temp &= ~PIXEL_OVERLAP_CNT_MASK;
temp |= intel_dsi->pixel_overlap <<
PIXEL_OVERLAP_CNT_SHIFT;
I915_WRITE(VLV_CHICKEN_3, temp);
}
 
for_each_dsi_port(port, intel_dsi->ports) {
port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
MIPI_PORT_CTRL(port);
 
temp = I915_READ(port_ctrl);
 
temp &= ~LANE_CONFIGURATION_MASK;
temp &= ~DUAL_LINK_MODE_MASK;
 
if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) {
temp |= (intel_dsi->dual_link - 1)
<< DUAL_LINK_MODE_SHIFT;
temp |= intel_crtc->pipe ?
LANE_CONFIGURATION_DUAL_LINK_B :
LANE_CONFIGURATION_DUAL_LINK_A;
}
/* assert ip_tg_enable signal */
I915_WRITE(port_ctrl, temp | DPI_ENABLE);
POSTING_READ(port_ctrl);
}
}
 
static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 temp;
u32 port_ctrl;
 
for_each_dsi_port(port, intel_dsi->ports) {
/* de-assert ip_tg_enable signal */
port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
MIPI_PORT_CTRL(port);
temp = I915_READ(port_ctrl);
I915_WRITE(port_ctrl, temp & ~DPI_ENABLE);
POSTING_READ(port_ctrl);
}
}
 
static void intel_dsi_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
 
DRM_DEBUG_KMS("\n");
 
if (is_cmd_mode(intel_dsi))
I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
else {
if (is_cmd_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
} else {
msleep(20); /* XXX */
dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN);
for_each_dsi_port(port, intel_dsi->ports)
dpi_send_cmd(intel_dsi, TURN_ON, false, port);
msleep(100);
 
if (intel_dsi->dev.dev_ops->enable)
intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
drm_panel_enable(intel_dsi->panel);
 
wait_for_dsi_fifo_empty(intel_dsi);
for_each_dsi_port(port, intel_dsi->ports)
wait_for_dsi_fifo_empty(intel_dsi, port);
 
/* assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
temp = temp | intel_dsi->port_bits;
I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(pipe));
intel_dsi_port_enable(encoder);
}
 
intel_panel_enable_backlight(intel_dsi->attached_connector);
}
 
static void intel_dsi_pre_enable(struct intel_encoder *encoder)
172,37 → 465,44
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
enum port port;
u32 tmp;
 
DRM_DEBUG_KMS("\n");
 
/* Disable DPOunit clock gating, can stall pipe
* and we need DPLL REFA always enabled */
/* Panel Enable over CRC PMIC */
if (intel_dsi->gpio_panel)
gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
 
msleep(intel_dsi->panel_on_delay);
 
if (IS_VALLEYVIEW(dev)) {
/*
* Disable DPOunit clock gating, can stall pipe
* and we need DPLL REFA always enabled
*/
tmp = I915_READ(DPLL(pipe));
tmp |= DPLL_REFA_CLK_ENABLE_VLV;
tmp |= DPLL_REF_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), tmp);
 
/* update the hw state for DPLL */
intel_crtc->config.dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
DPLL_REFA_CLK_ENABLE_VLV;
intel_crtc->config->dpll_hw_state.dpll =
DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
 
tmp = I915_READ(DSPCLK_GATE_D);
tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, tmp);
}
 
/* put device in ready state */
intel_dsi_device_ready(encoder);
 
msleep(intel_dsi->panel_on_delay);
drm_panel_prepare(intel_dsi->panel);
 
if (intel_dsi->dev.dev_ops->panel_reset)
intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
for_each_dsi_port(port, intel_dsi->ports)
wait_for_dsi_fifo_empty(intel_dsi, port);
 
if (intel_dsi->dev.dev_ops->send_otp_cmds)
intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
 
wait_for_dsi_fifo_empty(intel_dsi);
 
/* Enable port in pre-enable phase itself because, as per the hw
* team's recommendation, the port should be enabled before plane & pipe */
intel_dsi_enable(encoder);
221,12 → 521,16
static void intel_dsi_pre_disable(struct intel_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
 
DRM_DEBUG_KMS("\n");
 
intel_panel_disable_backlight(intel_dsi->attached_connector);
 
if (is_vid_mode(intel_dsi)) {
/* Send Shutdown command to the panel in LP mode */
dpi_send_cmd(intel_dsi, SHUTDOWN, DPI_LP_MODE_EN);
for_each_dsi_port(port, intel_dsi->ports)
dpi_send_cmd(intel_dsi, SHUTDOWN, false, port);
msleep(10);
}
}
235,79 → 539,88
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int pipe = intel_crtc->pipe;
enum port port;
u32 temp;
 
DRM_DEBUG_KMS("\n");
 
if (is_vid_mode(intel_dsi)) {
wait_for_dsi_fifo_empty(intel_dsi);
for_each_dsi_port(port, intel_dsi->ports)
wait_for_dsi_fifo_empty(intel_dsi, port);
 
/* de-assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(pipe));
 
intel_dsi_port_disable(encoder);
msleep(2);
}
 
for_each_dsi_port(port, intel_dsi->ports) {
/* Panel commands can be sent when clock is in LP11 */
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x0);
I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
 
temp = I915_READ(MIPI_CTRL(pipe));
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
I915_WRITE(MIPI_CTRL(pipe), temp |
intel_dsi->escape_clk_div <<
ESCAPE_CLOCK_DIVIDER_SHIFT);
intel_dsi_reset_clocks(encoder, port);
I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
 
I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP);
 
temp = I915_READ(MIPI_DSI_FUNC_PRG(pipe));
temp = I915_READ(MIPI_DSI_FUNC_PRG(port));
temp &= ~VID_MODE_FORMAT_MASK;
I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), temp);
I915_WRITE(MIPI_DSI_FUNC_PRG(port), temp);
 
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x1);
 
I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
}
/* if disable packets are sent before the shutdown packet, then on
* some subsequent enable sequence a "send turn on packet" error is observed */
if (intel_dsi->dev.dev_ops->disable)
intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
drm_panel_disable(intel_dsi->panel);
 
wait_for_dsi_fifo_empty(intel_dsi);
for_each_dsi_port(port, intel_dsi->ports)
wait_for_dsi_fifo_empty(intel_dsi, port);
}
 
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
int pipe = intel_crtc->pipe;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
u32 port_ctrl = 0;
 
DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
 
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
ULPS_STATE_ENTER);
usleep_range(2000, 2500);
 
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
ULPS_STATE_EXIT);
usleep_range(2000, 2500);
 
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
ULPS_STATE_ENTER);
usleep_range(2000, 2500);
 
if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
if (IS_BROXTON(dev))
port_ctrl = BXT_MIPI_PORT_CTRL(port);
else if (IS_VALLEYVIEW(dev))
/* Common bit for both MIPI Port A & MIPI Port C */
port_ctrl = MIPI_PORT_CTRL(PORT_A);
 
/* Wait until the clock lanes are in the LP-00 state, for MIPI
* Port A only. MIPI Port C has no similar bit for checking.
*/
if (wait_for(((I915_READ(port_ctrl) & AFE_LATCHOUT)
== 0x00000), 30))
DRM_ERROR("DSI LP not going Low\n");
 
val = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
/* Disable MIPI PHY transparent latch */
val = I915_READ(port_ctrl);
I915_WRITE(port_ctrl, val & ~LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
 
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
usleep_range(2000, 2500);
}
 
vlv_disable_dsi_pll(encoder);
intel_disable_dsi_pll(encoder);
}
 
static void intel_dsi_post_disable(struct intel_encoder *encoder)
326,11 → 639,14
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
 
if (intel_dsi->dev.dev_ops->disable_panel_power)
intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
drm_panel_unprepare(intel_dsi->panel);
 
msleep(intel_dsi->panel_off_delay);
msleep(intel_dsi->panel_pwr_cycle_delay);
 
/* Panel Disable over CRC PMIC */
if (intel_dsi->gpio_panel)
gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
}
 
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
337,9 → 653,11
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
u32 port, func;
enum pipe p;
u32 dpi_enabled, func, ctrl_reg;
enum port port;
 
DRM_DEBUG_KMS("\n");
 
348,13 → 666,24
return false;
 
/* XXX: this only works for one DSI output */
for (p = PIPE_A; p <= PIPE_B; p++) {
port = I915_READ(MIPI_PORT_CTRL(p));
func = I915_READ(MIPI_DSI_FUNC_PRG(p));
for_each_dsi_port(port, intel_dsi->ports) {
func = I915_READ(MIPI_DSI_FUNC_PRG(port));
ctrl_reg = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
MIPI_PORT_CTRL(port);
dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
 
if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
*pipe = p;
/* Due to some hardware limitations on BYT, the MIPI Port C DPI
* Enable bit does not get set. To check whether DSI Port C was
* enabled by the BIOS, check the Pipe B enable bit instead.
*/
if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
(port == PORT_C))
dpi_enabled = I915_READ(PIPECONF(PIPE_B)) &
PIPECONF_ENABLE;
 
if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
*pipe = port == PORT_A ? PIPE_A : PIPE_B;
return true;
}
}
364,9 → 693,9
}
 
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
u32 pclk;
u32 pclk = 0;
DRM_DEBUG_KMS("\n");
 
/*
375,11 → 704,15
*/
pipe_config->dpll_hw_state.dpll_md = 0;
 
if (IS_BROXTON(encoder->base.dev))
pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
else if (IS_VALLEYVIEW(encoder->base.dev))
pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
 
if (!pclk)
return;
 
pipe_config->adjusted_mode.crtc_clock = pclk;
pipe_config->base.adjusted_mode.crtc_clock = pclk;
pipe_config->port_clock = pclk;
}
 
389,7 → 722,7
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
DRM_DEBUG_KMS("\n");
 
403,9 → 736,11
return MODE_PANEL;
if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
if (fixed_mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
}
 
return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
return MODE_OK;
}
 
/* return txclkesc cycles in terms of divider and duration in us */
431,27 → 766,36
}
 
static void set_dsi_timings(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
const struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int pipe = intel_crtc->pipe;
unsigned int bpp = intel_crtc->config.pipe_bpp;
enum port port;
unsigned int bpp = intel_crtc->config->pipe_bpp;
unsigned int lane_count = intel_dsi->lane_count;
 
u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
 
hactive = mode->hdisplay;
hfp = mode->hsync_start - mode->hdisplay;
hsync = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;
hactive = adjusted_mode->crtc_hdisplay;
hfp = adjusted_mode->crtc_hsync_start - adjusted_mode->crtc_hdisplay;
hsync = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
hbp = adjusted_mode->crtc_htotal - adjusted_mode->crtc_hsync_end;
 
vfp = mode->vsync_start - mode->vdisplay;
vsync = mode->vsync_end - mode->vsync_start;
vbp = mode->vtotal - mode->vsync_end;
if (intel_dsi->dual_link) {
hactive /= 2;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
hactive += intel_dsi->pixel_overlap;
hfp /= 2;
hsync /= 2;
hbp /= 2;
}
 
vfp = adjusted_mode->crtc_vsync_start - adjusted_mode->crtc_vdisplay;
vsync = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
vbp = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_end;
 
/* horizontal values are in terms of high speed byte clock */
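/*
* For reference, txbyteclkhs() roughly computes (a sketch matching
* the upstream helper):
*
*	DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
*				  8 * 100), lane_count)
*/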
hactive = txbyteclkhs(hactive, bpp, lane_count,
intel_dsi->burst_mode_ratio);
460,19 → 804,36
intel_dsi->burst_mode_ratio);
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
 
I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
for_each_dsi_port(port, intel_dsi->ports) {
if (IS_BROXTON(dev)) {
/*
* Program hdisplay and vdisplay on the MIPI transcoder.
* This is different from the calculated hactive and
* vactive, as those are computed on a per-channel basis,
* whereas these values should be based on the full resolution.
*/
I915_WRITE(BXT_MIPI_TRANS_HACTIVE(port),
adjusted_mode->crtc_hdisplay);
I915_WRITE(BXT_MIPI_TRANS_VACTIVE(port),
adjusted_mode->crtc_vdisplay);
I915_WRITE(BXT_MIPI_TRANS_VTOTAL(port),
adjusted_mode->crtc_vtotal);
}
 
/* meaningful for video mode non-burst sync pulse mode only, can be zero
* for non-burst sync events and burst modes */
I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive);
I915_WRITE(MIPI_HFP_COUNT(port), hfp);
 
/* meaningful for video mode non-burst sync pulse mode only,
* can be zero for non-burst sync events and burst modes */
I915_WRITE(MIPI_HSYNC_PADDING_COUNT(port), hsync);
I915_WRITE(MIPI_HBP_COUNT(port), hbp);
 
/* vertical values are in terms of lines */
I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
I915_WRITE(MIPI_VFP_COUNT(port), vfp);
I915_WRITE(MIPI_VSYNC_PADDING_COUNT(port), vsync);
I915_WRITE(MIPI_VBP_COUNT(port), vbp);
}
}
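/*
 * Worked example for the byte-clock conversion above (hypothetical numbers;
 * txbyteclkhs() is defined earlier in this file and is assumed here to
 * compute roughly
 * DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio, 8 * 100),
 * lane_count)):
 *
 *   hactive = 1366 pixels, bpp = 24, lane_count = 4, burst ratio = 100
 *   bytes = DIV_ROUND_UP(1366 * 24 * 100, 800) = 4098
 *   hactive in byte clocks = DIV_ROUND_UP(4098, 4) = 1025
 */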
 
static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
{
481,34 → 842,67
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
int pipe = intel_crtc->pipe;
unsigned int bpp = intel_crtc->config.pipe_bpp;
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
enum port port;
unsigned int bpp = intel_crtc->config->pipe_bpp;
u32 val, tmp;
u16 mode_hdisplay;
 
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe));
 
/* escape clock divider, 20MHz, shared for A and C. device ready must be
* off when doing this! txclkesc? */
tmp = I915_READ(MIPI_CTRL(0));
mode_hdisplay = adjusted_mode->crtc_hdisplay;
 
if (intel_dsi->dual_link) {
mode_hdisplay /= 2;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
mode_hdisplay += intel_dsi->pixel_overlap;
}
 
for_each_dsi_port(port, intel_dsi->ports) {
if (IS_VALLEYVIEW(dev)) {
/*
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
*/
tmp = I915_READ(MIPI_CTRL(PORT_A));
tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
I915_WRITE(MIPI_CTRL(PORT_A), tmp |
ESCAPE_CLOCK_DIVIDER_1);
 
/* read request priority is per pipe */
tmp = I915_READ(MIPI_CTRL(pipe));
tmp = I915_READ(MIPI_CTRL(port));
tmp &= ~READ_REQUEST_PRIORITY_MASK;
I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
I915_WRITE(MIPI_CTRL(port), tmp |
READ_REQUEST_PRIORITY_HIGH);
} else if (IS_BROXTON(dev)) {
/*
* FIXME:
* BXT can connect any PIPE to any MIPI port.
* Select the pipe based on the MIPI port read from
* VBT for now. Pick PIPE A for MIPI port A and C
* for port C.
*/
tmp = I915_READ(MIPI_CTRL(port));
tmp &= ~BXT_PIPE_SELECT_MASK;
 
if (port == PORT_A)
tmp |= BXT_PIPE_SELECT_A;
else if (port == PORT_C)
tmp |= BXT_PIPE_SELECT_C;
 
I915_WRITE(MIPI_CTRL(port), tmp);
}
 
/* XXX: why here, why like this? handling in irq handler?! */
I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff);
I915_WRITE(MIPI_INTR_EN(port), 0xffffffff);
 
I915_WRITE(MIPI_DPHY_PARAM(pipe), intel_dsi->dphy_reg);
I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg);
 
I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
I915_WRITE(MIPI_DPI_RESOLUTION(port),
adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT |
mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
}
 
set_dsi_timings(encoder, adjusted_mode);
 
522,59 → 916,75
/* XXX: cross-check bpp vs. pixel format? */
val |= intel_dsi->pixel_format;
}
I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
 
/* timeouts for recovery. one frame IIUC. if counter expires, EOT and
* stop state. */
tmp = 0;
if (intel_dsi->eotp_pkt == 0)
tmp |= EOT_DISABLE;
if (intel_dsi->clock_stop)
tmp |= CLOCKSTOP;
 
for_each_dsi_port(port, intel_dsi->ports) {
I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
 
/* timeouts for recovery. one frame IIUC. if counter expires,
* EOT and stop state. */
 
/*
* In burst mode, value greater than one DPI line Time in byte clock
* (txbyteclkhs) To timeout this timer 1+ of the above said value is
* recommended.
* In burst mode, a value greater than one DPI line time in byte
* clocks (txbyteclkhs) is needed. To time out this timer, 1 more
* than the above value is recommended.
*
* In non-burst mode, Value greater than one DPI frame time in byte
* clock(txbyteclkhs) To timeout this timer 1+ of the above said value
* is recommended.
* In non-burst mode, a value greater than one DPI frame time in
* byte clocks (txbyteclkhs) is needed. To time out this timer, 1
* more than the above value is recommended.
*
* In DBI only mode, value greater than one DBI frame time in byte
* clock(txbyteclkhs) To timeout this timer 1+ of the above said value
* is recommended.
* In DBI only mode, a value greater than one DBI frame time in
* byte clocks (txbyteclkhs) is needed. To time out this timer, 1
* more than the above value is recommended.
*/
 
if (is_vid_mode(intel_dsi) &&
intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
txbyteclkhs(adjusted_mode->htotal, bpp,
I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
txbyteclkhs(adjusted_mode->crtc_htotal, bpp,
intel_dsi->lane_count,
intel_dsi->burst_mode_ratio) + 1);
} else {
I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
txbyteclkhs(adjusted_mode->vtotal *
adjusted_mode->htotal,
I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
txbyteclkhs(adjusted_mode->crtc_vtotal *
adjusted_mode->crtc_htotal,
bpp, intel_dsi->lane_count,
intel_dsi->burst_mode_ratio) + 1);
}
I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), intel_dsi->rst_timer_val);
I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout);
I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port),
intel_dsi->turn_arnd_val);
I915_WRITE(MIPI_DEVICE_RESET_TIMER(port),
intel_dsi->rst_timer_val);
 
/* dphy stuff */
 
/* in terms of low power clock */
I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(intel_dsi->escape_clk_div, 100));
I915_WRITE(MIPI_INIT_COUNT(port),
txclkesc(intel_dsi->escape_clk_div, 100));
 
val = 0;
if (intel_dsi->eotp_pkt == 0)
val |= EOT_DISABLE;
if (IS_BROXTON(dev) && (!intel_dsi->dual_link)) {
/*
* BXT spec says write MIPI_INIT_COUNT for
* both the ports, even if only one is
* getting used. So write the other port
* if not in dual link mode.
*/
I915_WRITE(MIPI_INIT_COUNT(port ==
PORT_A ? PORT_C : PORT_A),
intel_dsi->init_count);
}
 
if (intel_dsi->clock_stop)
val |= CLOCKSTOP;
 
/* recovery disables */
I915_WRITE(MIPI_EOT_DISABLE(pipe), val);
I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
 
/* in terms of low power clock */
I915_WRITE(MIPI_INIT_COUNT(pipe), intel_dsi->init_count);
I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
 
/* in terms of txbyteclkhs. actual high to low switch +
* MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
581,37 → 991,39
*
* XXX: write MIPI_STOP_STATE_STALL?
*/
I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe),
I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(port),
intel_dsi->hs_to_lp_count);
 
/* XXX: low power clock equivalence in terms of byte clock. the number
* of byte clocks occupied in one low power clock. based on txbyteclkhs
* and txclkesc. txclkesc time / txbyteclk time * (105 +
* MIPI_STOP_STATE_STALL) / 105.???
/* XXX: low power clock equivalence in terms of byte clock.
* the number of byte clocks occupied in one low power clock.
* based on txbyteclkhs and txclkesc.
* txclkesc time / txbyteclk time *
* (105 + MIPI_STOP_STATE_STALL) / 105.???
*/
I915_WRITE(MIPI_LP_BYTECLK(pipe), intel_dsi->lp_byte_clk);
I915_WRITE(MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk);
 
/* the bw essential for transmitting 16 long packets containing 252
* bytes meant for dcs write memory command is programmed in this
* register in terms of byte clocks. based on dsi transfer rate and the
* number of lanes configured the time taken to transmit 16 long packets
* in a dsi stream varies. */
I915_WRITE(MIPI_DBI_BW_CTRL(pipe), intel_dsi->bw_timer);
/* the bw essential for transmitting 16 long packets containing
* 252 bytes meant for dcs write memory command is programmed in
* this register in terms of byte clocks. based on dsi transfer
* rate and the number of lanes configured the time taken to
* transmit 16 long packets in a dsi stream varies. */
I915_WRITE(MIPI_DBI_BW_CTRL(port), intel_dsi->bw_timer);
 
I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
 
if (is_vid_mode(intel_dsi))
/* Some panels might have resolution which is not a multiple of
* 64 like 1366 x 768. Enable RANDOM resolution support for such
* panels by default */
I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
/* Some panels might have a resolution which is not a
* multiple of 64, like 1366 x 768. Enable RANDOM
* resolution support for such panels by default. */
I915_WRITE(MIPI_VIDEO_MODE_FORMAT(port),
intel_dsi->video_frmt_cfg_bits |
intel_dsi->video_mode_format |
IP_TG_CONFIG |
RANDOM_DPI_DISPLAY_RESOLUTION);
}
}
 
static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
{
618,27 → 1030,14
DRM_DEBUG_KMS("\n");
 
intel_dsi_prepare(encoder);
intel_enable_dsi_pll(encoder);
 
vlv_enable_dsi_pll(encoder);
}
 
static enum drm_connector_status
intel_dsi_detect(struct drm_connector *connector, bool force)
{
struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
struct intel_encoder *intel_encoder = &intel_dsi->base;
enum intel_display_power_domain power_domain;
enum drm_connector_status connector_status;
struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
 
DRM_DEBUG_KMS("\n");
power_domain = intel_display_port_power_domain(intel_encoder);
 
intel_display_power_get(dev_priv, power_domain);
connector_status = intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
intel_display_power_put(dev_priv, power_domain);
 
return connector_status;
return connector_status_connected;
}
 
static int intel_dsi_get_modes(struct drm_connector *connector)
664,7 → 1063,7
return 1;
}
 
static void intel_dsi_destroy(struct drm_connector *connector)
static void intel_dsi_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
 
674,8 → 1073,25
kfree(connector);
}
 
static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
if (intel_dsi->panel) {
drm_panel_detach(intel_dsi->panel);
/* XXX: Logically this call belongs in the panel driver. */
drm_panel_remove(intel_dsi->panel);
}
 
/* dispose of the gpios */
if (intel_dsi->gpio_panel)
gpiod_put(intel_dsi->gpio_panel);
 
intel_encoder_destroy(encoder);
}
 
static const struct drm_encoder_funcs intel_dsi_funcs = {
.destroy = intel_encoder_destroy,
.destroy = intel_dsi_encoder_destroy,
};
 
static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
685,10 → 1101,13
};
 
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.dpms = intel_connector_dpms,
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dsi_detect,
.destroy = intel_dsi_destroy,
.destroy = intel_dsi_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_connector_atomic_get_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
 
void intel_dsi_init(struct drm_device *dev)
698,9 → 1117,9
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *scan, *fixed_mode = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
const struct intel_dsi_device *dsi;
enum port port;
unsigned int i;
 
DRM_DEBUG_KMS("\n");
720,7 → 1139,7
if (!intel_dsi)
return;
 
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_dsi);
return;
735,7 → 1154,6
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
 
/* XXX: very likely not all of these are needed */
intel_encoder->hot_plug = intel_dsi_hot_plug;
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
intel_encoder->pre_enable = intel_dsi_pre_enable;
748,22 → 1166,56
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;
 
for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
dsi = &intel_dsi_devices[i];
intel_dsi->dev = *dsi;
/* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
intel_encoder->crtc_mask = (1 << PIPE_A);
intel_dsi->ports = (1 << PORT_A);
} else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
intel_encoder->crtc_mask = (1 << PIPE_B);
intel_dsi->ports = (1 << PORT_C);
}
 
if (dsi->dev_ops->init(&intel_dsi->dev))
if (dev_priv->vbt.dsi.config->dual_link)
intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
 
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
struct intel_dsi_host *host;
 
host = intel_dsi_host_init(intel_dsi, port);
if (!host)
goto err;
 
intel_dsi->dsi_hosts[port] = host;
}
 
for (i = 0; i < ARRAY_SIZE(intel_dsi_drivers); i++) {
intel_dsi->panel = intel_dsi_drivers[i].init(intel_dsi,
intel_dsi_drivers[i].panel_id);
if (intel_dsi->panel)
break;
}
 
if (i == ARRAY_SIZE(intel_dsi_devices)) {
if (!intel_dsi->panel) {
DRM_DEBUG_KMS("no device found\n");
goto err;
}
 
/*
* In case of BYT with CRC PMIC, we need to use GPIO for
* Panel control.
*/
if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
intel_dsi->gpio_panel =
gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);
 
if (IS_ERR(intel_dsi->gpio_panel)) {
DRM_ERROR("Failed to own gpio for panel control\n");
intel_dsi->gpio_panel = NULL;
}
}
 
intel_encoder->type = INTEL_OUTPUT_DSI;
intel_encoder->crtc_mask = (1 << 0); /* XXX */
 
intel_encoder->cloneable = 0;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
778,14 → 1230,25
 
drm_connector_register(connector);
 
fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
drm_panel_attach(intel_dsi->panel, connector);
 
mutex_lock(&dev->mode_config.mutex);
drm_panel_get_modes(intel_dsi->panel);
list_for_each_entry(scan, &connector->probed_modes, head) {
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
break;
}
}
mutex_unlock(&dev->mode_config.mutex);
 
if (!fixed_mode) {
DRM_DEBUG_KMS("no fixed mode\n");
goto err;
}
 
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE);
 
return;
 
/drivers/video/drm/i915/intel_dsi.h
26,58 → 26,30
 
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include "intel_drv.h"
 
struct intel_dsi_device {
unsigned int panel_id;
const char *name;
const struct intel_dsi_dev_ops *dev_ops;
void *dev_priv;
};
/* Dual Link support */
#define DSI_DUAL_LINK_NONE 0
#define DSI_DUAL_LINK_FRONT_BACK 1
#define DSI_DUAL_LINK_PIXEL_ALT 2
 
struct intel_dsi_dev_ops {
bool (*init)(struct intel_dsi_device *dsi);
struct intel_dsi_host;
 
void (*panel_reset)(struct intel_dsi_device *dsi);
 
void (*disable_panel_power)(struct intel_dsi_device *dsi);
 
/* one time programmable commands if needed */
void (*send_otp_cmds)(struct intel_dsi_device *dsi);
 
/* This callback must be able to assume DSI commands can be sent */
void (*enable)(struct intel_dsi_device *dsi);
 
/* This callback must be able to assume DSI commands can be sent */
void (*disable)(struct intel_dsi_device *dsi);
 
int (*mode_valid)(struct intel_dsi_device *dsi,
struct drm_display_mode *mode);
 
bool (*mode_fixup)(struct intel_dsi_device *dsi,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
 
void (*mode_set)(struct intel_dsi_device *dsi,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
 
enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
 
bool (*get_hw_state)(struct intel_dsi_device *dev);
 
struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
 
void (*destroy) (struct intel_dsi_device *dsi);
};
 
struct intel_dsi {
struct intel_encoder base;
 
struct intel_dsi_device dev;
struct drm_panel *panel;
struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
 
/* GPIO Desc for CRC based Panel control */
struct gpio_desc *gpio_panel;
 
struct intel_connector *attached_connector;
 
/* bit mask of ports being driven */
u16 ports;
 
/* if true, use HS mode, otherwise LP */
bool hs;
 
101,6 → 73,8
u8 clock_stop;
 
u8 escape_clk_div;
u8 dual_link;
u8 pixel_overlap;
u32 port_bits;
u32 bw_timer;
u32 dphy_reg;
127,15 → 101,36
u16 panel_pwr_cycle_delay;
};
 
struct intel_dsi_host {
struct mipi_dsi_host base;
struct intel_dsi *intel_dsi;
enum port port;
 
/* our little hack */
struct mipi_dsi_device *device;
};
 
static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
{
return container_of(h, struct intel_dsi_host, base);
}
 
#define for_each_dsi_port(__port, __ports_mask) \
for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
if ((__ports_mask) & (1 << (__port)))
 
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_dsi, base.base);
}
 
extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
enum port port);
 
extern struct intel_dsi_dev_ops vbt_generic_dsi_display_ops;
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
 
#endif /* _INTEL_DSI_H */
/drivers/video/drm/i915/intel_dsi_panel_vbt.c
28,15 → 28,25
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
#include <drm/drm_panel.h>
#include <linux/slab.h>
#include <video/mipi_display.h>
//#include <asm/intel-mid.h>
#include <asm/intel-mid.h>
#include <video/mipi_display.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dsi_cmd.h"
 
struct vbt_panel {
struct drm_panel panel;
struct intel_dsi *intel_dsi;
};
 
static inline struct vbt_panel *to_vbt_panel(struct drm_panel *panel)
{
return container_of(panel, struct vbt_panel, panel);
}
 
#define MIPI_TRANSFER_MODE_SHIFT 0
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
#define MIPI_PORT_SHIFT 3
94,34 → 104,59
{ GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0}
};
 
static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
static inline enum port intel_dsi_seq_port_to_port(u8 port)
{
u8 type, byte, mode, vc, port;
return port ? PORT_C : PORT_A;
}
 
static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
const u8 *data)
{
struct mipi_dsi_device *dsi_device;
u8 type, flags, seq_port;
u16 len;
enum port port;
 
byte = *data++;
mode = (byte >> MIPI_TRANSFER_MODE_SHIFT) & 0x1;
vc = (byte >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 0x3;
port = (byte >> MIPI_PORT_SHIFT) & 0x3;
 
/* LP or HS mode */
intel_dsi->hs = mode;
 
/* get packet type and increment the pointer */
flags = *data++;
type = *data++;
 
len = *((u16 *) data);
data += 2;
 
seq_port = (flags >> MIPI_PORT_SHIFT) & 3;
 
/* For DSI single link on Ports A & C, the seq_port value parsed from
* Sequence Block #53 of the VBT has been set to 0. Reads/writes of
* packets for a DSI single link on Port A or Port C will then be based
* on the DVO port from VBT block 2.
*/
if (intel_dsi->ports == (1 << PORT_C))
port = PORT_C;
else
port = intel_dsi_seq_port_to_port(seq_port);
 
dsi_device = intel_dsi->dsi_hosts[port]->device;
if (!dsi_device) {
DRM_DEBUG_KMS("no dsi device for port %c\n", port_name(port));
goto out;
}
 
if ((flags >> MIPI_TRANSFER_MODE_SHIFT) & 1)
dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM;
else
dsi_device->mode_flags |= MIPI_DSI_MODE_LPM;
 
dsi_device->channel = (flags >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 3;
 
switch (type) {
case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
dsi_vc_generic_write_0(intel_dsi, vc);
mipi_dsi_generic_write(dsi_device, NULL, 0);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
dsi_vc_generic_write_1(intel_dsi, vc, *data);
mipi_dsi_generic_write(dsi_device, data, 1);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
dsi_vc_generic_write_2(intel_dsi, vc, *data, *(data + 1));
mipi_dsi_generic_write(dsi_device, data, 2);
break;
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
129,30 → 164,31
DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
dsi_vc_generic_write(intel_dsi, vc, data, len);
mipi_dsi_generic_write(dsi_device, data, len);
break;
case MIPI_DSI_DCS_SHORT_WRITE:
dsi_vc_dcs_write_0(intel_dsi, vc, *data);
mipi_dsi_dcs_write_buffer(dsi_device, data, 1);
break;
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
dsi_vc_dcs_write_1(intel_dsi, vc, *data, *(data + 1));
mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
break;
case MIPI_DSI_DCS_READ:
DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
dsi_vc_dcs_write(intel_dsi, vc, data, len);
mipi_dsi_dcs_write_buffer(dsi_device, data, len);
break;
}
 
out:
data += len;
 
return data;
}
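/*
 * Sketch of one VBT "send packet" element as consumed above (layout inferred
 * from the shifts and masks in this file; treat as an illustration):
 *
 *   data[0]    flags: bit 0    transfer mode (1 = HS, 0 = LP)
 *                     bits 1-2 virtual channel
 *                     bits 3-4 sequence port
 *   data[1]    packet type (a MIPI_DSI_* value from <video/mipi_display.h>)
 *   data[2-3]  payload length, read as a native-endian u16
 *   data[4..]  payload; skipped with "data += len" even when no DSI device
 *              is found, so the sequence pointer stays in sync.
 */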
 
static u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, u8 *data)
static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
u32 delay = *((u32 *) data);
u32 delay = *((const u32 *) data);
 
usleep_range(delay, delay + 10);
data += 4;
160,7 → 196,7
return data;
}
 
static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data)
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
u8 gpio, action;
u16 function, pad;
176,7 → 212,7
function = gtable[gpio].function_reg;
pad = gtable[gpio].pad_reg;
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
if (!gtable[gpio].init) {
/* program the function */
/* FIXME: remove constant below */
188,12 → 224,13
 
/* pull up/down */
vlv_gpio_nc_write(dev_priv, pad, val);
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
 
return data;
}
 
typedef u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi, u8 *data);
typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
const u8 *data);
static const fn_mipi_elem_exec exec_elem[] = {
NULL, /* reserved */
mipi_exec_send_packet,
217,13 → 254,12
"MIPI_SEQ_DEASSERT_RESET"
};
 
static void generic_exec_sequence(struct intel_dsi *intel_dsi, char *sequence)
static void generic_exec_sequence(struct intel_dsi *intel_dsi, const u8 *data)
{
u8 *data = sequence;
fn_mipi_elem_exec mipi_elem_exec;
int index;
 
if (!sequence)
if (!data)
return;
 
DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]);
256,14 → 292,103
}
}
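/*
 * The sequence blobs executed above come from the VBT MIPI sequence block.
 * As a rough sketch (the element loop itself is elided from this hunk, so
 * this is an assumption from the visible code): data[0] is the sequence id
 * used to index seq_name[], followed by elements that each start with an
 * operation index into exec_elem[] (send packet, delay, gpio), with each
 * element handler advancing the data pointer past its own payload.
 */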
 
static bool generic_init(struct intel_dsi_device *dsi)
static int vbt_panel_prepare(struct drm_panel *panel)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const u8 *sequence;
 
sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
generic_exec_sequence(intel_dsi, sequence);
 
sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
generic_exec_sequence(intel_dsi, sequence);
 
return 0;
}
 
static int vbt_panel_unprepare(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const u8 *sequence;
 
sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
generic_exec_sequence(intel_dsi, sequence);
 
return 0;
}
 
static int vbt_panel_enable(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const u8 *sequence;
 
sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
generic_exec_sequence(intel_dsi, sequence);
 
return 0;
}
 
static int vbt_panel_disable(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const u8 *sequence;
 
sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
generic_exec_sequence(intel_dsi, sequence);
 
return 0;
}
 
static int vbt_panel_get_modes(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *mode;
 
if (!panel->connector)
return 0;
 
mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
if (!mode)
return 0;
 
mode->type |= DRM_MODE_TYPE_PREFERRED;
 
drm_mode_probed_add(panel->connector, mode);
 
return 1;
}
 
static const struct drm_panel_funcs vbt_panel_funcs = {
.disable = vbt_panel_disable,
.unprepare = vbt_panel_unprepare,
.prepare = vbt_panel_prepare,
.enable = vbt_panel_enable,
.get_modes = vbt_panel_get_modes,
};
 
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
struct vbt_panel *vbt_panel;
u32 bits_per_pixel = 24;
u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
u32 ui_num, ui_den;
273,6 → 398,7
u32 lp_to_hs_switch, hs_to_lp_switch;
u32 pclk, computed_ddr;
u16 burst_mode_ratio;
enum port port;
 
DRM_DEBUG_KMS("\n");
 
280,6 → 406,8
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
intel_dsi->pixel_format = mipi_config->videomode_color_format << 7;
intel_dsi->dual_link = mipi_config->dual_link;
intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
 
if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
bits_per_pixel = 18;
299,6 → 427,20
 
pclk = mode->clock;
 
/* In dual link mode each port needs half of the pixel clock */
if (intel_dsi->dual_link) {
pclk = pclk / 2;
 
/* we can enable pixel_overlap if needed by the panel. In this
* case we need to increase the pixel clock for the extra pixels.
*/
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
pclk += DIV_ROUND_UP(mode->vtotal *
intel_dsi->pixel_overlap *
60, 1000);
}
}
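/*
 * Worked example for the dual-link clock adjustment above (hypothetical
 * numbers): mode->clock = 148500 kHz gives pclk = 74250 kHz per port. With
 * DSI_DUAL_LINK_FRONT_BACK, vtotal = 1125 and pixel_overlap = 4, the
 * overlap adds DIV_ROUND_UP(1125 * 4 * 60, 1000) = 270 kHz, for a final
 * pclk of 74520 kHz.
 */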
 
/* Burst Mode Ratio
* Target ddr frequency from VBT / non burst ddr freq
* multiply by 100 to preserve remainder
311,7 → 453,7
if (mipi_config->target_burst_mode_freq <
computed_ddr) {
DRM_ERROR("Burst mode freq is less than computed\n");
return false;
return NULL;
}
 
burst_mode_ratio = DIV_ROUND_UP(
321,7 → 463,7
pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
} else {
DRM_ERROR("Burst mode target is not set\n");
return false;
return NULL;
}
} else
burst_mode_ratio = 100;
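/*
 * Worked example for the burst ratio above (hypothetical numbers, assuming
 * the expression elided from this hunk scales the VBT target frequency by
 * 100 and divides by computed_ddr, per the comment): computed_ddr =
 * 500000 kHz and a 513000 kHz target give
 * burst_mode_ratio = DIV_ROUND_UP(513000 * 100, 500000) = 103, and then
 * pclk = DIV_ROUND_UP(74250 * 103, 100) = 76478 kHz.
 */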
493,6 → 635,12
DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ?
"disabled" : "enabled");
DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
else
DRM_DEBUG_KMS("Dual link: NONE\n");
DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
516,110 → 664,18
intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
 
return true;
}
/* This is cheating a bit with the cleanup. */
vbt_panel = kzalloc(sizeof(*vbt_panel), GFP_KERNEL);
if (!vbt_panel)
return NULL;
 
static int generic_mode_valid(struct intel_dsi_device *dsi,
struct drm_display_mode *mode)
{
return MODE_OK;
}
vbt_panel->intel_dsi = intel_dsi;
drm_panel_init(&vbt_panel->panel);
vbt_panel->panel.funcs = &vbt_panel_funcs;
drm_panel_add(&vbt_panel->panel);
 
static bool generic_mode_fixup(struct intel_dsi_device *dsi,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode) {
return true;
/* a regular driver would get the device in probe */
for_each_dsi_port(port, intel_dsi->ports) {
mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device);
}
 
static void generic_panel_reset(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
 
generic_exec_sequence(intel_dsi, sequence);
return &vbt_panel->panel;
}
 
static void generic_disable_panel_power(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
 
generic_exec_sequence(intel_dsi, sequence);
}
 
static void generic_send_otp_cmds(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
 
generic_exec_sequence(intel_dsi, sequence);
}
 
static void generic_enable(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
 
generic_exec_sequence(intel_dsi, sequence);
}
 
static void generic_disable(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
 
generic_exec_sequence(intel_dsi, sequence);
}
 
static enum drm_connector_status generic_detect(struct intel_dsi_device *dsi)
{
return connector_status_connected;
}
 
static bool generic_get_hw_state(struct intel_dsi_device *dev)
{
return true;
}
 
static struct drm_display_mode *generic_get_modes(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->vbt.lfp_lvds_vbt_mode->type |= DRM_MODE_TYPE_PREFERRED;
return dev_priv->vbt.lfp_lvds_vbt_mode;
}
 
static void generic_destroy(struct intel_dsi_device *dsi) { }
 
/* Callbacks. We might not need them all. */
struct intel_dsi_dev_ops vbt_generic_dsi_display_ops = {
.init = generic_init,
.mode_valid = generic_mode_valid,
.mode_fixup = generic_mode_fixup,
.panel_reset = generic_panel_reset,
.disable_panel_power = generic_disable_panel_power,
.send_otp_cmds = generic_send_otp_cmds,
.enable = generic_enable,
.disable = generic_disable,
.detect = generic_detect,
.get_hw_state = generic_get_hw_state,
.get_modes = generic_get_modes,
.destroy = generic_destroy,
};
/drivers/video/drm/i915/intel_dsi_pll.c
38,6 → 38,27
#define DSI_HFP_PACKET_EXTRA_SIZE 6
#define DSI_EOTP_PACKET_SIZE 4
 
static int dsi_pixel_format_bpp(int pixel_format)
{
int bpp;
 
switch (pixel_format) {
default:
case VID_MODE_FORMAT_RGB888:
case VID_MODE_FORMAT_RGB666_LOOSE:
bpp = 24;
break;
case VID_MODE_FORMAT_RGB666:
bpp = 18;
break;
case VID_MODE_FORMAT_RGB565:
bpp = 16;
break;
}
 
return bpp;
}
 
struct dsi_mnp {
u32 dsi_pll_ctrl;
u32 dsi_pll_div;
46,8 → 67,8
static const u32 lfsr_converts[] = {
426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
71, 35 /* 91 - 92 */
106, 53, 282, 397, 454, 227, 113, 56, 284, 142, /* 81 - 90 */
71, 35, 273, 136, 324, 418, 465, 488, 500, 506 /* 91 - 100 */
};
 
#ifdef DSI_CLK_FROM_RR
65,19 → 86,7
u32 dsi_bit_clock_hz;
u32 dsi_clk;
 
switch (pixel_format) {
default:
case VID_MODE_FORMAT_RGB888:
case VID_MODE_FORMAT_RGB666_LOOSE:
bpp = 24;
break;
case VID_MODE_FORMAT_RGB666:
bpp = 18;
break;
case VID_MODE_FORMAT_RGB565:
bpp = 16;
break;
}
bpp = dsi_pixel_format_bpp(pixel_format);
 
hactive = mode->hdisplay;
vactive = mode->vdisplay;
137,22 → 146,8
static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
{
u32 dsi_clk_khz;
u32 bpp;
u32 bpp = dsi_pixel_format_bpp(pixel_format);
 
switch (pixel_format) {
default:
case VID_MODE_FORMAT_RGB888:
case VID_MODE_FORMAT_RGB666_LOOSE:
bpp = 24;
break;
case VID_MODE_FORMAT_RGB666:
bpp = 18;
break;
case VID_MODE_FORMAT_RGB565:
bpp = 16;
break;
}
 
/* DSI data rate = pixel clock * bits per pixel / lane count
pixel clock is converted from KHz to Hz */
dsi_clk_khz = DIV_ROUND_CLOSEST(pclk * bpp, lane_count);
162,59 → 157,55
 
#endif
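/*
 * Worked example for dsi_clk_from_pclk() above (hypothetical numbers): with
 * pclk = 74250 kHz, RGB888 (bpp = 24) and 4 lanes,
 * dsi_clk_khz = DIV_ROUND_CLOSEST(74250 * 24, 4) = 445500 kHz per lane.
 */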
 
static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
struct dsi_mnp *dsi_mnp, int target_dsi_clk)
{
u32 m, n, p;
u32 ref_clk;
u32 error;
u32 tmp_error;
int target_dsi_clk;
int calc_dsi_clk;
u32 calc_m;
u32 calc_p;
unsigned int calc_m = 0, calc_p = 0;
unsigned int m_min, m_max, p_min = 2, p_max = 6;
unsigned int m, n, p;
int ref_clk;
int delta = target_dsi_clk;
u32 m_seed;
 
/* dsi_clk is expected in KHZ */
if (dsi_clk < 300000 || dsi_clk > 1150000) {
/* target_dsi_clk is expected in kHz */
if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
DRM_ERROR("DSI CLK Out of Range\n");
return -ECHRNG;
}
 
if (IS_CHERRYVIEW(dev_priv)) {
ref_clk = 100000;
n = 4;
m_min = 70;
m_max = 96;
} else {
ref_clk = 25000;
target_dsi_clk = dsi_clk;
error = 0xFFFFFFFF;
tmp_error = 0xFFFFFFFF;
calc_m = 0;
calc_p = 0;
n = 1;
m_min = 62;
m_max = 92;
}
 
for (m = 62; m <= 92; m++) {
for (p = 2; p <= 6; p++) {
/* Find the optimal m and p divisors
with minimal error +/- the required clock */
calc_dsi_clk = (m * ref_clk) / p;
if (calc_dsi_clk == target_dsi_clk) {
for (m = m_min; m <= m_max && delta; m++) {
for (p = p_min; p <= p_max && delta; p++) {
/*
* Find the optimal m and p divisors with minimal delta
* +/- the required clock
*/
int calc_dsi_clk = (m * ref_clk) / (p * n);
int d = abs(target_dsi_clk - calc_dsi_clk);
if (d < delta) {
delta = d;
calc_m = m;
calc_p = p;
error = 0;
break;
} else
tmp_error = abs(target_dsi_clk - calc_dsi_clk);
 
if (tmp_error < error) {
error = tmp_error;
calc_m = m;
calc_p = p;
}
}
 
if (error == 0)
break;
}
 
/* register has log2(N1), this works fine for powers of two */
n = ffs(n) - 1;
m_seed = lfsr_converts[calc_m - 62];
n = 1;
dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
dsi_mnp->dsi_pll_div = n << DSI_PLL_N1_DIV_SHIFT |
m_seed << DSI_PLL_M1_DIV_SHIFT;
 
return 0;
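/*
 * Worked example (hypothetical numbers): on VLV, ref_clk = 25000 kHz and
 * n = 1, so a target of 600000 kHz is hit exactly by m = 72, p = 3
 * (72 * 25000 / 3 = 600000, delta = 0). The M divisor is programmed as an
 * LFSR seed, lfsr_converts[72 - 62] = 486, the N field takes
 * ffs(n) - 1 = 0, and P1 is encoded as a shifted one-hot bit. On CHV,
 * ref_clk = 100000 kHz with n = 4 yields the same effective 25000 kHz
 * comparison frequency.
 */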
235,14 → 226,18
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
 
ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
ret = dsi_calc_mnp(dev_priv, &dsi_mnp, dsi_clk);
if (ret) {
DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
return;
}
 
if (intel_dsi->ports & (1 << PORT_A))
dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
 
if (intel_dsi->ports & (1 << PORT_C))
dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
 
DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
 
251,7 → 246,7
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
}
 
void vlv_enable_dsi_pll(struct intel_encoder *encoder)
static void vlv_enable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 tmp;
258,7 → 253,7
 
DRM_DEBUG_KMS("\n");
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
 
vlv_configure_dsi_pll(encoder);
 
269,17 → 264,19
tmp |= DSI_PLL_VCO_EN;
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
 
mutex_unlock(&dev_priv->dpio_lock);
if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
DSI_PLL_LOCK, 20)) {
 
if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
mutex_unlock(&dev_priv->sb_lock);
DRM_ERROR("DSI PLL lock failed\n");
return;
}
mutex_unlock(&dev_priv->sb_lock);
 
DRM_DEBUG_KMS("DSI PLL locked\n");
}
 
void vlv_disable_dsi_pll(struct intel_encoder *encoder)
static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 tmp;
286,7 → 283,7
 
DRM_DEBUG_KMS("\n");
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
 
tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
tmp &= ~DSI_PLL_VCO_EN;
293,27 → 290,33
tmp |= DSI_PLL_LDO_GATE;
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
 
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
}
 
static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
{
int bpp;
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 val;
 
switch (pixel_format) {
default:
case VID_MODE_FORMAT_RGB888:
case VID_MODE_FORMAT_RGB666_LOOSE:
bpp = 24;
break;
case VID_MODE_FORMAT_RGB666:
bpp = 18;
break;
case VID_MODE_FORMAT_RGB565:
bpp = 16;
break;
DRM_DEBUG_KMS("\n");
 
val = I915_READ(BXT_DSI_PLL_ENABLE);
val &= ~BXT_DSI_PLL_DO_ENABLE;
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
 
/*
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
if (wait_for((I915_READ(BXT_DSI_PLL_ENABLE)
& BXT_DSI_PLL_LOCKED) == 0, 1))
DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
}
 
static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
{
int bpp = dsi_pixel_format_bpp(pixel_format);
 
WARN(bpp != pipe_bpp,
"bpp match assertion failure (expected %d, current %d)\n",
bpp, pipe_bpp);
325,21 → 328,25
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 dsi_clock, pclk;
u32 pll_ctl, pll_div;
u32 m = 0, p = 0;
u32 m = 0, p = 0, n;
int refclk = 25000;
int i;
 
DRM_DEBUG_KMS("\n");
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
 
/* mask out other bits and extract the P1 divisor */
pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
 
/* N1 divisor */
n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT;
n = 1 << n; /* register has log2(N1) */
 
/* mask out the other bits and extract the M1 divisor */
pll_div &= DSI_PLL_M1_DIV_MASK;
pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
367,7 → 374,7
 
m = i + 62;
 
dsi_clock = (m * refclk) / p;
dsi_clock = (m * refclk) / (p * n);
 
/* pixel_format and pipe_bpp should agree */
assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
376,3 → 383,222
 
return pclk;
}
 
u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
u32 pclk;
u32 dsi_clk;
u32 dsi_ratio;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 
/* Guard against division by zero */
if (!pipe_bpp) {
DRM_ERROR("Invalid BPP(0)\n");
return 0;
}
 
dsi_ratio = I915_READ(BXT_DSI_PLL_CTL) &
BXT_DSI_PLL_RATIO_MASK;
 
/* Invalid DSI ratio? */
if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
DRM_ERROR("Invalid DSI pll ratio(%u) programmed\n", dsi_ratio);
return 0;
}
 
dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
 
/* pixel_format and pipe_bpp should agree */
assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
 
pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, pipe_bpp);
 
DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk);
return pclk;
}
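/*
 * Worked example (hypothetical numbers, assuming BXT_REF_CLOCK_KHZ = 19200):
 * dsi_ratio = 64 gives dsi_clk = (64 * 19200) / 2 = 614400 kHz, and with
 * 4 lanes at 24 bpp, pclk = DIV_ROUND_CLOSEST(614400 * 4, 24) = 102400 kHz.
 */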
 
static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 temp;
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 
temp = I915_READ(MIPI_CTRL(port));
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
I915_WRITE(MIPI_CTRL(port), temp |
intel_dsi->escape_clk_div <<
ESCAPE_CLOCK_DIVIDER_SHIFT);
}
 
/* Program BXT Mipi clocks and dividers */
static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port)
{
u32 tmp;
u32 divider;
u32 dsi_rate;
u32 pll_ratio;
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Clear old configurations */
tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
 
/* Get the current (actual) DSI rate */
pll_ratio = I915_READ(BXT_DSI_PLL_CTL) &
BXT_DSI_PLL_RATIO_MASK;
dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2;
 
/* Max possible output clock is 39.5 MHz; program the value minus 1 */
divider = (dsi_rate / BXT_MAX_VAR_OUTPUT_KHZ) - 1;
tmp |= BXT_MIPI_ESCLK_VAR_DIV(port, divider);
 
/*
* The Tx escape clock must be as close to 20 MHz as possible, but
* must not exceed it. Hence select divide by 2.
*/
tmp |= BXT_MIPI_TX_ESCLK_8XDIV_BY2(port);
 
tmp |= BXT_MIPI_RX_ESCLK_8X_BY3(port);
 
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
}
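/*
 * Worked example (hypothetical numbers, assuming BXT_REF_CLOCK_KHZ = 19200
 * and BXT_MAX_VAR_OUTPUT_KHZ = 39500): pll_ratio = 64 gives
 * dsi_rate = (19200 * 64) / 2 = 614400 kHz, so the variable escape clock
 * divider is programmed as 614400 / 39500 - 1 = 14 (integer division).
 */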
 
static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u8 dsi_ratio;
u32 dsi_clk;
u32 val;
 
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
 
/*
* From the clock diagram, to get the PLL ratio divider, divide double
* the DSI link rate (i.e., the 2*8x = 16x frequency value) by the ref
* clock. Make sure to round the result 'up'.
*/
dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n");
return false;
}
 
/*
* Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x
* Spec says both have to be programmed, even if one is not getting
* used. Configure MIPI_CLOCK_CTL dividers in modeset
*/
val = I915_READ(BXT_DSI_PLL_CTL);
val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
val &= ~BXT_DSI_FREQ_SEL_MASK;
val &= ~BXT_DSI_PLL_RATIO_MASK;
val |= (dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2);
 
/* As per the recommendation from the hardware team, program
* PVD ratio = 1 if dsi_ratio <= 50.
*/
if (dsi_ratio <= 50) {
val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
val |= BXT_DSI_PLL_PVD_RATIO_1;
}
 
I915_WRITE(BXT_DSI_PLL_CTL, val);
POSTING_READ(BXT_DSI_PLL_CTL);
 
return true;
}
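/*
 * Worked example (hypothetical numbers, assuming BXT_REF_CLOCK_KHZ = 19200):
 * a 102400 kHz pclk at 24 bpp over 4 lanes needs dsi_clk = 614400 kHz, so
 * dsi_ratio = DIV_ROUND_UP(614400 * 2, 19200) = 64. Since 64 > 50, the PVD
 * ratio fix-up above is not applied.
 */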
 
static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
 
DRM_DEBUG_KMS("\n");
 
val = I915_READ(BXT_DSI_PLL_ENABLE);
 
if (val & BXT_DSI_PLL_DO_ENABLE) {
WARN(1, "DSI PLL already enabled. Disabling it.\n");
val &= ~BXT_DSI_PLL_DO_ENABLE;
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
}
 
/* Configure PLL values */
if (!bxt_configure_dsi_pll(encoder)) {
DRM_ERROR("Configure DSI PLL failed, abort PLL enable\n");
return;
}
 
/* Program TX, RX, Dphy clocks */
for_each_dsi_port(port, intel_dsi->ports)
bxt_dsi_program_clocks(encoder->base.dev, port);
 
/* Enable DSI PLL */
val = I915_READ(BXT_DSI_PLL_ENABLE);
val |= BXT_DSI_PLL_DO_ENABLE;
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
 
/* Timeout and fail if PLL not locked */
if (wait_for(I915_READ(BXT_DSI_PLL_ENABLE) & BXT_DSI_PLL_LOCKED, 1)) {
DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
return;
}
 
DRM_DEBUG_KMS("DSI PLL locked\n");
}
 
void intel_enable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
 
if (IS_VALLEYVIEW(dev))
vlv_enable_dsi_pll(encoder);
else if (IS_BROXTON(dev))
bxt_enable_dsi_pll(encoder);
}
 
void intel_disable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
 
if (IS_VALLEYVIEW(dev))
vlv_disable_dsi_pll(encoder);
else if (IS_BROXTON(dev))
bxt_disable_dsi_pll(encoder);
}
 
static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 tmp;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Clear old configurations */
tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
 
void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
struct drm_device *dev = encoder->base.dev;
 
if (IS_BROXTON(dev))
bxt_dsi_reset_clocks(encoder, port);
else if (IS_VALLEYVIEW(dev))
vlv_dsi_reset_clocks(encoder, port);
}
/drivers/video/drm/i915/intel_dvo.c
27,6 → 27,7
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
79,7 → 80,7
.name = "ch7017",
.dvo_reg = DVOC,
.slave_addr = 0x75,
.gpio = GMBUS_PORT_DPB,
.gpio = GMBUS_PIN_DPB,
.dev_ops = &ch7017_ops,
},
{
96,7 → 97,8
 
struct intel_dvo_device dev;
 
struct drm_display_mode *panel_fixed_mode;
struct intel_connector *attached_connector;
 
bool panel_wants_dither;
};
 
144,7 → 146,7
}
 
static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
160,9 → 162,9
else
flags |= DRM_MODE_FLAG_NVSYNC;
 
pipe_config->adjusted_mode.flags |= flags;
pipe_config->base.adjusted_mode.flags |= flags;
 
pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
 
static void intel_disable_dvo(struct intel_encoder *encoder)
186,8 → 188,8
u32 temp = I915_READ(dvo_reg);
 
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&crtc->config.requested_mode,
&crtc->config.adjusted_mode);
&crtc->config->base.mode,
&crtc->config->base.adjusted_mode);
 
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
195,55 → 197,15
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
 
/* Special dpms function to support cloning between dvo/sdvo/crt. */
static void intel_dvo_dpms(struct drm_connector *connector, int mode)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
struct drm_crtc *crtc;
struct intel_crtc_config *config;
 
/* dvo supports only 2 dpms states. */
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
 
if (mode == connector->dpms)
return;
 
connector->dpms = mode;
 
/* Only need to change hw state when actually enabled */
crtc = intel_dvo->base.base.crtc;
if (!crtc) {
intel_dvo->base.connectors_active = false;
return;
}
 
/* We call connector dpms manually below in case pipe dpms doesn't
* change due to cloning. */
if (mode == DRM_MODE_DPMS_ON) {
config = &to_intel_crtc(crtc)->config;
 
intel_dvo->base.connectors_active = true;
 
intel_crtc_update_dpms(crtc);
 
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
} else {
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
 
intel_dvo->base.connectors_active = false;
 
intel_crtc_update_dpms(crtc);
}
 
intel_modeset_check_state(connector->dev);
}
 
static enum drm_mode_status
intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
const struct drm_display_mode *fixed_mode =
to_intel_connector(connector)->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
int target_clock = mode->clock;
 
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
250,21 → 212,28
 
/* XXX: Validate clock range */
 
if (intel_dvo->panel_fixed_mode) {
if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay)
if (fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
 
target_clock = fixed_mode->clock;
}
 
if (target_clock > max_dotclk)
return MODE_CLOCK_HIGH;
 
return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
}
 
static bool intel_dvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
const struct drm_display_mode *fixed_mode =
intel_dvo->attached_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 
/* If we have timings from the BIOS for the panel, put them into
* the adjusted mode. The CRTC will be set up for this mode,
271,22 → 240,9
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
if (intel_dvo->panel_fixed_mode != NULL) {
#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x
C(hdisplay);
C(hsync_start);
C(hsync_end);
C(htotal);
C(vdisplay);
C(vsync_start);
C(vsync_end);
C(vtotal);
C(clock);
#undef C
if (fixed_mode)
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
 
return true;
}
 
295,7 → 251,7
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
int pipe = crtc->pipe;
u32 dvo_val;
329,11 → 285,11
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
 
/*I915_WRITE(DVOB_SRCDIM,
(adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
(adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
I915_WRITE(dvo_srcdim_reg,
(adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
(adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
/*I915_WRITE(DVOB, dvo_val);*/
I915_WRITE(dvo_reg, dvo_val);
}
354,8 → 310,9
 
static int intel_dvo_get_modes(struct drm_connector *connector)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
const struct drm_display_mode *fixed_mode =
to_intel_connector(connector)->panel.fixed_mode;
 
/* We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
363,13 → 320,13
* that's not the case.
*/
intel_ddc_get_modes(connector,
intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC));
intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC));
if (!list_empty(&connector->probed_modes))
return 1;
 
if (intel_dvo->panel_fixed_mode != NULL) {
if (fixed_mode) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode);
mode = drm_mode_duplicate(connector->dev, fixed_mode);
if (mode) {
drm_mode_probed_add(connector, mode);
return 1;
382,14 → 339,18
static void intel_dvo_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
intel_panel_fini(&to_intel_connector(connector)->panel);
kfree(connector);
}
 
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.dpms = intel_dvo_dpms,
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_connector_atomic_get_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
 
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
405,8 → 366,6
if (intel_dvo->dev.dev_ops->destroy)
intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
 
kfree(intel_dvo->panel_fixed_mode);
 
intel_encoder_destroy(encoder);
}
 
465,12 → 424,14
if (!intel_dvo)
return;
 
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_dvo);
return;
}
 
intel_dvo->attached_connector = intel_connector;
 
intel_encoder = &intel_dvo->base;
drm_encoder_init(dev, &intel_encoder->base,
&intel_dvo_enc_funcs, encoder_type);
491,17 → 452,19
struct i2c_adapter *i2c;
int gpio;
bool dvoinit;
enum pipe pipe;
uint32_t dpll[I915_MAX_PIPES];
 
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
* in the spec.
*/
if (intel_gmbus_is_port_valid(dvo->gpio))
if (intel_gmbus_is_valid_pin(dev_priv, dvo->gpio))
gpio = dvo->gpio;
else if (dvo->type == INTEL_DVO_CHIP_LVDS)
gpio = GMBUS_PORT_SSC;
gpio = GMBUS_PIN_SSC;
else
gpio = GMBUS_PORT_DPB;
gpio = GMBUS_PIN_DPB;
 
/* Set up the I2C bus necessary for the chip we're probing.
* It appears that everything is on GPIOE except for panels
516,8 → 479,23
*/
intel_gmbus_force_bit(i2c, true);
 
/* The ns2501 requires the DVO 2x clock before it will
* respond to i2c accesses, so make sure we have the
* clock enabled before we attempt to initialize the
* device.
*/
for_each_pipe(dev_priv, pipe) {
dpll[pipe] = I915_READ(DPLL(pipe));
I915_WRITE(DPLL(pipe), dpll[pipe] | DPLL_DVO_2X_MODE);
}
 
dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
 
/* restore the DVO 2x clock state to original */
for_each_pipe(dev_priv, pipe) {
I915_WRITE(DPLL(pipe), dpll[pipe]);
}
 
intel_gmbus_force_bit(i2c, false);
 
if (!dvoinit)
558,8 → 536,9
* headers, likely), so for now, just get the current
* mode being output through DVO.
*/
intel_dvo->panel_fixed_mode =
intel_dvo_get_current_mode(connector);
intel_panel_init(&intel_connector->panel,
intel_dvo_get_current_mode(connector),
NULL);
intel_dvo->panel_wants_dither = true;
}
 
/drivers/video/drm/i915/intel_fbc.c
0,0 → 1,1125
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
/**
* DOC: Frame Buffer Compression (FBC)
*
* FBC tries to save memory bandwidth (and so power consumption) by
* compressing the amount of memory used by the display. It is totally
* transparent to user space and completely handled in the kernel.
*
* The benefits of FBC are mostly visible with solid backgrounds and
* variation-less patterns: they come from keeping the memory footprint
* small and having fewer memory pages opened and accessed for refreshing
* the display.
*
* i915 is responsible for reserving stolen memory for FBC and configuring
* its offset in the proper registers. The hardware takes care of all the
* compression/decompression. However, there are many known cases where we
* have to forcibly disable it to allow proper screen updates.
*/
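 
/*
* Illustrative sketch (not part of this file): display code is expected to
* drive FBC through the entry points defined below; the call sites shown
* here are hypothetical.
*
*	// after a modeset or page-flip has settled
*	intel_fbc_update(dev_priv);	// picks a CRTC, enables or disables FBC
*
*	// around frontbuffer rendering
*	intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
*	// ... rendering happens here ...
*	intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
*/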
 
#include "intel_drv.h"
#include "i915_drv.h"
 
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
return dev_priv->fbc.enable_fbc != NULL;
}
 
/*
* On some platforms the CRTC's x:0/y:0 coordinates don't match the
* frontbuffer's x:0/y:0 coordinates, so we lie to the hardware about the plane's
* origin so the x and y offsets can actually fit the registers. As a
* consequence, the fence doesn't really start exactly at the display plane
* address we program because it starts at the real start of the buffer, so we
* have to take this into consideration here.
*/
static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
{
return crtc->base.y - crtc->adjusted_y;
}
 
static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
{
u32 fbc_ctl;
 
dev_priv->fbc.enabled = false;
 
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
return;
 
fbc_ctl &= ~FBC_CTL_EN;
I915_WRITE(FBC_CONTROL, fbc_ctl);
 
/* Wait for compressing bit to clear */
if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
 
DRM_DEBUG_KMS("disabled FBC\n");
}
 
static void i8xx_fbc_enable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int cfb_pitch;
int i;
u32 fbc_ctl;
 
dev_priv->fbc.enabled = true;
 
/* Note: fbc.threshold == 1 for i8xx */
cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
 
/* FBC_CTL wants 32B or 64B units */
if (IS_GEN2(dev_priv))
cfb_pitch = (cfb_pitch / 32) - 1;
else
cfb_pitch = (cfb_pitch / 64) - 1;
 
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG(i), 0);
 
if (IS_GEN4(dev_priv)) {
u32 fbc_ctl2;
 
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc));
}
 
/* enable it... */
fbc_ctl = I915_READ(FBC_CONTROL);
fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev_priv))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
 
DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
cfb_pitch, crtc->base.y, plane_name(crtc->plane));
}
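 
/*
* Worked example for the stride-unit conversion above: a cfb_pitch of 4096
* bytes is programmed into FBC_CTL as 4096 / 64 - 1 = 63 on gen3/gen4
* (64B units) and as 4096 / 32 - 1 = 127 on gen2 (32B units).
*/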
 
static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
{
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
 
static void g4x_fbc_enable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 dpfc_ctl;
 
dev_priv->fbc.enabled = true;
 
dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
 
I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc));
 
/* enable it... */
I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
 
static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
{
u32 dpfc_ctl;
 
dev_priv->fbc.enabled = false;
 
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
 
DRM_DEBUG_KMS("disabled FBC\n");
}
}
 
static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
{
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
 
static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
{
I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
POSTING_READ(MSG_FBC_REND_STATE);
}
 
static void ilk_fbc_enable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;
unsigned int y_offset;
 
dev_priv->fbc.enabled = true;
 
dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
threshold++;
 
switch (threshold) {
case 4:
case 3:
dpfc_ctl |= DPFC_CTL_LIMIT_4X;
break;
case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
break;
case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
break;
}
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev_priv))
dpfc_ctl |= obj->fence_reg;
 
y_offset = get_crtc_fence_y_offset(crtc);
I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
if (IS_GEN6(dev_priv)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
}
 
intel_fbc_nuke(dev_priv);
 
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
 
static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
{
u32 dpfc_ctl;
 
dev_priv->fbc.enabled = false;
 
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
 
DRM_DEBUG_KMS("disabled FBC\n");
}
}
 
static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
{
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
 
static void gen7_fbc_enable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;
 
dev_priv->fbc.enabled = true;
 
dpfc_ctl = 0;
if (IS_IVYBRIDGE(dev_priv))
dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);
 
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
threshold++;
 
switch (threshold) {
case 4:
case 3:
dpfc_ctl |= DPFC_CTL_LIMIT_4X;
break;
case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
break;
case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
break;
}
 
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
 
if (dev_priv->fbc.false_color)
dpfc_ctl |= FBC_CTL_FALSE_COLOR;
 
if (IS_IVYBRIDGE(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
HSW_FBCQ_DIS);
}
 
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
 
intel_fbc_nuke(dev_priv);
 
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
 
/**
* intel_fbc_enabled - Is FBC enabled?
* @dev_priv: i915 device instance
*
* This function is used to verify the current state of FBC.
* FIXME: This should be tracked in the plane config eventually
* instead of queried at runtime for most callers.
*/
bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
{
return dev_priv->fbc.enabled;
}
 
static void intel_fbc_enable(struct intel_crtc *crtc,
const struct drm_framebuffer *fb)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
dev_priv->fbc.enable_fbc(crtc);
 
dev_priv->fbc.crtc = crtc;
dev_priv->fbc.fb_id = fb->base.id;
dev_priv->fbc.y = crtc->base.y;
}
 
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct intel_fbc_work *work =
container_of(to_delayed_work(__work),
struct intel_fbc_work, work);
struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;
 
mutex_lock(&dev_priv->fbc.lock);
if (work == dev_priv->fbc.fbc_work) {
/* Double check that we haven't switched fb without cancelling
* the prior work.
*/
if (crtc_fb == work->fb)
intel_fbc_enable(work->crtc, work->fb);
 
dev_priv->fbc.fbc_work = NULL;
}
mutex_unlock(&dev_priv->fbc.lock);
 
kfree(work);
}
 
static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
if (dev_priv->fbc.fbc_work == NULL)
return;
 
DRM_DEBUG_KMS("cancelling pending FBC enable\n");
 
/* Synchronisation is provided by the fbc.lock mutex and the checking of
* dev_priv->fbc.fbc_work, so we can perform the cancellation
* entirely asynchronously.
*/
if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
/* tasklet was killed before being run, clean up */
kfree(dev_priv->fbc.fbc_work);
 
/* Mark the work as no longer wanted so that if it does
* wake-up (because the work was already running and waiting
* for our mutex), it will discover that it is no longer
* necessary to run.
*/
dev_priv->fbc.fbc_work = NULL;
}
 
static void intel_fbc_schedule_enable(struct intel_crtc *crtc)
{
struct intel_fbc_work *work;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
intel_fbc_cancel_work(dev_priv);
 
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL) {
DRM_ERROR("Failed to allocate FBC work structure\n");
intel_fbc_enable(crtc, crtc->base.primary->fb);
return;
}
 
work->crtc = crtc;
work->fb = crtc->base.primary->fb;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
dev_priv->fbc.fbc_work = work;
 
/* Delay the actual enabling to let pageflipping cease and the
* display to settle before starting the compression. Note that
* this delay also serves a second purpose: it allows for a
* vblank to pass after disabling the FBC before we attempt
* to modify the control registers.
*
* A more complicated solution would involve tracking vblanks
* following the termination of the page-flipping sequence
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
*
* WaFbcWaitForVBlankBeforeEnable:ilk,snb
*/
schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
 
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
intel_fbc_cancel_work(dev_priv);
 
dev_priv->fbc.disable_fbc(dev_priv);
dev_priv->fbc.crtc = NULL;
}
 
/**
* intel_fbc_disable - disable FBC
* @dev_priv: i915 device instance
*
* This function disables FBC.
*/
void intel_fbc_disable(struct drm_i915_private *dev_priv)
{
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
__intel_fbc_disable(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
}
 
/**
* intel_fbc_disable_crtc - disable FBC if it's associated with crtc
* @crtc: the CRTC
*
* This function disables FBC if it's associated with the provided CRTC.
*/
void intel_fbc_disable_crtc(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
if (dev_priv->fbc.crtc == crtc)
__intel_fbc_disable(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
}
 
const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
{
switch (reason) {
case FBC_OK:
return "FBC enabled but currently disabled in hardware";
case FBC_UNSUPPORTED:
return "unsupported by this chipset";
case FBC_NO_OUTPUT:
return "no output";
case FBC_STOLEN_TOO_SMALL:
return "not enough stolen memory";
case FBC_UNSUPPORTED_MODE:
return "mode incompatible with compression";
case FBC_MODE_TOO_LARGE:
return "mode too large for compression";
case FBC_BAD_PLANE:
return "FBC unsupported on plane";
case FBC_NOT_TILED:
return "framebuffer not tiled or fenced";
case FBC_MULTIPLE_PIPES:
return "more than one pipe active";
case FBC_MODULE_PARAM:
return "disabled per module param";
case FBC_CHIP_DEFAULT:
return "disabled per chip default";
case FBC_ROTATION:
return "rotation unsupported";
case FBC_IN_DBG_MASTER:
return "Kernel debugger is active";
case FBC_BAD_STRIDE:
return "framebuffer stride not supported";
case FBC_PIXEL_RATE:
return "pixel rate is too big";
case FBC_PIXEL_FORMAT:
return "pixel format is invalid";
default:
MISSING_CASE(reason);
return "unknown reason";
}
}
 
static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
enum no_fbc_reason reason)
{
if (dev_priv->fbc.no_fbc_reason == reason)
return;
 
dev_priv->fbc.no_fbc_reason = reason;
DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason));
}
 
static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
{
struct drm_crtc *crtc = NULL, *tmp_crtc;
enum pipe pipe;
bool pipe_a_only = false;
 
if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
pipe_a_only = true;
 
for_each_pipe(dev_priv, pipe) {
tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 
if (intel_crtc_active(tmp_crtc) &&
to_intel_plane_state(tmp_crtc->primary->state)->visible)
crtc = tmp_crtc;
 
if (pipe_a_only)
break;
}
 
if (!crtc || crtc->primary->fb == NULL)
return NULL;
 
return crtc;
}
 
static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
int n_pipes = 0;
struct drm_crtc *crtc;
 
if (INTEL_INFO(dev_priv)->gen > 4)
return true;
 
for_each_pipe(dev_priv, pipe) {
crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 
if (intel_crtc_active(crtc) &&
to_intel_plane_state(crtc->primary->state)->visible)
n_pipes++;
}
 
return (n_pipes < 2);
}
 
static int find_compression_threshold(struct drm_i915_private *dev_priv,
struct drm_mm_node *node,
int size,
int fb_cpp)
{
int compression_threshold = 1;
int ret;
u64 end;
 
/* The FBC hardware for BDW/SKL doesn't have access to the stolen
* reserved range size, so it always assumes the maximum (8mb) is used.
* If we enable FBC using a CFB on that memory range we'll get FIFO
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
else
end = dev_priv->gtt.stolen_usable_size;
 
/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.
*
* The enable_fbc code will attempt to use one of our 2 compression
* thresholds, so in that case we have only one fallback.
*/
 
/* Try to over-allocate to reduce reallocations and fragmentation. */
ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
4096, 0, end);
if (ret == 0)
return compression_threshold;
 
again:
/* HW's ability to limit the CFB is 1:4 */
if (compression_threshold > 4 ||
(fb_cpp == 2 && compression_threshold == 2))
return 0;
 
ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
4096, 0, end);
if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
return 0;
} else if (ret) {
compression_threshold <<= 1;
goto again;
} else {
return compression_threshold;
}
}
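 
/*
* Worked example (assuming every allocation attempt fails): for a requested
* size of 8MB at 4 bpp we try 16MB first (the over-allocation), then 8MB at
* threshold 1, 4MB at threshold 2 and 2MB at threshold 4 before giving up.
* For a 16bpp framebuffer (fb_cpp == 2) we stop as soon as the threshold
* reaches 2, so only the 16MB and 8MB attempts are made.
*/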
 
static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
int fb_cpp)
{
struct drm_mm_node *uninitialized_var(compressed_llb);
int ret;
 
ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
size, fb_cpp);
if (!ret)
goto err_llb;
else if (ret > 1) {
DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
 
}
 
dev_priv->fbc.threshold = ret;
 
if (INTEL_INFO(dev_priv)->gen >= 5)
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
else if (IS_GM45(dev_priv)) {
I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
goto err_fb;
 
ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
4096, 4096);
if (ret)
goto err_fb;
 
dev_priv->fbc.compressed_llb = compressed_llb;
 
I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start);
}
 
dev_priv->fbc.uncompressed_size = size;
 
DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
dev_priv->fbc.compressed_fb.size,
dev_priv->fbc.threshold);
 
return 0;
 
err_fb:
kfree(compressed_llb);
i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
err_llb:
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
 
static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
if (dev_priv->fbc.uncompressed_size == 0)
return;
 
i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
 
if (dev_priv->fbc.compressed_llb) {
i915_gem_stolen_remove_node(dev_priv,
dev_priv->fbc.compressed_llb);
kfree(dev_priv->fbc.compressed_llb);
}
 
dev_priv->fbc.uncompressed_size = 0;
}
 
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
__intel_fbc_cleanup_cfb(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
}
 
/*
* For SKL+, the plane source size used by the hardware is based on the value we
* write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
* we wrote to PIPESRC.
*/
static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
int *width, int *height)
{
struct intel_plane_state *plane_state =
to_intel_plane_state(crtc->base.primary->state);
int w, h;
 
if (intel_rotation_90_or_270(plane_state->base.rotation)) {
w = drm_rect_height(&plane_state->src) >> 16;
h = drm_rect_width(&plane_state->src) >> 16;
} else {
w = drm_rect_width(&plane_state->src) >> 16;
h = drm_rect_height(&plane_state->src) >> 16;
}
 
if (width)
*width = w;
if (height)
*height = h;
}
 
static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
int lines;
 
intel_fbc_get_plane_source_size(crtc, NULL, &lines);
if (INTEL_INFO(dev_priv)->gen >= 7)
lines = min(lines, 2048);
 
return lines * fb->pitches[0];
}
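 
/*
* Worked example: a 1920x1080 plane with a 7680-byte stride needs
* 1080 * 7680 bytes (just under 8MB) of CFB; on gen7+ the line count used
* in the calculation is capped at 2048.
*/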
 
static int intel_fbc_setup_cfb(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct drm_framebuffer *fb = crtc->base.primary->fb;
int size, cpp;
 
size = intel_fbc_calculate_cfb_size(crtc);
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 
if (size <= dev_priv->fbc.uncompressed_size)
return 0;
 
/* Release any current block */
__intel_fbc_cleanup_cfb(dev_priv);
 
return intel_fbc_alloc_cfb(dev_priv, size, cpp);
}
 
static bool stride_is_valid(struct drm_i915_private *dev_priv,
unsigned int stride)
{
/* These should have been caught earlier. */
WARN_ON(stride < 512);
WARN_ON((stride & (64 - 1)) != 0);
 
/* Below are the additional FBC restrictions. */
 
if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
return stride == 4096 || stride == 8192;
 
if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
return false;
 
if (stride > 16384)
return false;
 
return true;
}
 
static bool pixel_format_is_valid(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
switch (fb->pixel_format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
return true;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_RGB565:
/* 16bpp not supported on gen2 */
if (IS_GEN2(dev))
return false;
/* WaFbcOnly1to1Ratio:ctg */
if (IS_G4X(dev_priv))
return false;
return true;
default:
return false;
}
}
 
/*
* For some reason, the hardware tracking starts looking at whatever we
* programmed as the display plane base address register. It does not look at
* the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
* variables instead of just looking at the pipe/plane size.
*/
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
unsigned int effective_w, effective_h, max_w, max_h;
 
if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
max_w = 4096;
max_h = 4096;
} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
max_w = 4096;
max_h = 2048;
} else {
max_w = 2048;
max_h = 1536;
}
 
intel_fbc_get_plane_source_size(crtc, &effective_w, &effective_h);
effective_w += crtc->adjusted_x;
effective_h += crtc->adjusted_y;
 
return effective_w <= max_w && effective_h <= max_h;
}
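 
/*
* Worked example: on a gen4 part (max 2048x1536), a 1920x1080 plane whose
* base-address trick contributes adjusted_x = 64 gives effective_w =
* 1920 + 64 = 1984 <= 2048; with adjusted_y = 0 the 1080-line height is
* likewise within the 1536 limit, so hardware tracking covers the screen.
*/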
 
/**
* __intel_fbc_update - enable/disable FBC as needed, unlocked
* @dev_priv: i915 device instance
*
* Set up the framebuffer compression hardware at mode set time. We
* enable it if possible:
* - plane A only (on pre-965)
* - no pixel multiply/line duplication
* - no alpha buffer discard
* - no dual wide
* - framebuffer <= max_hdisplay in width, max_vdisplay in height
*
* We can't assume that any compression will take place (worst case),
* so the compressed buffer has to be the same size as the uncompressed
* one. It also must reside (along with the line length buffer) in
* stolen memory.
*
* We need to enable/disable FBC on a global basis.
*/
static void __intel_fbc_update(struct drm_i915_private *dev_priv)
{
struct drm_crtc *crtc = NULL;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;
 
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
/* disable framebuffer compression in vGPU */
if (intel_vgpu_active(dev_priv->dev))
i915.enable_fbc = 0;
 
if (i915.enable_fbc < 0) {
set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT);
goto out_disable;
}
 
if (!i915.enable_fbc) {
set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM);
goto out_disable;
}
 
/*
* If FBC is already on, we just have to verify that we can
* keep it that way...
* Need to disable if:
* - more than one pipe is active
* - changing FBC params (stride, fence, mode)
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
crtc = intel_fbc_find_crtc(dev_priv);
if (!crtc) {
set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
goto out_disable;
}
 
if (!multiple_pipes_ok(dev_priv)) {
set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES);
goto out_disable;
}
 
intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;
obj = intel_fb_obj(fb);
adjusted_mode = &intel_crtc->config->base.adjusted_mode;
 
if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE);
goto out_disable;
}
 
if (!intel_fbc_hw_tracking_covers_screen(intel_crtc)) {
set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
goto out_disable;
}
 
if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
intel_crtc->plane != PLANE_A) {
set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
goto out_disable;
}
 
/* The use of a CPU fence is mandatory in order to detect writes
* by the CPU to the scanout and trigger updates to the FBC.
*/
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
set_no_fbc_reason(dev_priv, FBC_NOT_TILED);
goto out_disable;
}
if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
set_no_fbc_reason(dev_priv, FBC_ROTATION);
goto out_disable;
}
 
if (!stride_is_valid(dev_priv, fb->pitches[0])) {
set_no_fbc_reason(dev_priv, FBC_BAD_STRIDE);
goto out_disable;
}
 
if (!pixel_format_is_valid(fb)) {
set_no_fbc_reason(dev_priv, FBC_PIXEL_FORMAT);
goto out_disable;
}
 
/* If the kernel debugger is active, always disable compression */
if (in_dbg_master()) {
set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
goto out_disable;
}
 
/* WaFbcExceedCdClockThreshold:hsw,bdw */
if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
ilk_pipe_pixel_rate(intel_crtc->config) >=
dev_priv->cdclk_freq * 95 / 100) {
set_no_fbc_reason(dev_priv, FBC_PIXEL_RATE);
goto out_disable;
}
 
if (intel_fbc_setup_cfb(intel_crtc)) {
set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
goto out_disable;
}
 
/* If the scanout has not changed, don't modify the FBC settings.
* Note that we make the fundamental assumption that the fb->obj
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
if (dev_priv->fbc.crtc == intel_crtc &&
dev_priv->fbc.fb_id == fb->base.id &&
dev_priv->fbc.y == crtc->y)
return;
 
if (intel_fbc_enabled(dev_priv)) {
/* We update FBC along two paths, after changing fb/crtc
* configuration (modeswitching) and after page-flipping
* finishes. For the latter, we know that not only did
* we disable the FBC at the start of the page-flip
* sequence, but also more than one vblank has passed.
*
* For the former case of modeswitching, it is possible
* to switch between two FBC valid configurations
* instantaneously so we do need to disable the FBC
* before we can modify its control registers. We also
* have to wait for the next vblank for that to take
* effect. However, since we delay enabling FBC we can
* assume that a vblank has passed since disabling and
* that we can safely alter the registers in the deferred
* callback.
*
* In the scenario that we go from a valid to invalid
* and then back to valid FBC configuration we have
* no strict enforcement that a vblank occurred since
* disabling the FBC. However, along all current pipe
* disabling paths we do need to wait for a vblank at
* some point. And we wait before enabling FBC anyway.
*/
DRM_DEBUG_KMS("disabling active FBC for update\n");
__intel_fbc_disable(dev_priv);
}
 
intel_fbc_schedule_enable(intel_crtc);
dev_priv->fbc.no_fbc_reason = FBC_OK;
return;
 
out_disable:
/* Multiple disables should be harmless */
if (intel_fbc_enabled(dev_priv)) {
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
__intel_fbc_disable(dev_priv);
}
__intel_fbc_cleanup_cfb(dev_priv);
}
 
/**
* intel_fbc_update - enable/disable FBC as needed
* @dev_priv: i915 device instance
*
* This function reevaluates the overall state and enables or disables FBC.
*/
void intel_fbc_update(struct drm_i915_private *dev_priv)
{
if (!fbc_supported(dev_priv))
return;
 
mutex_lock(&dev_priv->fbc.lock);
__intel_fbc_update(dev_priv);
mutex_unlock(&dev_priv->fbc.lock);
}
 
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
unsigned int fbc_bits;
 
if (!fbc_supported(dev_priv))
return;
 
if (origin == ORIGIN_GTT)
return;
 
mutex_lock(&dev_priv->fbc.lock);
 
if (dev_priv->fbc.enabled)
fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
else if (dev_priv->fbc.fbc_work)
fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
dev_priv->fbc.fbc_work->crtc->pipe);
else
fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
 
dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
 
if (dev_priv->fbc.busy_bits)
__intel_fbc_disable(dev_priv);
 
mutex_unlock(&dev_priv->fbc.lock);
}
 
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
if (!fbc_supported(dev_priv))
return;
 
if (origin == ORIGIN_GTT)
return;
 
mutex_lock(&dev_priv->fbc.lock);
 
dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
 
if (!dev_priv->fbc.busy_bits) {
__intel_fbc_disable(dev_priv);
__intel_fbc_update(dev_priv);
}
 
mutex_unlock(&dev_priv->fbc.lock);
}
 
/**
* intel_fbc_init - Initialize FBC
* @dev_priv: the i915 device
*
* This function might be called during the PM init process.
*/
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
 
mutex_init(&dev_priv->fbc.lock);
 
if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.enabled = false;
dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
return;
}
 
for_each_pipe(dev_priv, pipe) {
dev_priv->fbc.possible_framebuffer_bits |=
INTEL_FRONTBUFFER_PRIMARY(pipe);
 
if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
break;
}
 
if (INTEL_INFO(dev_priv)->gen >= 7) {
dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
dev_priv->fbc.enable_fbc = gen7_fbc_enable;
dev_priv->fbc.disable_fbc = ilk_fbc_disable;
} else if (INTEL_INFO(dev_priv)->gen >= 5) {
dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
dev_priv->fbc.enable_fbc = ilk_fbc_enable;
dev_priv->fbc.disable_fbc = ilk_fbc_disable;
} else if (IS_GM45(dev_priv)) {
dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
dev_priv->fbc.enable_fbc = g4x_fbc_enable;
dev_priv->fbc.disable_fbc = g4x_fbc_disable;
} else {
dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
dev_priv->fbc.disable_fbc = i8xx_fbc_disable;
 
/* This value was pulled out of someone's hat */
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}
 
dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv);
}
/drivers/video/drm/i915/intel_fbdev.c
29,7 → 29,7
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
//#include <linux/mm.h>
#include <linux/mm.h>
//#include <linux/tty.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
44,7 → 44,6
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
 
struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
{
#define BYTES_PER_LONG (BITS_PER_LONG/8)
81,16 → 80,8
ret = drm_fb_helper_set_par(info);
 
if (ret == 0) {
/*
* FIXME: fbdev presumes that all callbacks also work from
* atomic contexts and relies on that for emergency oops
* printing. KMS totally doesn't do that and the locking here is
* by far not the only place this goes wrong. Ignore this for
* now until we solve this for real.
*/
mutex_lock(&fb_helper->dev->struct_mutex);
ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
true);
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
mutex_unlock(&fb_helper->dev->struct_mutex);
}
 
97,6 → 88,43
return ret;
}
 
static int intel_fbdev_blank(int blank, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct intel_fbdev *ifbdev =
container_of(fb_helper, struct intel_fbdev, helper);
int ret;
 
ret = drm_fb_helper_blank(blank, info);
 
if (ret == 0) {
mutex_lock(&fb_helper->dev->struct_mutex);
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
mutex_unlock(&fb_helper->dev->struct_mutex);
}
 
return ret;
}
 
static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct intel_fbdev *ifbdev =
container_of(fb_helper, struct intel_fbdev, helper);
 
int ret;
 
ret = drm_fb_helper_pan_display(var, info);
 
if (ret == 0) {
mutex_lock(&fb_helper->dev->struct_mutex);
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
mutex_unlock(&fb_helper->dev->struct_mutex);
}
 
return ret;
}
 
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
104,8 → 132,8
// .fb_fillrect = cfb_fillrect,
// .fb_copyarea = cfb_copyarea,
// .fb_imageblit = cfb_imageblit,
// .fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_pan_display = intel_fbdev_pan_display,
.fb_blank = intel_fbdev_blank,
// .fb_setcmap = drm_fb_helper_setcmap,
// .fb_debug_enter = drm_fb_helper_debug_enter,
// .fb_debug_leave = drm_fb_helper_debug_leave,
118,8 → 146,9
container_of(helper, struct intel_fbdev, helper);
struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj = NULL;
int size, ret;
 
/* we don't do packed 24bpp */
135,9 → 164,9
sizes->surface_depth);
 
size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
size = PAGE_ALIGN(size);
obj = main_fb_obj;
obj->stride = mode_cmd.pitches[0];
obj->map_and_fenceable=true;
if (!obj) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
151,7 → 180,7
}
 
/* Flush everything out, we'll be doing GTT only from now on */
ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL);
ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL);
if (ret) {
DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_fb;
214,9 → 243,9
obj = intel_fb->obj;
size = obj->base.size;
 
info = framebuffer_alloc(0, &dev->pdev->dev);
if (!info) {
ret = -ENOMEM;
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto out_unpin;
}
 
224,8 → 253,10
 
fb = &ifbdev->fb->base;
 
if (main_framebuffer == NULL)
main_framebuffer = fb;
 
ifbdev->helper.fb = fb;
ifbdev->helper.fbdev = info;
 
strcpy(info->fix.id, "inteldrmfb");
 
233,11 → 264,6
info->fbops = &intelfb_ops;
 
/* setup aperture base/size for vesafb takeover */
info->apertures = alloc_apertures(1);
if (!info->apertures) {
ret = -ENOMEM;
goto out_unpin;
}
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
 
253,7 → 279,7
 
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx, bo %p\n",
fb->width, fb->height,
i915_gem_obj_ggtt_offset(obj), obj);
 
260,6 → 286,8
mutex_unlock(&dev->struct_mutex);
return 0;
 
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unpin:
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
440,18 → 468,13
* IMPORTANT: We want to use the adjusted mode (i.e.
* after the panel fitter upscaling) as the initial
* config, not the input mode, which is what crtc->mode
* usually contains. But since our current fastboot
* usually contains. But since our current
* code puts a mode derived from the post-pfit timings
* into crtc->mode this works out correctly. We don't
* use hwmode anywhere right now, so use it for this
* since the fb helper layer wants a pointer to
* something we own.
* into crtc->mode this works out correctly.
*/
DRM_DEBUG_KMS("looking for current mode on connector %s\n",
connector->name);
intel_mode_from_pipe_config(&encoder->crtc->hwmode,
&to_intel_crtc(encoder->crtc)->config);
modes[i] = &encoder->crtc->hwmode;
modes[i] = &encoder->crtc->mode;
}
crtcs[i] = new_crtc;
 
518,28 → 541,25
struct intel_framebuffer *fb = NULL;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
struct intel_plane_config *plane_config = NULL;
unsigned int max_size = 0;
 
if (!i915.fastboot)
return false;
 
/* Find the largest fb */
for_each_crtc(dev, crtc) {
struct drm_i915_gem_object *obj =
intel_fb_obj(crtc->primary->state->fb);
intel_crtc = to_intel_crtc(crtc);
 
if (!intel_crtc->active || !crtc->primary->fb) {
if (!crtc->state->active || !obj) {
DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
pipe_name(intel_crtc->pipe));
continue;
}
 
if (intel_crtc->plane_config.size > max_size) {
if (obj->base.size > max_size) {
DRM_DEBUG_KMS("found possible fb from plane %c\n",
pipe_name(intel_crtc->pipe));
plane_config = &intel_crtc->plane_config;
fb = to_intel_framebuffer(crtc->primary->fb);
max_size = plane_config->size;
fb = to_intel_framebuffer(crtc->primary->state->fb);
max_size = obj->base.size;
}
}
 
554,7 → 574,7
 
intel_crtc = to_intel_crtc(crtc);
 
if (!intel_crtc->active) {
if (!crtc->state->active) {
DRM_DEBUG_KMS("pipe %c not active, skipping\n",
pipe_name(intel_crtc->pipe));
continue;
568,24 → 588,25
* pipe. Note we need to use the selected fb's pitch and bpp
* rather than the current pipe's, since they differ.
*/
cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay;
cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.bits_per_pixel / 8;
if (fb->base.pitches[0] < cur_size) {
DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
pipe_name(intel_crtc->pipe),
cur_size, fb->base.pitches[0]);
plane_config = NULL;
fb = NULL;
break;
}
 
cur_size = intel_crtc->config.adjusted_mode.crtc_vdisplay;
cur_size = ALIGN(cur_size, plane_config->tiled ? (IS_GEN2(dev) ? 16 : 8) : 1);
cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(dev, cur_size,
fb->base.pixel_format,
fb->base.modifier[0]);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
intel_crtc->config.adjusted_mode.crtc_hdisplay,
intel_crtc->config.adjusted_mode.crtc_vdisplay,
intel_crtc->config->base.adjusted_mode.crtc_hdisplay,
intel_crtc->config->base.adjusted_mode.crtc_vdisplay,
fb->base.bits_per_pixel,
cur_size);
 
593,7 → 614,6
DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
pipe_name(intel_crtc->pipe),
cur_size, max_size);
plane_config = NULL;
fb = NULL;
break;
}
617,7 → 637,7
for_each_crtc(dev, crtc) {
intel_crtc = to_intel_crtc(crtc);
 
if (!intel_crtc->active)
if (!crtc->state->active)
continue;
 
WARN(!crtc->primary->fb,
659,6 → 679,8
return ret;
}
 
ifbdev->helper.atomic = true;
 
dev_priv->fbdev = ifbdev;
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
 
/drivers/video/drm/i915/intel_fifo_underrun.c
282,16 → 282,6
return ret;
}
 
static bool
__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
return !intel_crtc->cpu_fifo_underrun_disabled;
}
 
/**
* intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
* @dev_priv: i915 device instance
341,7 → 331,7
}
 
/**
* intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
* intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
* @dev_priv: i915 device instance
* @pipe: (CPU) pipe to set state for
*
352,9 → 342,15
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 
/* We may be called too early in init, thanks BIOS! */
if (crtc == NULL)
return;
 
/* GMCH can't disable fifo underruns, filter them. */
if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
!__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe))
to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
return;
 
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
/drivers/video/drm/i915/intel_frontbuffer.c
65,80 → 65,22
#include "intel_drv.h"
#include "i915_drv.h"
 
static void intel_increase_pllclock(struct drm_device *dev,
enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_reg = DPLL(pipe);
int dpll;
 
if (!HAS_GMCH_DISPLAY(dev))
return;
 
if (!dev_priv->lvds_downclock_avail)
return;
 
dpll = I915_READ(dpll_reg);
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
DRM_DEBUG_DRIVER("upclocking LVDS\n");
 
assert_panel_unlocked(dev_priv, pipe);
 
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
intel_wait_for_vblank(dev, pipe);
 
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
}
}
 
/**
* intel_mark_fb_busy - mark given planes as busy
* @dev: DRM device
* @frontbuffer_bits: bits for the affected planes
* @ring: optional ring for asynchronous commands
*
* This function gets called every time the screen contents change. It can be
* used to keep e.g. the update rate at the nominal refresh rate with DRRS.
*/
static void intel_mark_fb_busy(struct drm_device *dev,
unsigned frontbuffer_bits,
struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
 
if (!i915.powersave)
return;
 
for_each_pipe(dev_priv, pipe) {
if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
continue;
 
intel_increase_pllclock(dev, pipe);
if (ring && intel_fbc_enabled(dev))
ring->fbc_dirty = true;
}
}
 
/**
* intel_fb_obj_invalidate - invalidate frontbuffer object
* @obj: GEM object to invalidate
* @ring: set for asynchronous rendering
* @origin: which operation caused the invalidation
*
* This function gets called every time rendering on the given object starts and
* frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
* be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
* be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
* until the rendering completes or a flip on this frontbuffer plane is
* scheduled.
*/
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring)
enum fb_op_origin origin)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(dev);
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
145,7 → 87,7
if (!obj->frontbuffer_bits)
return;
 
if (ring) {
if (origin == ORIGIN_CS) {
mutex_lock(&dev_priv->fb_tracking.lock);
dev_priv->fb_tracking.busy_bits
|= obj->frontbuffer_bits;
154,9 → 96,9
mutex_unlock(&dev_priv->fb_tracking.lock);
}
 
intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
 
intel_psr_invalidate(dev, obj->frontbuffer_bits);
intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
}
 
/**
163,6 → 105,7
* intel_frontbuffer_flush - flush frontbuffer
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the flush
*
* This function gets called every time rendering on the given planes has
* completed and frontbuffer caching can be started again. Flushes will get
170,10 → 113,11
*
* Can be called without any locks held.
*/
void intel_frontbuffer_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
static void intel_frontbuffer_flush(struct drm_device *dev,
unsigned frontbuffer_bits,
enum fb_op_origin origin)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(dev);
 
/* Delay flushing when rings are still busy.*/
mutex_lock(&dev_priv->fb_tracking.lock);
180,25 → 124,19
frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
 
intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
if (!frontbuffer_bits)
return;
 
intel_psr_flush(dev, frontbuffer_bits);
 
/*
* FIXME: Unconditional fbc flushing here is a rather gross hack and
* needs to be reworked into a proper frontbuffer tracking scheme like
* psr employs.
*/
if (dev_priv->fbc.need_sw_cache_clean) {
dev_priv->fbc.need_sw_cache_clean = false;
bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
intel_edp_drrs_flush(dev, frontbuffer_bits);
intel_psr_flush(dev, frontbuffer_bits, origin);
intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
}
}
 
/**
* intel_fb_obj_flush - flush frontbuffer object
* @obj: GEM object to flush
* @retire: set when retiring asynchronous rendering
* @origin: which operation caused the flush
*
* This function gets called every time rendering on the given object has
* completed and frontbuffer caching can be started again. If @retire is true
205,10 → 143,10
* then any delayed flushes will be unblocked.
*/
void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
bool retire)
bool retire, enum fb_op_origin origin)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned frontbuffer_bits;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
227,7 → 165,7
mutex_unlock(&dev_priv->fb_tracking.lock);
}
 
intel_frontbuffer_flush(dev, frontbuffer_bits);
intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
}
 
/**
245,7 → 183,7
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(dev);
 
mutex_lock(&dev_priv->fb_tracking.lock);
dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
252,6 → 190,8
/* Remove stale busy bits due to the old buffer. */
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
 
intel_psr_single_frame_update(dev, frontbuffer_bits);
}
 
/**
267,7 → 207,7
void intel_frontbuffer_flip_complete(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(dev);
 
mutex_lock(&dev_priv->fb_tracking.lock);
/* Mask any cancelled flips. */
275,5 → 215,29
dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
 
intel_frontbuffer_flush(dev, frontbuffer_bits);
intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
}
 
/**
* intel_frontbuffer_flip - synchronous frontbuffer flip
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. This is for
* synchronous plane updates which will happen on the next vblank and which will
* not get delayed by pending gpu rendering.
*
* Can be called without any locks held.
*/
void intel_frontbuffer_flip(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = to_i915(dev);
 
mutex_lock(&dev_priv->fb_tracking.lock);
/* Remove stale busy bits due to the old buffer. */
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
 
intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
}
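 
/*
* Illustrative sketch (not part of this file): the invalidate/flush pairing
* expected around direct CPU rendering to a frontbuffer object. ORIGIN_CPU
* is assumed to be the matching fb_op_origin value for this case.
*
*	mutex_lock(&dev->struct_mutex);
*	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
*	mutex_unlock(&dev->struct_mutex);
*	// ... CPU writes to the scanout ...
*	mutex_lock(&dev->struct_mutex);
*	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
*	mutex_unlock(&dev->struct_mutex);
*/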
/drivers/video/drm/i915/intel_guc.h
0,0 → 1,124
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_
 
#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
 
struct i915_guc_client {
struct drm_i915_gem_object *client_obj;
struct intel_context *owner;
struct intel_guc *guc;
uint32_t priority;
uint32_t ctx_index;
 
uint32_t proc_desc_offset;
uint32_t doorbell_offset;
uint32_t cookie;
uint16_t doorbell_id;
uint16_t padding; /* Maintain alignment */
 
uint32_t wq_offset;
uint32_t wq_size;
 
spinlock_t wq_lock; /* Protects all data below */
uint32_t wq_tail;
 
/* GuC submission statistics & status */
uint64_t submissions[I915_NUM_RINGS];
uint32_t q_fail;
uint32_t b_fail;
int retcode;
};
 
enum intel_guc_fw_status {
GUC_FIRMWARE_FAIL = -1,
GUC_FIRMWARE_NONE = 0,
GUC_FIRMWARE_PENDING,
GUC_FIRMWARE_SUCCESS
};
 
/*
* This structure encapsulates all the data needed during the process
* of fetching, caching, and loading the firmware image into the GuC.
*/
struct intel_guc_fw {
struct drm_device * guc_dev;
const char * guc_fw_path;
size_t guc_fw_size;
struct drm_i915_gem_object * guc_fw_obj;
enum intel_guc_fw_status guc_fw_fetch_status;
enum intel_guc_fw_status guc_fw_load_status;
 
uint16_t guc_fw_major_wanted;
uint16_t guc_fw_minor_wanted;
uint16_t guc_fw_major_found;
uint16_t guc_fw_minor_found;
};
 
struct intel_guc {
struct intel_guc_fw guc_fw;
 
uint32_t log_flags;
struct drm_i915_gem_object *log_obj;
 
struct drm_i915_gem_object *ctx_pool_obj;
struct ida ctx_ids;
 
struct i915_guc_client *execbuf_client;
 
spinlock_t host2guc_lock; /* Protects all data below */
 
DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS);
uint32_t db_cacheline; /* Cyclic counter mod pagesize */
 
/* Action status & statistics */
uint64_t action_count; /* Total commands issued */
uint32_t action_cmd; /* Last command word */
uint32_t action_status; /* Last return status */
uint32_t action_fail; /* Total number of failures */
int32_t action_err; /* Last error code */
 
uint64_t submissions[I915_NUM_RINGS];
uint32_t last_seqno[I915_NUM_RINGS];
};
 
/* intel_guc_loader.c */
extern void intel_guc_ucode_init(struct drm_device *dev);
extern int intel_guc_ucode_load(struct drm_device *dev);
extern void intel_guc_ucode_fini(struct drm_device *dev);
extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
extern int intel_guc_suspend(struct drm_device *dev);
extern int intel_guc_resume(struct drm_device *dev);
 
/* i915_guc_submission.c */
int i915_guc_submission_init(struct drm_device *dev);
int i915_guc_submission_enable(struct drm_device *dev);
int i915_guc_submit(struct i915_guc_client *client,
struct drm_i915_gem_request *rq);
void i915_guc_submission_disable(struct drm_device *dev);
void i915_guc_submission_fini(struct drm_device *dev);
 
#endif
/drivers/video/drm/i915/intel_guc_fwif.h
0,0 → 1,260
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef _INTEL_GUC_FWIF_H
#define _INTEL_GUC_FWIF_H
 
/*
* This file is partially autogenerated, although currently with some manual
* fixups afterwards. In future, it should be entirely autogenerated, in order
* to ensure that the definitions herein remain in sync with those used by the
* GuC's own firmware.
*
* EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST.
*/
 
#define GFXCORE_FAMILY_GEN9 12
#define GFXCORE_FAMILY_UNKNOWN 0x7fffffff
 
#define GUC_CTX_PRIORITY_KMD_HIGH 0
#define GUC_CTX_PRIORITY_HIGH 1
#define GUC_CTX_PRIORITY_KMD_NORMAL 2
#define GUC_CTX_PRIORITY_NORMAL 3
 
#define GUC_MAX_GPU_CONTEXTS 1024
#define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS
 
/* Work queue item header definitions */
#define WQ_STATUS_ACTIVE 1
#define WQ_STATUS_SUSPENDED 2
#define WQ_STATUS_CMD_ERROR 3
#define WQ_STATUS_ENGINE_ID_NOT_USED 4
#define WQ_STATUS_SUSPENDED_FROM_RESET 5
#define WQ_TYPE_SHIFT 0
#define WQ_TYPE_BATCH_BUF (0x1 << WQ_TYPE_SHIFT)
#define WQ_TYPE_PSEUDO (0x2 << WQ_TYPE_SHIFT)
#define WQ_TYPE_INORDER (0x3 << WQ_TYPE_SHIFT)
#define WQ_TARGET_SHIFT 10
#define WQ_LEN_SHIFT 16
#define WQ_NO_WCFLUSH_WAIT (1 << 27)
#define WQ_PRESENT_WORKLOAD (1 << 28)
#define WQ_WORKLOAD_SHIFT 29
#define WQ_WORKLOAD_GENERAL (0 << WQ_WORKLOAD_SHIFT)
#define WQ_WORKLOAD_GPGPU (1 << WQ_WORKLOAD_SHIFT)
#define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT)
 
#define WQ_RING_TAIL_SHIFT 20
#define WQ_RING_TAIL_MASK (0x7FF << WQ_RING_TAIL_SHIFT)
 
#define GUC_DOORBELL_ENABLED 1
#define GUC_DOORBELL_DISABLED 0
 
#define GUC_CTX_DESC_ATTR_ACTIVE (1 << 0)
#define GUC_CTX_DESC_ATTR_PENDING_DB (1 << 1)
#define GUC_CTX_DESC_ATTR_KERNEL (1 << 2)
#define GUC_CTX_DESC_ATTR_PREEMPT (1 << 3)
#define GUC_CTX_DESC_ATTR_RESET (1 << 4)
#define GUC_CTX_DESC_ATTR_WQLOCKED (1 << 5)
#define GUC_CTX_DESC_ATTR_PCH (1 << 6)
#define GUC_CTX_DESC_ATTR_TERMINATED (1 << 7)
 
/* The guc control data is 10 DWORDs */
#define GUC_CTL_CTXINFO 0
#define GUC_CTL_CTXNUM_IN16_SHIFT 0
#define GUC_CTL_BASE_ADDR_SHIFT 12
#define GUC_CTL_ARAT_HIGH 1
#define GUC_CTL_ARAT_LOW 2
#define GUC_CTL_DEVICE_INFO 3
#define GUC_CTL_GTTYPE_SHIFT 0
#define GUC_CTL_COREFAMILY_SHIFT 7
#define GUC_CTL_LOG_PARAMS 4
#define GUC_LOG_VALID (1 << 0)
#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1)
#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3)
#define GUC_LOG_CRASH_PAGES 1
#define GUC_LOG_CRASH_SHIFT 4
#define GUC_LOG_DPC_PAGES 3
#define GUC_LOG_DPC_SHIFT 6
#define GUC_LOG_ISR_PAGES 3
#define GUC_LOG_ISR_SHIFT 9
#define GUC_LOG_BUF_ADDR_SHIFT 12
#define GUC_CTL_PAGE_FAULT_CONTROL 5
#define GUC_CTL_WA 6
#define GUC_CTL_WA_UK_BY_DRIVER (1 << 3)
#define GUC_CTL_FEATURE 7
#define GUC_CTL_VCS2_ENABLED (1 << 0)
#define GUC_CTL_KERNEL_SUBMISSIONS (1 << 1)
#define GUC_CTL_FEATURE2 (1 << 2)
#define GUC_CTL_POWER_GATING (1 << 3)
#define GUC_CTL_DISABLE_SCHEDULER (1 << 4)
#define GUC_CTL_PREEMPTION_LOG (1 << 5)
#define GUC_CTL_ENABLE_SLPC (1 << 7)
#define GUC_CTL_RESET_ON_PREMPT_FAILURE (1 << 8)
#define GUC_CTL_DEBUG 8
#define GUC_LOG_VERBOSITY_SHIFT 0
#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
#define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT)
#define GUC_LOG_VERBOSITY_HIGH (2 << GUC_LOG_VERBOSITY_SHIFT)
#define GUC_LOG_VERBOSITY_ULTRA (3 << GUC_LOG_VERBOSITY_SHIFT)
/* Verbosity range-check limits, without the shift */
#define GUC_LOG_VERBOSITY_MIN 0
#define GUC_LOG_VERBOSITY_MAX 3
#define GUC_CTL_RSRVD 9
 
#define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1)
 
struct guc_doorbell_info {
u32 db_status;
u32 cookie;
u32 reserved[14];
} __packed;
 
union guc_doorbell_qw {
struct {
u32 db_status;
u32 cookie;
};
u64 value_qw;
} __packed;
 
#define GUC_MAX_DOORBELLS 256
#define GUC_INVALID_DOORBELL_ID (GUC_MAX_DOORBELLS)
 
#define GUC_DB_SIZE (PAGE_SIZE)
#define GUC_WQ_SIZE (PAGE_SIZE * 2)
 
/* Work item for submitting workloads into work queue of GuC. */
struct guc_wq_item {
u32 header;
u32 context_desc;
u32 ring_tail;
u32 fence_id;
} __packed;
 
struct guc_process_desc {
u32 context_id;
u64 db_base_addr;
u32 head;
u32 tail;
u32 error_offset;
u64 wq_base_addr;
u32 wq_size_bytes;
u32 wq_status;
u32 engine_presence;
u32 priority;
u32 reserved[30];
} __packed;
 
/* The engine id and context id are packed into guc_execlist_context.context_id */
#define GUC_ELC_CTXID_OFFSET 0
#define GUC_ELC_ENGINE_OFFSET 29
 
/* The execlist context including software and HW information */
struct guc_execlist_context {
u32 context_desc;
u32 context_id;
u32 ring_status;
u32 ring_lcra;
u32 ring_begin;
u32 ring_end;
u32 ring_next_free_location;
u32 ring_current_tail_pointer_value;
u8 engine_state_submit_value;
u8 engine_state_wait_value;
u16 pagefault_count;
u16 engine_submit_queue_count;
} __packed;
 
/* Context descriptor for communicating between uKernel and Driver */
struct guc_context_desc {
u32 sched_common_area;
u32 context_id;
u32 pas_id;
u8 engines_used;
u64 db_trigger_cpu;
u32 db_trigger_uk;
u64 db_trigger_phy;
u16 db_id;
 
struct guc_execlist_context lrc[I915_NUM_RINGS];
 
u8 attribute;
 
u32 priority;
 
u32 wq_sampled_tail_offset;
u32 wq_total_submit_enqueues;
 
u32 process_desc;
u32 wq_addr;
u32 wq_size;
 
u32 engine_presence;
 
u8 engine_suspended;
 
u8 reserved0[3];
u64 reserved1[1];
 
u64 desc_private;
} __packed;
 
#define GUC_FORCEWAKE_RENDER (1 << 0)
#define GUC_FORCEWAKE_MEDIA (1 << 1)
 
#define GUC_POWER_UNSPECIFIED 0
#define GUC_POWER_D0 1
#define GUC_POWER_D1 2
#define GUC_POWER_D2 3
#define GUC_POWER_D3 4
 
/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
enum host2guc_action {
HOST2GUC_ACTION_DEFAULT = 0x0,
HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
HOST2GUC_ACTION_ENTER_S_STATE = 0x501,
HOST2GUC_ACTION_EXIT_S_STATE = 0x502,
HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
HOST2GUC_ACTION_LIMIT
};
 
/*
* The GuC sends its response to a command by overwriting the
* command in SS0. The response is distinguishable from a command
* by the fact that all the MASK bits are set. The remaining bits
* give more detail.
*/
#define GUC2HOST_RESPONSE_MASK ((u32)0xF0000000)
#define GUC2HOST_IS_RESPONSE(x) ((u32)(x) >= GUC2HOST_RESPONSE_MASK)
#define GUC2HOST_STATUS(x) (GUC2HOST_RESPONSE_MASK | (x))
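 
/*
* Worked example: a response word of 0xF0000010 has all four mask bits set,
* so GUC2HOST_IS_RESPONSE() is true and the low bits identify it as
* GUC2HOST_STATUS_ALLOCATE_DOORBELL_FAIL below; a plain command value such
* as 0x10 sits below the mask and is not treated as a response.
*/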
 
/* GUC will return status back to SOFT_SCRATCH_O_REG */
enum guc2host_status {
GUC2HOST_STATUS_SUCCESS = GUC2HOST_STATUS(0x0),
GUC2HOST_STATUS_ALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x10),
GUC2HOST_STATUS_DEALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x20),
GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
};
 
#endif
/drivers/video/drm/i915/intel_guc_loader.c
0,0 → 1,609
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Vinit Azad <vinit.azad@intel.com>
* Ben Widawsky <ben@bwidawsk.net>
* Dave Gordon <david.s.gordon@intel.com>
* Alex Dai <yu.dai@intel.com>
*/
#include <linux/firmware.h>
#include "intel_drv.h"
#include "i915_drv.h"
#include "intel_guc.h"
 
/**
* DOC: GuC
*
* intel_guc:
* Top-level structure of the GuC. It handles firmware loading and manages the
* client pool and doorbells. intel_guc owns an i915_guc_client to replace the
* legacy ExecList submission.
*
* Firmware versioning:
* The firmware build process generates a version header file with the major
* and minor version defined. The versions are built into the CSS header of
* the firmware. The i915 kernel driver sets the minimum firmware version
* required per platform. The firmware installation package installs (as a
* symbolic link) the proper version of the firmware.
*
* GuC address space:
* GuC does not allow any gfx GGTT address that falls into the range
* [0, WOPCM_TOP), which is reserved for the Boot ROM, SRAM and WOPCM.
* Currently this top address is 512K. To exclude the 0-512K address space
* from the GGTT, all gfx objects used by the GuC are pinned with
* PIN_OFFSET_BIAS set to the size of the WOPCM.
*
* Firmware log:
* The firmware log is enabled by setting i915.guc_log_level to a
* non-negative level. Log data can be read out via the debugfs file
* i915_guc_log_dump. Reading from i915_guc_load_status prints the firmware
* loading status and the scratch register values.
*
*/
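 
/*
 * Illustrative sketch (assumption, not from this file): per the "GuC
 * address space" note above, a GuC-visible object is pinned with an offset
 * bias so it lands above WOPCM_TOP; the exact flag spelling used with the
 * pin helper here is an assumption.
 */
#if 0
	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
#endif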
 
#define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin"
MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
 
/* User-friendly representation of an enum */
const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
{
switch (status) {
case GUC_FIRMWARE_FAIL:
return "FAIL";
case GUC_FIRMWARE_NONE:
return "NONE";
case GUC_FIRMWARE_PENDING:
return "PENDING";
case GUC_FIRMWARE_SUCCESS:
return "SUCCESS";
default:
return "UNKNOWN!";
}
}
 
static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
int i, irqs;
 
/* tell all command streamers NOT to forward interrupts and vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MODE_GEN7(ring), irqs);
 
/* route all GT interrupts to the host */
I915_WRITE(GUC_BCS_RCS_IER, 0);
I915_WRITE(GUC_VCS2_VCS1_IER, 0);
I915_WRITE(GUC_WD_VECS_IER, 0);
}
 
static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
int i, irqs;
 
/* tell all command streamers to forward interrupts and vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MODE_GEN7(ring), irqs);
 
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
/* These three registers have the same bit definitions */
I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
I915_WRITE(GUC_WD_VECS_IER, ~irqs);
}
 
static u32 get_gttype(struct drm_i915_private *dev_priv)
{
/* XXX: GT type based on PCI device ID? field seems unused by fw */
return 0;
}
 
static u32 get_core_family(struct drm_i915_private *dev_priv)
{
switch (INTEL_INFO(dev_priv)->gen) {
case 9:
return GFXCORE_FAMILY_GEN9;
 
default:
DRM_ERROR("GUC: unsupported core family\n");
return GFXCORE_FAMILY_UNKNOWN;
}
}
 
static void set_guc_init_params(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
u32 params[GUC_CTL_MAX_DWORDS];
int i;
 
memset(&params, 0, sizeof(params));
 
params[GUC_CTL_DEVICE_INFO] |=
(get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
(get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);
 
/*
* GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
* second. This ARAT is calculated by:
* Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
*/
params[GUC_CTL_ARAT_HIGH] = 0;
params[GUC_CTL_ARAT_LOW] = 100000000;
 
params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
 
params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
GUC_CTL_VCS2_ENABLED;
 
if (i915.guc_log_level >= 0) {
params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
params[GUC_CTL_DEBUG] =
i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
}
 
/* If GuC submission is enabled, set up additional parameters here */
if (i915.enable_guc_submission) {
u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
 
pgs >>= PAGE_SHIFT;
params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
 
params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
 
/* Unmask this bit to enable the GuC's internal scheduler */
params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
}
 
I915_WRITE(SOFT_SCRATCH(0), 0);
 
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
}
 
/*
* Read the GuC status register (GUC_STATUS) and store it in the
* specified location; then return a boolean indicating whether
* the value matches either of two values representing completion
* of the GuC boot process.
*
* This is used for polling the GuC status in a wait_for_atomic()
* loop below.
*/
static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
u32 *status)
{
u32 val = I915_READ(GUC_STATUS);
u32 uk_val = val & GS_UKERNEL_MASK;
*status = val;
return (uk_val == GS_UKERNEL_READY ||
((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
}
 
/*
* Transfer the firmware image to RAM for execution by the microcontroller.
*
* GuC Firmware layout:
* +-------------------------------+ ----
* | CSS header | 128B
* | contains major/minor version |
* +-------------------------------+ ----
* | uCode |
* +-------------------------------+ ----
* | RSA signature | 256B
* +-------------------------------+ ----
*
* Architecturally, the DMA engine is bidirectional, and can potentially even
* transfer between GTT locations. This functionality is left out of the API
* for now as there is no need for it.
*
* Note that GuC needs the CSS header plus uKernel code to be copied by the
* DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
*/
 
#define UOS_CSS_HEADER_OFFSET 0
#define UOS_VER_MINOR_OFFSET 0x44
#define UOS_VER_MAJOR_OFFSET 0x46
#define UOS_CSS_HEADER_SIZE 0x80
#define UOS_RSA_SIG_SIZE 0x100
 
static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
unsigned long offset;
struct sg_table *sg = fw_obj->pages;
u32 status, ucode_size, rsa[UOS_RSA_SIG_SIZE / sizeof(u32)];
int i, ret = 0;
 
/* uCode size; also where the RSA signature starts */
offset = ucode_size = guc_fw->guc_fw_size - UOS_RSA_SIG_SIZE;
I915_WRITE(DMA_COPY_SIZE, ucode_size);
 
/* Copy RSA signature from the fw image to HW for verification */
sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, UOS_RSA_SIG_SIZE, offset);
for (i = 0; i < UOS_RSA_SIG_SIZE / sizeof(u32); i++)
I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
 
/* Set the source address for the new blob */
offset = i915_gem_obj_ggtt_offset(fw_obj);
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
 
/*
* Set the DMA destination. Current uCode expects the code to be
* loaded at 8k; locations below this are used for the stack.
*/
I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
 
/* Finally start the DMA */
I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
 
/*
* Spin-wait for the DMA to complete & the GuC to start up.
* NB: Docs recommend not using the interrupt for completion.
* Measurements indicate this should take no more than 20ms, so a
* timeout here indicates that the GuC has failed and is unusable.
* (Higher levels of the driver will attempt to fall back to
* execlist mode if this happens.)
*/
ret = wait_for_atomic(guc_ucode_response(dev_priv, &status), 100);
 
DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
I915_READ(DMA_CTRL), status);
 
if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
DRM_ERROR("GuC firmware signature verification failed\n");
ret = -ENOEXEC;
}
 
DRM_DEBUG_DRIVER("returning %d\n", ret);
 
return ret;
}
 
/*
* Load the GuC firmware blob into the MinuteIA.
*/
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
struct drm_device *dev = dev_priv->dev;
int ret;
 
ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
if (ret) {
DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
return ret;
}
 
ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0);
if (ret) {
DRM_DEBUG_DRIVER("pin failed %d\n", ret);
return ret;
}
 
/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
/* init WOPCM */
I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE);
I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
 
/* Enable MIA caching. GuC clock gating is disabled. */
I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
 
/* WaDisableMinuteIaClockGating:skl,bxt */
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) {
I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
~GUC_ENABLE_MIA_CLOCK_GATING));
}
 
/* WaC6DisallowByGfxPause */
I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
 
if (IS_BROXTON(dev))
I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
else
I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
 
if (IS_GEN9(dev)) {
/* DOP Clock Gating Enable for GuC clocks */
I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
I915_READ(GEN7_MISCCPCTL)));
 
/* allows for 5us before GT can go to RC6 */
I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
}
 
set_guc_init_params(dev_priv);
 
ret = guc_ucode_xfer_dma(dev_priv);
 
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
/*
* We keep the object pages for reuse during resume. But we can unpin the
* object now that the DMA has completed, so it doesn't continue to take
* up space.
*/
i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj);
 
return ret;
}
 
/**
* intel_guc_ucode_load() - load GuC uCode into the device
* @dev: drm device
*
* Called from gem_init_hw() during driver loading and also after a GPU reset.
*
* The firmware image should have already been fetched into memory by the
* earlier call to intel_guc_ucode_init(), so here we need only check that
* it succeeded, and then transfer the image to the h/w.
*
* Return: non-zero code on error
*/
int intel_guc_ucode_load(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
int err = 0;
 
DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
 
direct_interrupts_to_host(dev_priv);
 
if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE)
return 0;
 
if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
return -ENOEXEC;
 
guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
 
DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
 
switch (guc_fw->guc_fw_fetch_status) {
case GUC_FIRMWARE_FAIL:
/* something went wrong :( */
err = -EIO;
goto fail;
 
case GUC_FIRMWARE_NONE:
case GUC_FIRMWARE_PENDING:
default:
/* "can't happen" */
WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
guc_fw->guc_fw_path,
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
guc_fw->guc_fw_fetch_status);
err = -ENXIO;
goto fail;
 
case GUC_FIRMWARE_SUCCESS:
break;
}
 
err = i915_guc_submission_init(dev);
if (err)
goto fail;
 
err = guc_ucode_xfer(dev_priv);
if (err)
goto fail;
 
guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
 
DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
 
if (i915.enable_guc_submission) {
/* The execbuf_client will be recreated. Release it first. */
i915_guc_submission_disable(dev);
 
err = i915_guc_submission_enable(dev);
if (err)
goto fail;
direct_interrupts_to_guc(dev_priv);
}
 
return 0;
 
fail:
if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
 
direct_interrupts_to_host(dev_priv);
i915_guc_submission_disable(dev);
 
return err;
}
 
static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
{
struct drm_i915_gem_object *obj;
const struct firmware *fw;
const u8 *css_header;
const size_t minsize = UOS_CSS_HEADER_SIZE + UOS_RSA_SIG_SIZE;
const size_t maxsize = GUC_WOPCM_SIZE_VALUE + UOS_RSA_SIG_SIZE
- 0x8000; /* 32k reserved (8K stack + 24k context) */
int err;
 
DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
 
err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev);
if (err)
goto fail;
if (!fw)
goto fail;
 
DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
guc_fw->guc_fw_path, fw);
DRM_DEBUG_DRIVER("firmware file size %zu (minimum %zu, maximum %zu)\n",
fw->size, minsize, maxsize);
 
/* Check the size of the blob before examining buffer contents */
if (fw->size < minsize || fw->size > maxsize)
goto fail;
 
/*
* The GuC firmware image has the version number embedded at a well-known
* offset within the firmware blob; note that major / minor version are
* TWO bytes each (i.e. u16), although all pointers and offsets are defined
* in terms of bytes (u8).
*/
css_header = fw->data + UOS_CSS_HEADER_OFFSET;
guc_fw->guc_fw_major_found = *(u16 *)(css_header + UOS_VER_MAJOR_OFFSET);
guc_fw->guc_fw_minor_found = *(u16 *)(css_header + UOS_VER_MINOR_OFFSET);
 
if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n",
guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
err = -ENOEXEC;
goto fail;
}
 
DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
 
mutex_lock(&dev->struct_mutex);
obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR_OR_NULL(obj)) {
err = obj ? PTR_ERR(obj) : -ENOMEM;
goto fail;
}
 
guc_fw->guc_fw_obj = obj;
guc_fw->guc_fw_size = fw->size;
 
DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
guc_fw->guc_fw_obj);
 
release_firmware(fw);
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS;
return;
 
fail:
DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
err, fw, guc_fw->guc_fw_obj);
DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
guc_fw->guc_fw_path, err);
 
obj = guc_fw->guc_fw_obj;
if (obj)
drm_gem_object_unreference(&obj->base);
guc_fw->guc_fw_obj = NULL;
 
release_firmware(fw); /* OK even if fw is NULL */
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
}
 
/**
* intel_guc_ucode_init() - define parameters and fetch firmware
* @dev: drm device
*
* Called early during driver load, but after GEM is initialised.
*
* The firmware will be transferred to the GuC's memory later,
* when intel_guc_ucode_load() is called.
*/
void intel_guc_ucode_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
const char *fw_path;
 
if (!HAS_GUC_SCHED(dev))
i915.enable_guc_submission = false;
 
if (!HAS_GUC_UCODE(dev)) {
fw_path = NULL;
} else if (IS_SKYLAKE(dev)) {
fw_path = I915_SKL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = 4;
guc_fw->guc_fw_minor_wanted = 3;
} else {
i915.enable_guc_submission = false;
fw_path = ""; /* unknown device */
}
 
guc_fw->guc_dev = dev;
guc_fw->guc_fw_path = fw_path;
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
 
if (fw_path == NULL)
return;
 
if (*fw_path == '\0') {
DRM_ERROR("No GuC firmware known for this platform\n");
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
return;
}
 
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
guc_fw_fetch(dev, guc_fw);
/* status must now be FAIL or SUCCESS */
}
 
/**
* intel_guc_ucode_fini() - clean up all allocated resources
* @dev: drm device
*/
void intel_guc_ucode_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 
direct_interrupts_to_host(dev_priv);
i915_guc_submission_fini(dev);
 
mutex_lock(&dev->struct_mutex);
if (guc_fw->guc_fw_obj)
drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
guc_fw->guc_fw_obj = NULL;
mutex_unlock(&dev->struct_mutex);
 
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
}
/drivers/video/drm/i915/intel_hdmi.c
31,6 → 31,7
#include <linux/delay.h>
#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
112,17 → 113,18
}
}
 
static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
static u32 hsw_dip_data_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder,
struct drm_i915_private *dev_priv)
enum hdmi_infoframe_type type,
int i)
{
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_SPD:
return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_VENDOR:
return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder);
return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i);
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
173,10 → 175,14
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
u32 val = I915_READ(VIDEO_DIP_CTL);
 
if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
return val & VIDEO_DIP_ENABLE;
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
 
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
return false;
 
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
}
 
static void ibx_write_infoframe(struct drm_encoder *encoder,
222,10 → 228,19
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
 
return val & VIDEO_DIP_ENABLE;
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
 
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
return false;
 
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
 
static void cpt_write_infoframe(struct drm_encoder *encoder,
277,7 → 292,12
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
 
return val & VIDEO_DIP_ENABLE;
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
 
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
 
static void vlv_write_infoframe(struct drm_encoder *encoder,
323,10 → 343,19
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
 
return val & VIDEO_DIP_ENABLE;
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
 
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
return false;
 
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
 
static void hsw_write_infoframe(struct drm_encoder *encoder,
337,14 → 366,13
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
u32 data_reg;
int i;
u32 val = I915_READ(ctl_reg);
 
data_reg = hsw_infoframe_data_reg(type,
intel_crtc->config.cpu_transcoder,
dev_priv);
data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0);
if (data_reg == 0)
return;
 
353,12 → 381,14
 
mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(data_reg + i, *data);
I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
type, i >> 2), *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(data_reg + i, 0);
I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
type, i >> 2), 0);
mmiowb();
 
val |= hsw_infoframe_enable(type);
371,11 → 401,12
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 val = I915_READ(ctl_reg);
 
return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
VIDEO_DIP_ENABLE_VS_HSW);
return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
}
 
/*
418,7 → 449,7
}
 
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *adjusted_mode)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
425,9 → 456,6
union hdmi_infoframe frame;
int ret;
 
/* Set user selected PAR to incoming mode's member */
adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
 
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
adjusted_mode);
if (ret < 0) {
436,7 → 464,7
}
 
if (intel_hdmi->rgb_quant_range_selectable) {
if (intel_crtc->config.limited_color_range)
if (intel_crtc->config->limited_color_range)
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_LIMITED;
else
465,7 → 493,7
 
static void
intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *adjusted_mode)
{
union hdmi_infoframe frame;
int ret;
480,7 → 508,7
 
static void g4x_set_infoframes(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
505,7 → 533,13
if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
if (port != (val & VIDEO_DIP_PORT_MASK)) {
DRM_DEBUG_KMS("video DIP still enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
return;
}
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
513,9 → 547,9
 
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
val &= ~VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
DRM_DEBUG_KMS("video DIP already enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
return;
}
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
522,7 → 556,8
}
 
val |= VIDEO_DIP_ENABLE;
val &= ~VIDEO_DIP_ENABLE_VENDOR;
val &= ~(VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
 
I915_WRITE(reg, val);
POSTING_READ(reg);
532,9 → 567,100
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
 
static bool hdmi_sink_is_deep_color(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
 
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
/*
* HDMI cloning is only supported on g4x, which doesn't
* support deep color or GCP infoframes anyway so no
* need to worry about multiple HDMI sinks here.
*/
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder)
return connector->display_info.bpc > 8;
 
return false;
}
 
/*
* Determine if default_phase=1 can be indicated in the GCP infoframe.
*
* From HDMI specification 1.4a:
* - The first pixel of each Video Data Period shall always have a pixel packing phase of 0
* - The first pixel following each Video Data Period shall have a pixel packing phase of 0
* - The PP bits shall be constant for all GCPs and will be equal to the last packing phase
* - The first pixel following every transition of HSYNC or VSYNC shall have a pixel packing
* phase of 0
*/
static bool gcp_default_phase_possible(int pipe_bpp,
const struct drm_display_mode *mode)
{
unsigned int pixels_per_group;
 
switch (pipe_bpp) {
case 30:
/* 4 pixels in 5 clocks */
pixels_per_group = 4;
break;
case 36:
/* 2 pixels in 3 clocks */
pixels_per_group = 2;
break;
case 48:
/* 1 pixel in 2 clocks */
pixels_per_group = 1;
break;
default:
/* phase information not relevant for 8bpc */
return false;
}
 
return mode->crtc_hdisplay % pixels_per_group == 0 &&
mode->crtc_htotal % pixels_per_group == 0 &&
mode->crtc_hblank_start % pixels_per_group == 0 &&
mode->crtc_hblank_end % pixels_per_group == 0 &&
mode->crtc_hsync_start % pixels_per_group == 0 &&
mode->crtc_hsync_end % pixels_per_group == 0 &&
((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0 ||
mode->crtc_htotal/2 % pixels_per_group == 0);
}
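 
/*
 * Worked example (illustrative, not from the original source): at 30 bpp
 * (4 pixels per group) a mode with hdisplay/hblank start 4096, hsync
 * 4184-4224 and hblank end/htotal 4400 passes every check above, since
 * each value (and htotal/2 = 2200) is divisible by 4, so default_phase
 * can be indicated.
 */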
 
static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
u32 reg, val = 0;
 
if (HAS_DDI(dev_priv))
reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
else if (IS_VALLEYVIEW(dev_priv))
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
else if (HAS_PCH_SPLIT(dev_priv->dev))
reg = TVIDEO_DIP_GCP(crtc->pipe);
else
return false;
 
/* Indicate color depth whenever the sink supports deep color */
if (hdmi_sink_is_deep_color(encoder))
val |= GCP_COLOR_INDICATION;
 
/* Enable default_phase whenever the display mode is suitably aligned */
if (gcp_default_phase_possible(crtc->config->pipe_bpp,
&crtc->config->base.adjusted_mode))
val |= GCP_DEFAULT_PHASE_ENABLE;
 
I915_WRITE(reg, val);
 
return val != 0;
}
 
static void ibx_set_infoframes(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
552,7 → 678,9
if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
559,19 → 687,21
}
 
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
val &= ~VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
}
WARN(val & VIDEO_DIP_ENABLE,
"DIP already enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
 
val |= VIDEO_DIP_ENABLE;
val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_GCP);
val &= ~(VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 
if (intel_hdmi_set_gcp_infoframe(encoder))
val |= VIDEO_DIP_ENABLE_GCP;
 
I915_WRITE(reg, val);
POSTING_READ(reg);
 
582,7 → 712,7
 
static void cpt_set_infoframes(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
598,7 → 728,9
if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
607,8 → 739,11
/* Set both together, unset both together: see the spec. */
val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_GCP);
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 
if (intel_hdmi_set_gcp_infoframe(encoder))
val |= VIDEO_DIP_ENABLE_GCP;
 
I915_WRITE(reg, val);
POSTING_READ(reg);
 
619,7 → 754,7
 
static void vlv_set_infoframes(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
637,7 → 772,9
if (!enable) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
644,19 → 781,21
}
 
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
val &= ~VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
}
WARN(val & VIDEO_DIP_ENABLE,
"DIP already enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
 
val |= VIDEO_DIP_ENABLE;
val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR |
VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_GCP);
val &= ~(VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 
if (intel_hdmi_set_gcp_infoframe(encoder))
val |= VIDEO_DIP_ENABLE_GCP;
 
I915_WRITE(reg, val);
POSTING_READ(reg);
 
667,24 → 806,28
 
static void hsw_set_infoframes(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
u32 val = I915_READ(reg);
 
assert_hdmi_port_disabled(intel_hdmi);
 
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
 
if (!enable) {
I915_WRITE(reg, 0);
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
}
 
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
if (intel_hdmi_set_gcp_infoframe(encoder))
val |= VIDEO_DIP_ENABLE_GCP_HSW;
 
I915_WRITE(reg, val);
POSTING_READ(reg);
700,23 → 843,23
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
u32 hdmi_val;
 
hdmi_val = SDVO_ENCODING_HDMI;
if (!HAS_PCH_SPLIT(dev))
hdmi_val |= intel_hdmi->color_range;
if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
 
if (crtc->config.pipe_bpp > 24)
if (crtc->config->pipe_bpp > 24)
hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
else
hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
 
if (crtc->config.has_hdmi_sink)
if (crtc->config->has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
 
if (HAS_PCH_CPT(dev))
759,7 → 902,7
}
 
static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
792,7 → 935,7
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
 
pipe_config->adjusted_mode.flags |= flags;
pipe_config->base.adjusted_mode.flags |= flags;
 
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
dotclock = pipe_config->port_clock * 2 / 3;
799,59 → 942,147
else
dotclock = pipe_config->port_clock;
 
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
 
if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
 
pipe_config->adjusted_mode.crtc_clock = dotclock;
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
 
static void intel_enable_hdmi(struct intel_encoder *encoder)
static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
{
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
WARN_ON(!crtc->config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_audio_codec_enable(encoder);
}
 
static void g4x_enable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
u32 enable_bits = SDVO_ENABLE;
 
if (intel_crtc->config.has_audio)
enable_bits |= SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->hdmi_reg);
 
temp |= SDVO_ENABLE;
if (crtc->config->has_audio)
temp |= SDVO_AUDIO_ENABLE;
 
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
 
if (crtc->config->has_audio)
intel_enable_hdmi_audio(encoder);
}
 
static void ibx_enable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
 
temp = I915_READ(intel_hdmi->hdmi_reg);
 
/* HW workaround for IBX, we need to move the port to transcoder A
* before disabling it, so restore the transcoder select bit here. */
if (HAS_PCH_IBX(dev))
enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);
temp |= SDVO_ENABLE;
if (crtc->config->has_audio)
temp |= SDVO_AUDIO_ENABLE;
 
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
* we do this anyway, as it has proven more stable in testing.
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
 
/*
* HW workaround, need to toggle enable bit off and on
* for 12bpc with pixel repeat.
*
* FIXME: BSpec says this should be done at the end of
* the modeset sequence, so it's not clear whether this is too soon.
*/
if (crtc->config->pipe_bpp > 24 &&
crtc->config->pixel_multiplier > 1) {
I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->hdmi_reg);
 
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
}
 
temp |= enable_bits;
if (crtc->config->has_audio)
intel_enable_hdmi_audio(encoder);
}
 
static void cpt_enable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum pipe pipe = crtc->pipe;
u32 temp;
 
temp = I915_READ(intel_hdmi->hdmi_reg);
 
temp |= SDVO_ENABLE;
if (crtc->config->has_audio)
temp |= SDVO_AUDIO_ENABLE;
 
/*
* WaEnableHDMI8bpcBefore12bpc:snb,ivb
*
* The procedure for 12bpc is as follows:
* 1. disable HDMI clock gating
* 2. enable HDMI with 8bpc
* 3. enable HDMI with 12bpc
* 4. enable HDMI clock gating
*/
 
if (crtc->config->pipe_bpp > 24) {
I915_WRITE(TRANS_CHICKEN1(pipe),
I915_READ(TRANS_CHICKEN1(pipe)) |
TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
 
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= SDVO_COLOR_FORMAT_8bpc;
}
 
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
 
/* HW workaround, need to write this twice for issue that may result
* in first write getting masked.
*/
if (HAS_PCH_SPLIT(dev)) {
if (crtc->config->pipe_bpp > 24) {
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= HDMI_COLOR_FORMAT_12bpc;
 
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
 
I915_WRITE(TRANS_CHICKEN1(pipe),
I915_READ(TRANS_CHICKEN1(pipe)) &
~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
}
 
if (intel_crtc->config.has_audio) {
WARN_ON(!intel_crtc->config.has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
intel_audio_codec_enable(encoder);
if (crtc->config->has_audio)
intel_enable_hdmi_audio(encoder);
}
}
 
static void vlv_enable_hdmi(struct intel_encoder *encoder)
{
864,61 → 1095,63
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
 
if (crtc->config.has_audio)
intel_audio_codec_disable(encoder);
 
temp = I915_READ(intel_hdmi->hdmi_reg);
 
/* HW workaround for IBX, we need to move the port to transcoder A
* before disabling it. */
if (HAS_PCH_IBX(dev)) {
struct drm_crtc *crtc = encoder->base.crtc;
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
temp &= ~(SDVO_ENABLE | SDVO_AUDIO_ENABLE);
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
 
if (temp & SDVO_PIPE_B_SELECT) {
/*
* HW workaround for IBX, we need to move the port
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/
if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) {
temp &= ~SDVO_PIPE_B_SELECT;
temp |= SDVO_ENABLE;
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
 
/* Again we need to write this twice. */
temp &= ~SDVO_ENABLE;
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
 
/* Transcoder selection bits only update
* effectively on vblank. */
if (crtc)
intel_wait_for_vblank(dev, pipe);
else
msleep(50);
}
}
 
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
* we do this anyway, as it has proven more stable in testing.
*/
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->hdmi_reg);
intel_hdmi->set_infoframes(&encoder->base, false, NULL);
}
 
temp &= ~enable_bits;
static void g4x_disable_hdmi(struct intel_encoder *encoder)
{
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
if (crtc->config->has_audio)
intel_audio_codec_disable(encoder);
 
/* HW workaround, need to write this twice for issue that may result
* in first write getting masked.
*/
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
intel_disable_hdmi(encoder);
}
 
static void pch_disable_hdmi(struct intel_encoder *encoder)
{
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
if (crtc->config->has_audio)
intel_audio_codec_disable(encoder);
}
 
static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
static void pch_post_disable_hdmi(struct intel_encoder *encoder)
{
intel_disable_hdmi(encoder);
}
 
static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
930,39 → 1163,74
}
 
static enum drm_mode_status
hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_dvi_limit)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
if (clock < 25000)
return MODE_CLOCK_LOW;
if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit))
return MODE_CLOCK_HIGH;
 
/* BXT DPLL can't generate 223-240 MHz */
if (IS_BROXTON(dev) && clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
 
/* CHV DPLL can't generate 216-240 MHz */
if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000)
return MODE_CLOCK_RANGE;
 
return MODE_OK;
}
 
static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int clock = mode->clock;
struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
enum drm_mode_status status;
int clock;
 
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
 
clock = mode->clock;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
clock *= 2;
 
if (clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
true))
return MODE_CLOCK_HIGH;
if (clock < 20000)
return MODE_CLOCK_LOW;
/* check if we can do 8bpc */
status = hdmi_port_clock_valid(hdmi, clock, true);
 
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
/* if we can't do 8bpc we may still be able to do 12bpc */
if (!HAS_GMCH_DISPLAY(dev) && status != MODE_OK)
status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true);
 
return MODE_OK;
return status;
}
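 
/*
 * Note (illustrative): 12bpc carries 1.5x as many bits per pixel as 8bpc,
 * hence the clock * 3 / 2 retry above; e.g. a 148500 kHz 8bpc mode needs
 * a 222750 kHz port clock at 12bpc.
 */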
 
static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_atomic_state *state;
struct intel_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int count = 0, count_hdmi = 0;
int i;
 
if (HAS_GMCH_DISPLAY(dev))
return false;
 
for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc != crtc)
state = crtc_state->base.state;
 
for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->base.crtc)
continue;
 
encoder = to_intel_encoder(connector_state->best_encoder);
 
count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
count++;
}
975,13 → 1243,13
}
 
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
int clock_12bpc = clock_8bpc * 3 / 2;
int desired_bpp;
 
pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
991,20 → 1259,20
 
if (intel_hdmi->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
if (pipe_config->has_hdmi_sink &&
drm_match_cea_mode(adjusted_mode) > 1)
intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
else
intel_hdmi->color_range = 0;
pipe_config->limited_color_range =
pipe_config->has_hdmi_sink &&
drm_match_cea_mode(adjusted_mode) > 1;
} else {
pipe_config->limited_color_range =
intel_hdmi->limited_color_range;
}
 
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
pipe_config->pixel_multiplier = 2;
clock_8bpc *= 2;
clock_12bpc *= 2;
}
 
if (intel_hdmi->color_range)
pipe_config->limited_color_range = true;
 
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
pipe_config->has_pch_encoder = true;
 
1018,8 → 1286,8
* within limits.
*/
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
clock_12bpc <= portclock_limit &&
hdmi_12bpc_possible(encoder->new_crtc)) {
hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK &&
hdmi_12bpc_possible(pipe_config)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
 
1028,6 → 1296,8
} else {
DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
desired_bpp = 8*3;
 
pipe_config->port_clock = clock_8bpc;
}
 
if (!pipe_config->bw_constrained) {
1035,11 → 1305,15
pipe_config->pipe_bpp = desired_bpp;
}
 
if (adjusted_mode->crtc_clock > portclock_limit) {
DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
false) != MODE_OK) {
DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
return false;
}
 
/* Set user selected PAR to incoming mode's member */
adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
 
return true;
}
 
1057,24 → 1331,21
}
 
static bool
intel_hdmi_set_edid(struct drm_connector *connector)
intel_hdmi_set_edid(struct drm_connector *connector, bool force)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *intel_encoder =
&hdmi_to_dig_port(intel_hdmi)->base;
enum intel_display_power_domain power_domain;
struct edid *edid;
struct edid *edid = NULL;
bool connected = false;
 
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
if (force)
edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
 
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
 
to_intel_connector(connector)->detect_edid = edid;
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
1100,13 → 1371,29
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
bool live_status = false;
unsigned int try;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
 
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
for (try = 0; !live_status && try < 9; try++) {
if (try)
msleep(10);
live_status = intel_digital_port_connected(dev_priv,
hdmi_to_dig_port(intel_hdmi));
}
 
if (!live_status)
DRM_DEBUG_KMS("Live status not up!");
 
intel_hdmi_unset_edid(connector);
 
if (intel_hdmi_set_edid(connector)) {
if (intel_hdmi_set_edid(connector, live_status)) {
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
1114,6 → 1401,8
} else
status = connector_status_disconnected;
 
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
 
return status;
}
 
1130,7 → 1419,7
if (connector->status != connector_status_connected)
return;
 
intel_hdmi_set_edid(connector);
intel_hdmi_set_edid(connector, true);
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
}
 
1196,7 → 1485,7
 
if (property == dev_priv->broadcast_rgb_property) {
bool old_auto = intel_hdmi->color_range_auto;
uint32_t old_range = intel_hdmi->color_range;
bool old_range = intel_hdmi->limited_color_range;
 
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
1204,11 → 1493,11
break;
case INTEL_BROADCAST_RGB_FULL:
intel_hdmi->color_range_auto = false;
intel_hdmi->color_range = 0;
intel_hdmi->limited_color_range = false;
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_hdmi->color_range_auto = false;
intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
intel_hdmi->limited_color_range = true;
break;
default:
return -EINVAL;
1215,7 → 1504,7
}
 
if (old_auto == intel_hdmi->color_range_auto &&
old_range == intel_hdmi->color_range)
old_range == intel_hdmi->limited_color_range)
return 0;
 
goto done;
1251,13 → 1540,12
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
 
intel_hdmi_prepare(encoder);
 
intel_hdmi->set_infoframes(&encoder->base,
intel_crtc->config.has_hdmi_sink,
intel_crtc->config->has_hdmi_sink,
adjusted_mode);
}
 
1269,14 → 1557,13
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
u32 val;
 
/* Enable clock channels for this port */
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
val = 0;
if (pipe)
1299,15 → 1586,15
/* Program lane clock */
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
 
intel_hdmi->set_infoframes(&encoder->base,
intel_crtc->config.has_hdmi_sink,
intel_crtc->config->has_hdmi_sink,
adjusted_mode);
 
intel_enable_hdmi(encoder);
g4x_enable_hdmi(encoder);
 
vlv_wait_port_ready(dev_priv, dport);
vlv_wait_port_ready(dev_priv, dport, 0x0);
}
 
static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1323,7 → 1610,7
intel_hdmi_prepare(encoder);
 
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
1340,9 → 1627,53
 
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
}
 
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = crtc->pipe;
uint32_t val;
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
 
if (crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
}
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
 
if (crtc->config->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
}
}
 
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1356,8 → 1687,21
 
intel_hdmi_prepare(encoder);
 
mutex_lock(&dev_priv->dpio_lock);
/*
* Must trick the second common lane into life.
* Otherwise we can't even access the PLL.
*/
if (ch == DPIO_CH0 && pipe == PIPE_B)
dport->release_cl2_override =
!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
 
chv_phy_powergate_lanes(encoder, true, 0x0);
 
mutex_lock(&dev_priv->sb_lock);
 
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, true);
 
/* program left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1406,9 → 1750,42
val |= CHV_CMN_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
 
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
}
 
static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
u32 val;
 
mutex_lock(&dev_priv->sb_lock);
 
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
} else {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}
 
mutex_unlock(&dev_priv->sb_lock);
 
/*
* Leave the power down bit cleared for at least one
* lane so that chv_phy_powergate_ch() will power
* on something when the channel is otherwise unused.
* When the port is off and the override is removed
* the lanes power down anyway, so otherwise it doesn't
* really matter what the state of power down bits is
* after this.
*/
chv_phy_powergate_lanes(encoder, false, 0x0);
}
 
static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1419,43 → 1796,23
int pipe = intel_crtc->pipe;
 
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
}
 
static void chv_hdmi_post_disable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
u32 val;
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
 
/* Propagate soft reset to data lane reset */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, true);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
 
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
}
 
static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1466,14 → 1823,13
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
int data, i;
int data, i, stagger;
u32 val;
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
 
/* allow hardware to manage TX FIFO reset source */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
1484,30 → 1840,8
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
 
/* Deassert soft data lane reset*/
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
 
/* Program Tx latency optimal setting */
for (i = 0; i < 4; i++) {
/* Set the latency optimal bit */
data = (i == 1) ? 0x0 : 0x6;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
data << DPIO_FRC_LATENCY_SHFIT);
 
/* Set the upar bit */
data = (i == 1) ? 0x0 : 0x1;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
1515,8 → 1849,42
}
 
/* Data lane stagger programming */
/* FIXME: Fix up value only after power analysis */
if (intel_crtc->config->port_clock > 270000)
stagger = 0x18;
else if (intel_crtc->config->port_clock > 135000)
stagger = 0xd;
else if (intel_crtc->config->port_clock > 67500)
stagger = 0x7;
else if (intel_crtc->config->port_clock > 33750)
stagger = 0x4;
else
stagger = 0x2;
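	/*
	 * (Illustrative note: the thresholds above halve at each step,
	 * 270 MHz, 135 MHz, 67.5 MHz, 33.75 MHz, so the stagger value
	 * roughly doubles along with the port clock band. Per the FIXME
	 * these values are provisional until power analysis is done.)
	 */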
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
 
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
DPIO_TX1_STAGGER_MULT(6) |
DPIO_TX2_STAGGER_MULT(0));
 
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
DPIO_TX1_STAGGER_MULT(7) |
DPIO_TX2_STAGGER_MULT(5));
 
/* Deassert data lane reset */
chv_data_lane_soft_reset(encoder, false);
 
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
1551,12 → 1919,27
 
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
 
val &= ~DPIO_SWING_MARGIN000_MASK;
val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
 
/*
* Supposedly this value shouldn't matter when unique transition
* scale is disabled, but in fact it does matter. Let's just
* always program the same value and hope it's OK.
*/
val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
 
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
 
/* Disable unique transition scale */
/*
* The document says to set bit 27 for ch0 and bit 26
* for ch1, which might be a typo in the doc.
* For now, for this unique transition scale selection, set bit
* 27 for both ch0 and ch1.
*/
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
1563,19 → 1946,6
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
}
 
/* Additional steps for 1200mV-0dB */
#if 0
val = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW3(ch));
if (ch)
val |= DPIO_TX_UNIQ_TRANS_SCALE_CH1;
else
val |= DPIO_TX_UNIQ_TRANS_SCALE_CH0;
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(ch), val);
 
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(ch),
vlv_dpio_read(dev_priv, pipe, VLV_TX_DW2(ch)) |
(0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT));
#endif
/* Start swing calculation */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
1585,21 → 1955,22
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
 
/* LRC Bypass */
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
val |= DPIO_LRC_BYPASS;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
mutex_unlock(&dev_priv->sb_lock);
 
mutex_unlock(&dev_priv->dpio_lock);
 
intel_hdmi->set_infoframes(&encoder->base,
intel_crtc->config.has_hdmi_sink,
intel_crtc->config->has_hdmi_sink,
adjusted_mode);
 
intel_enable_hdmi(encoder);
g4x_enable_hdmi(encoder);
 
vlv_wait_port_ready(dev_priv, dport);
vlv_wait_port_ready(dev_priv, dport, 0x0);
 
/* Second common lane will stay alive on its own now */
if (dport->release_cl2_override) {
chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
dport->release_cl2_override = false;
}
}
 
static void intel_hdmi_destroy(struct drm_connector *connector)
{
1609,12 → 1980,15
}
 
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = intel_connector_dpms,
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_hdmi_detect,
.force = intel_hdmi_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_hdmi_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_hdmi_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
 
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
1628,15 → 2002,6
};
 
static void
intel_attach_aspect_ratio_property(struct drm_connector *connector)
{
if (!drm_mode_create_aspect_ratio_property(connector->dev))
drm_object_attach_property(&connector->base,
connector->dev->mode_config.aspect_ratio_property,
DRM_MODE_PICTURE_ASPECT_NONE);
}
 
static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
intel_attach_force_audio_property(connector);
1655,6 → 2020,7
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
 
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
1666,20 → 2032,55
 
switch (port) {
case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
if (IS_BROXTON(dev_priv))
intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
else
intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
/*
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
intel_encoder->hpd_pin = HPD_PORT_A;
else
intel_encoder->hpd_pin = HPD_PORT_B;
break;
case PORT_C:
intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
if (IS_BROXTON(dev_priv))
intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT;
else
intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
intel_encoder->hpd_pin = HPD_PORT_C;
break;
case PORT_D:
if (IS_CHERRYVIEW(dev))
intel_hdmi->ddc_bus = GMBUS_PORT_DPD_CHV;
if (WARN_ON(IS_BROXTON(dev_priv)))
intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED;
else if (IS_CHERRYVIEW(dev_priv))
intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV;
else
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
intel_encoder->hpd_pin = HPD_PORT_D;
break;
case PORT_E:
/* On SKL, PORT E doesn't have a separate GMBUS pin.
* We rely on VBT to set a proper alternate GMBUS pin. */
alternate_ddc_pin =
dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
switch (alternate_ddc_pin) {
case DDC_PIN_B:
intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
break;
case DDC_PIN_C:
intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
break;
case DDC_PIN_D:
intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
break;
default:
MISSING_CASE(alternate_ddc_pin);
}
intel_encoder->hpd_pin = HPD_PORT_E;
break;
case PORT_A:
intel_encoder->hpd_pin = HPD_PORT_A;
/* Internal port only for eDP. */
1719,6 → 2120,7
 
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_connector_register(connector);
intel_hdmi->attached_connector = intel_connector;
 
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
1740,7 → 2142,7
if (!intel_dig_port)
return;
 
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_dig_port);
return;
1752,7 → 2154,12
DRM_MODE_ENCODER_TMDS);
 
intel_encoder->compute_config = intel_hdmi_compute_config;
intel_encoder->disable = intel_disable_hdmi;
if (HAS_PCH_SPLIT(dev)) {
intel_encoder->disable = pch_disable_hdmi;
intel_encoder->post_disable = pch_post_disable_hdmi;
} else {
intel_encoder->disable = g4x_disable_hdmi;
}
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_CHERRYVIEW(dev)) {
1760,6 → 2167,7
intel_encoder->pre_enable = chv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = chv_hdmi_post_disable;
intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
} else if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = vlv_hdmi_pre_enable;
1767,7 → 2175,12
intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->pre_enable = intel_hdmi_pre_enable;
intel_encoder->enable = intel_enable_hdmi;
if (HAS_PCH_CPT(dev))
intel_encoder->enable = cpt_enable_hdmi;
else if (HAS_PCH_IBX(dev))
intel_encoder->enable = ibx_enable_hdmi;
else
intel_encoder->enable = g4x_enable_hdmi;
}
 
intel_encoder->type = INTEL_OUTPUT_HDMI;
/drivers/video/drm/i915/intel_i2c.c
34,20 → 34,71
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
struct gmbus_port {
struct gmbus_pin {
const char *name;
int reg;
};
 
static const struct gmbus_port gmbus_ports[] = {
{ "ssc", GPIOB },
{ "vga", GPIOA },
{ "panel", GPIOC },
{ "dpc", GPIOD },
{ "dpb", GPIOE },
{ "dpd", GPIOF },
/* Map gmbus pin pairs to names and registers. */
static const struct gmbus_pin gmbus_pins[] = {
[GMBUS_PIN_SSC] = { "ssc", GPIOB },
[GMBUS_PIN_VGADDC] = { "vga", GPIOA },
[GMBUS_PIN_PANEL] = { "panel", GPIOC },
[GMBUS_PIN_DPC] = { "dpc", GPIOD },
[GMBUS_PIN_DPB] = { "dpb", GPIOE },
[GMBUS_PIN_DPD] = { "dpd", GPIOF },
};
 
static const struct gmbus_pin gmbus_pins_bdw[] = {
[GMBUS_PIN_VGADDC] = { "vga", GPIOA },
[GMBUS_PIN_DPC] = { "dpc", GPIOD },
[GMBUS_PIN_DPB] = { "dpb", GPIOE },
[GMBUS_PIN_DPD] = { "dpd", GPIOF },
};
 
static const struct gmbus_pin gmbus_pins_skl[] = {
[GMBUS_PIN_DPC] = { "dpc", GPIOD },
[GMBUS_PIN_DPB] = { "dpb", GPIOE },
[GMBUS_PIN_DPD] = { "dpd", GPIOF },
};
 
static const struct gmbus_pin gmbus_pins_bxt[] = {
[GMBUS_PIN_1_BXT] = { "dpb", PCH_GPIOB },
[GMBUS_PIN_2_BXT] = { "dpc", PCH_GPIOC },
[GMBUS_PIN_3_BXT] = { "misc", PCH_GPIOD },
};
 
/* pin is expected to be valid */
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
unsigned int pin)
{
if (IS_BROXTON(dev_priv))
return &gmbus_pins_bxt[pin];
else if (IS_SKYLAKE(dev_priv))
return &gmbus_pins_skl[pin];
else if (IS_BROADWELL(dev_priv))
return &gmbus_pins_bdw[pin];
else
return &gmbus_pins[pin];
}
 
bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
unsigned int pin)
{
unsigned int size;
 
if (IS_BROXTON(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bxt);
else if (IS_SKYLAKE(dev_priv))
size = ARRAY_SIZE(gmbus_pins_skl);
else if (IS_BROADWELL(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bdw);
else
size = ARRAY_SIZE(gmbus_pins);
 
return pin < size && get_gmbus_pin(dev_priv, pin)->reg;
}
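 
/*
 * Illustrative note: the per-platform tables above use designated
 * initializers, so any pin pair not listed (e.g. GMBUS_PIN_SSC on SKL)
 * is zero-filled and ends up with .reg == 0, which is exactly what the
 * ->reg test above rejects in addition to the plain range check.
 */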
 
/* Intel GPIO access functions */
 
#define I2C_RISEFALL_TIME 10
63,8 → 114,8
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
I915_WRITE(GMBUS0, 0);
I915_WRITE(GMBUS4, 0);
}
 
static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
182,7 → 233,7
}
 
static void
intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
struct i2c_algo_bit_data *algo;
189,8 → 240,8
 
algo = &bus->bit_algo;
 
/* -1 to map pin pair to gmbus index */
bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg;
bus->gpio_reg = dev_priv->gpio_mmio_base +
get_gmbus_pin(dev_priv, pin)->reg;
 
bus->adapter.algo_data = algo;
algo->setsda = set_data;
210,7 → 261,6
u32 gmbus4_irq_en)
{
int i;
int reg_offset = dev_priv->gpio_mmio_base;
u32 gmbus2 = 0;
DEFINE_WAIT(wait);
 
220,13 → 270,13
/* Important: The hw handles only the first bit, so set only one! Since
* we also need to check for NAKs besides the hw ready/idle signal, we
* need to wake up periodically and check that ourselves. */
I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);
I915_WRITE(GMBUS4, gmbus4_irq_en);
 
for (i = 0; i < msecs_to_jiffies_timeout(50); i++) {
prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
 
gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset);
gmbus2 = I915_READ_NOTRACE(GMBUS2);
if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
break;
 
234,7 → 284,7
}
finish_wait(&dev_priv->gmbus_wait_queue, &wait);
 
I915_WRITE(GMBUS4 + reg_offset, 0);
I915_WRITE(GMBUS4, 0);
 
if (gmbus2 & GMBUS_SATOER)
return -ENXIO;
247,20 → 297,19
gmbus_wait_idle(struct drm_i915_private *dev_priv)
{
int ret;
int reg_offset = dev_priv->gpio_mmio_base;
 
#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0)
#define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0)
 
if (!HAS_GMBUS_IRQ(dev_priv->dev))
return wait_for(C, 10);
 
/* Important: The hw handles only the first bit, so set only one! */
I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);
I915_WRITE(GMBUS4, GMBUS_IDLE_EN);
 
ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
msecs_to_jiffies_timeout(10));
 
I915_WRITE(GMBUS4 + reg_offset, 0);
I915_WRITE(GMBUS4, 0);
 
if (ret)
return 0;
270,18 → 319,15
}
 
static int
gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
unsigned short addr, u8 *buf, unsigned int len,
u32 gmbus1_index)
{
int reg_offset = dev_priv->gpio_mmio_base;
u16 len = msg->len;
u8 *buf = msg->buf;
 
I915_WRITE(GMBUS1 + reg_offset,
I915_WRITE(GMBUS1,
gmbus1_index |
GMBUS_CYCLE_WAIT |
(len << GMBUS_BYTE_COUNT_SHIFT) |
(msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
(addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
int ret;
292,7 → 338,7
if (ret)
return ret;
 
val = I915_READ(GMBUS3 + reg_offset);
val = I915_READ(GMBUS3);
do {
*buf++ = val & 0xff;
val >>= 8;
303,11 → 349,34
}
 
static int
gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
u32 gmbus1_index)
{
int reg_offset = dev_priv->gpio_mmio_base;
u16 len = msg->len;
u8 *buf = msg->buf;
unsigned int rx_size = msg->len;
unsigned int len;
int ret;
 
do {
len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
 
ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
buf, len, gmbus1_index);
if (ret)
return ret;
 
rx_size -= len;
buf += len;
} while (rx_size != 0);
 
return 0;
}
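 
/*
 * Worked example (illustrative, assuming GMBUS_BYTE_COUNT_MAX is 256):
 * a 300-byte read is issued as a 256-byte chunk followed by a 44-byte
 * chunk, each programmed as a full GMBUS1 cycle of its own.
 */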
 
static int
gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
unsigned short addr, u8 *buf, unsigned int len)
{
unsigned int chunk_size = len;
u32 val, loop;
 
val = loop = 0;
316,11 → 385,11
len -= 1;
}
 
I915_WRITE(GMBUS3 + reg_offset, val);
I915_WRITE(GMBUS1 + reg_offset,
I915_WRITE(GMBUS3, val);
I915_WRITE(GMBUS1,
GMBUS_CYCLE_WAIT |
(msg->len << GMBUS_BYTE_COUNT_SHIFT) |
(msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
(chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
(addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
330,7 → 399,7
val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);
 
I915_WRITE(GMBUS3 + reg_offset, val);
I915_WRITE(GMBUS3, val);
 
ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
GMBUS_HW_RDY_EN);
337,9 → 406,32
if (ret)
return ret;
}
 
return 0;
}
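 
/*
 * Worked example (illustrative): for a 6-byte write, bytes 0-3 are packed
 * little-endian into GMBUS3 before GMBUS1 kicks off the cycle with a byte
 * count of 6; bytes 4-5 are then loaded into GMBUS3 once GMBUS_HW_RDY
 * indicates the hardware has consumed the first dword.
 */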
 
static int
gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
{
u8 *buf = msg->buf;
unsigned int tx_size = msg->len;
unsigned int len;
int ret;
 
do {
len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
 
ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
if (ret)
return ret;
 
buf += len;
tx_size -= len;
} while (tx_size != 0);
 
return 0;
}
 
/*
* The gmbus controller can combine a 1 or 2 byte write with a read that
* immediately follows it by using an "INDEX" cycle.
355,7 → 447,6
static int
gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
{
int reg_offset = dev_priv->gpio_mmio_base;
u32 gmbus1_index = 0;
u32 gmbus5 = 0;
int ret;
369,13 → 460,13
 
/* GMBUS5 holds 16-bit index */
if (gmbus5)
I915_WRITE(GMBUS5 + reg_offset, gmbus5);
I915_WRITE(GMBUS5, gmbus5);
 
ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
 
/* Clear GMBUS5 after each index transfer */
if (gmbus5)
I915_WRITE(GMBUS5 + reg_offset, 0);
I915_WRITE(GMBUS5, 0);
 
return ret;
}
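 
/*
 * Illustrative sketch (hypothetical helper, names are assumptions): the
 * "index read" fast path above matches a 1- or 2-byte write immediately
 * followed by a read of the same slave, which is exactly the shape of a
 * DDC/EDID fetch:
 */
static int __maybe_unused example_edid_index_read(struct i2c_adapter *adapter,
						  u8 *buf, u16 len)
{
	u8 start = 0;	/* EDID byte offset, becomes the GMBUS index */
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &start },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = len, .buf = buf },
	};
 
	/* gmbus_xfer() routes this pair through gmbus_xfer_index_read() */
	return i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
}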
389,10 → 480,10
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
int i, reg_offset;
int i = 0, inc, try = 0;
int ret = 0;
 
intel_aux_display_runtime_get(dev_priv);
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
mutex_lock(&dev_priv->gmbus_mutex);
 
if (bus->force_bit) {
400,14 → 491,14
goto out;
}
 
reg_offset = dev_priv->gpio_mmio_base;
retry:
I915_WRITE(GMBUS0, bus->reg0);
 
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
 
for (i = 0; i < num; i++) {
for (; i < num; i += inc) {
inc = 1;
if (gmbus_is_index_read(msgs, i, num)) {
ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
i += 1; /* set i to the index of the read xfer */
inc = 2; /* an index read is two msgs */
} else if (msgs[i].flags & I2C_M_RD) {
ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
431,7 → 522,7
* a STOP on the very first cycle. To simplify the code we
* unconditionally generate the STOP condition with an additional gmbus
* cycle. */
I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
I915_WRITE(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
 
/* Mark the GMBUS interface as disabled after waiting for idle.
* We will re-enable it at the start of the next xfer,
442,7 → 533,7
adapter->name);
ret = -ETIMEDOUT;
}
I915_WRITE(GMBUS0 + reg_offset, 0);
I915_WRITE(GMBUS0, 0);
ret = ret ?: i;
goto out;
 
471,20 → 562,32
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
I915_WRITE(GMBUS1 + reg_offset, 0);
I915_WRITE(GMBUS0 + reg_offset, 0);
I915_WRITE(GMBUS1, GMBUS_SW_CLR_INT);
I915_WRITE(GMBUS1, 0);
I915_WRITE(GMBUS0, 0);
 
DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
adapter->name, msgs[i].addr,
(msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
 
/*
* Passive adapters sometimes NAK the first probe. Retry the first
* message once on -ENXIO for GMBUS transfers; the bit banging algorithm
* has retries internally. See also the retry loop in
* drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
*/
if (ret == -ENXIO && i == 0 && try++ == 0) {
DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
adapter->name);
goto retry;
}
 
goto out;
 
timeout:
DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
bus->adapter.name, bus->reg0 & 0xff);
I915_WRITE(GMBUS0 + reg_offset, 0);
I915_WRITE(GMBUS0, 0);
 
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
bus->force_bit = 1;
492,7 → 595,9
 
out:
mutex_unlock(&dev_priv->gmbus_mutex);
intel_aux_display_runtime_put(dev_priv);
 
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
 
return ret;
}
 
517,7 → 622,9
int intel_setup_gmbus(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
struct intel_gmbus *bus;
unsigned int pin;
int ret;
 
if (HAS_PCH_NOP(dev))
return 0;
531,16 → 638,18
mutex_init(&dev_priv->gmbus_mutex);
init_waitqueue_head(&dev_priv->gmbus_wait_queue);
 
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
u32 port = i + 1; /* +1 to map gmbus index to pin pair */
for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
if (!intel_gmbus_is_valid_pin(dev_priv, pin))
continue;
 
bus = &dev_priv->gmbus[pin];
 
bus->adapter.owner = THIS_MODULE;
bus->adapter.class = I2C_CLASS_DDC;
snprintf(bus->adapter.name,
sizeof(bus->adapter.name),
"i915 gmbus %s",
gmbus_ports[i].name);
get_gmbus_pin(dev_priv, pin)->name);
 
bus->adapter.dev.parent = &dev->pdev->dev;
bus->dev_priv = dev_priv;
548,13 → 657,13
bus->adapter.algo = &gmbus_algorithm;
 
/* By default use a conservative clock rate */
bus->reg0 = port | GMBUS_RATE_100KHZ;
bus->reg0 = pin | GMBUS_RATE_100KHZ;
 
/* gmbus seems to be broken on i830 */
if (IS_I830(dev))
bus->force_bit = 1;
 
intel_gpio_setup(bus, port);
intel_gpio_setup(bus, pin);
 
ret = i2c_add_adapter(&bus->adapter);
if (ret)
566,8 → 675,11
return 0;
 
err:
while (--i) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
while (--pin) {
if (!intel_gmbus_is_valid_pin(dev_priv, pin))
continue;
 
bus = &dev_priv->gmbus[pin];
i2c_del_adapter(&bus->adapter);
}
return ret;
574,12 → 686,12
}
 
struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
unsigned port)
unsigned int pin)
{
WARN_ON(!intel_gmbus_is_port_valid(port));
/* -1 to map pin pair to gmbus index */
return (intel_gmbus_is_port_valid(port)) ?
&dev_priv->gmbus[port - 1].adapter : NULL;
if (WARN_ON(!intel_gmbus_is_valid_pin(dev_priv, pin)))
return NULL;
 
return &dev_priv->gmbus[pin].adapter;
}
 
void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
602,10 → 714,14
void intel_teardown_gmbus(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
struct intel_gmbus *bus;
unsigned int pin;
 
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
if (!intel_gmbus_is_valid_pin(dev_priv, pin))
continue;
 
bus = &dev_priv->gmbus[pin];
i2c_del_adapter(&bus->adapter);
}
}
/drivers/video/drm/i915/intel_lrc.c
135,7 → 135,7
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "intel_drv.h"
#include "i915_drv.h"
#include "intel_mocs.h"
 
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
189,13 → 189,28
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)
 
#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \
const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
}
 
#define ASSIGN_CTX_PML4(ppgtt, reg_state) { \
reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
}
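 
/*
 * Illustrative expansion: ASSIGN_CTX_PDP(ppgtt, reg_state, 3) pastes the
 * PDP number into the register-state indices, i.e.
 *
 *	const u64 _addr = i915_page_dir_dma_addr(ppgtt, 3);
 *	reg_state[CTX_PDP3_UDW + 1] = upper_32_bits(_addr);
 *	reg_state[CTX_PDP3_LDW + 1] = lower_32_bits(_addr);
 */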
 
enum {
ADVANCED_CONTEXT = 0,
LEGACY_CONTEXT,
LEGACY_32B_CONTEXT,
ADVANCED_AD_CONTEXT,
LEGACY_64B_CONTEXT
};
#define GEN8_CTX_MODE_SHIFT 3
#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
#define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
LEGACY_64B_CONTEXT :\
LEGACY_32B_CONTEXT)
enum {
FAULT_AND_HANG = 0,
FAULT_AND_HALT, /* Debug only */
203,10 → 218,13
FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
 
static int intel_lr_context_pin(struct intel_engine_cs *ring,
struct intel_context *ctx);
static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
struct drm_i915_gem_object *default_ctx_obj);
 
 
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
* @dev: DRM device.
213,8 → 231,7
* @enable_execlists: value of i915.enable_execlists module parameter.
*
* Only certain platforms support Execlists (the prerequisites being
* support for Logical Ring Contexts and Aliasing PPGTT or better),
* and only when enabled via module parameter.
* support for Logical Ring Contexts and Aliasing PPGTT or better).
*
* Return: 1 if Execlists is supported and has to be enabled.
*/
222,6 → 239,12
{
WARN_ON(i915.enable_ppgtt == -1);
 
/* On platforms with execlist available, vGPU will only
* support execlist mode, no ring buffer mode.
*/
if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
return 1;
 
if (INTEL_INFO(dev)->gen >= 9)
return 1;
 
249,7 → 272,8
*/
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
{
u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj);
u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
LRC_PPHWSP_PN * PAGE_SIZE;
 
/* LRCA is required to be 4K aligned so the more significant 20 bits
* are globally unique */
256,15 → 280,28
return lrca >> 12;
}
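 
/* E.g. an LRCA of 0x0012f000 (4K aligned, as required) gives ctx id 0x12f. */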
 
static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
 
return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
(ring->id == VCS || ring->id == VCS2);
}
 
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
uint64_t desc;
uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
LRC_PPHWSP_PN * PAGE_SIZE;
 
WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
 
desc = GEN8_CTX_VALID;
desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
if (IS_GEN8(ctx_obj->base.dev))
desc |= GEN8_CTX_L3LLC_COHERENT;
desc |= GEN8_CTX_PRIVILEGE;
desc |= lrca;
274,143 → 311,109
* signalling between Command Streamers */
/* desc |= GEN8_CTX_FORCE_RESTORE; */
 
/* WaEnableForceRestoreInCtxtDescForVCS:skl */
/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
if (disable_lite_restore_wa(ring))
desc |= GEN8_CTX_FORCE_RESTORE;
 
return desc;
}
 
static void execlists_elsp_write(struct intel_engine_cs *ring,
struct drm_i915_gem_object *ctx_obj0,
struct drm_i915_gem_object *ctx_obj1)
static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
struct drm_i915_gem_request *rq1)
{
 
struct intel_engine_cs *ring = rq0->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t temp = 0;
uint32_t desc[4];
unsigned long flags;
uint64_t desc[2];
 
/* XXX: You must always write both descriptors in the order below. */
if (ctx_obj1)
temp = execlists_ctx_descriptor(ctx_obj1);
else
temp = 0;
desc[1] = (u32)(temp >> 32);
desc[0] = (u32)temp;
 
temp = execlists_ctx_descriptor(ctx_obj0);
desc[3] = (u32)(temp >> 32);
desc[2] = (u32)temp;
 
/* Set Force Wakeup bit to prevent GT from entering C6 while ELSP writes
* are in progress.
*
* The other problem is that we can't just call gen6_gt_force_wake_get()
* because that function calls intel_runtime_pm_get(), which might sleep.
* Instead, we do the runtime_pm_get/put when creating/destroying requests.
*/
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
if (dev_priv->uncore.fw_rendercount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_RENDER);
if (dev_priv->uncore.fw_mediacount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_MEDIA);
if (INTEL_INFO(dev)->gen >= 9) {
if (dev_priv->uncore.fw_blittercount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_BLITTER);
}
if (rq1) {
desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring);
rq1->elsp_submitted++;
} else {
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_ALL);
desc[1] = 0;
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 
I915_WRITE(RING_ELSP(ring), desc[1]);
I915_WRITE(RING_ELSP(ring), desc[0]);
I915_WRITE(RING_ELSP(ring), desc[3]);
/* The context is automatically loaded after the following */
I915_WRITE(RING_ELSP(ring), desc[2]);
desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring);
rq0->elsp_submitted++;
 
/* ELSP is a wo register, so use another nearby reg for posting instead */
POSTING_READ(RING_EXECLIST_STATUS(ring));
/* You must always write both descriptors in the order below. */
spin_lock(&dev_priv->uncore.lock);
intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
 
/* Release Force Wakeup (see the big comment above). */
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
if (--dev_priv->uncore.fw_rendercount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_RENDER);
if (--dev_priv->uncore.fw_mediacount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_MEDIA);
if (INTEL_INFO(dev)->gen >= 9) {
if (--dev_priv->uncore.fw_blittercount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_BLITTER);
}
} else {
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_ALL);
}
I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
/* The context is automatically loaded after the following */
I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
 
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
/* ELSP is a wo register, use another nearby reg for posting */
POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring));
intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
spin_unlock(&dev_priv->uncore.lock);
}
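 
/*
 * Illustrative note: ELSP is a single submit port that expects exactly
 * four dword writes per submission, element 1's descriptor (upper then
 * lower half) followed by element 0's; element 0 is the context the
 * hardware executes first. Hence the fixed write order above and the
 * posting read on a neighbouring register.
 */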
 
static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
struct drm_i915_gem_object *ring_obj,
u32 tail)
static int execlists_update_context(struct drm_i915_gem_request *rq)
{
struct intel_engine_cs *ring = rq->ring;
struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
struct page *page;
uint32_t *reg_state;
 
page = i915_gem_object_get_page(ctx_obj, 1);
BUG_ON(!ctx_obj);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
 
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page);
 
reg_state[CTX_RING_TAIL+1] = tail;
reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
reg_state[CTX_RING_TAIL+1] = rq->tail;
reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
 
if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
/* True 32b PPGTT with dynamic page allocation: update PDP
* registers and point the unallocated PDPs to scratch page.
* PML4 is allocated during ppgtt init, so this is not needed
* in 48-bit mode.
*/
ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}
 
kunmap_atomic(reg_state);
 
return 0;
}
 
static void execlists_submit_contexts(struct intel_engine_cs *ring,
struct intel_context *to0, u32 tail0,
struct intel_context *to1, u32 tail1)
static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
struct drm_i915_gem_request *rq1)
{
struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
struct drm_i915_gem_object *ctx_obj1 = NULL;
struct intel_ringbuffer *ringbuf1 = NULL;
execlists_update_context(rq0);
 
BUG_ON(!ctx_obj0);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
if (rq1)
execlists_update_context(rq1);
 
execlists_update_context(ctx_obj0, ringbuf0->obj, tail0);
 
if (to1) {
ringbuf1 = to1->engine[ring->id].ringbuf;
ctx_obj1 = to1->engine[ring->id].state;
BUG_ON(!ctx_obj1);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
 
execlists_update_context(ctx_obj1, ringbuf1->obj, tail1);
execlists_elsp_write(rq0, rq1);
}
 
execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
}
 
static void execlists_context_unqueue(struct intel_engine_cs *ring)
{
struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
struct drm_i915_gem_request *cursor = NULL, *tmp = NULL;
 
assert_spin_locked(&ring->execlist_lock);
 
/*
* If irqs are not active generate a warning as batches that finish
* without the irqs may get lost and a GPU Hang may occur.
*/
WARN_ON(!intel_irqs_enabled(ring->dev->dev_private));
 
if (list_empty(&ring->execlist_queue))
return;
 
433,26 → 436,40
}
}
 
if (IS_GEN8(ring->dev) || IS_GEN9(ring->dev)) {
/*
* WaIdleLiteRestore: make sure we never cause a lite
* restore with HEAD==TAIL
*/
if (req0->elsp_submitted) {
/*
* Apply the wa NOOPS to prevent ring:HEAD == req:TAIL
* as we resubmit the request. See gen8_emit_request()
* for where we prepare the padding after the end of the
* request.
*/
struct intel_ringbuffer *ringbuf;
 
ringbuf = req0->ctx->engine[ring->id].ringbuf;
req0->tail += 8;
req0->tail &= ringbuf->size - 1;
}
}
 
WARN_ON(req1 && req1->elsp_submitted);
 
execlists_submit_contexts(ring, req0->ctx, req0->tail,
req1 ? req1->ctx : NULL,
req1 ? req1->tail : 0);
 
req0->elsp_submitted++;
if (req1)
req1->elsp_submitted++;
execlists_submit_requests(req0, req1);
}
 
static bool execlists_check_remove_request(struct intel_engine_cs *ring,
u32 request_id)
{
struct intel_ctx_submit_request *head_req;
struct drm_i915_gem_request *head_req;
 
assert_spin_locked(&ring->execlist_lock);
 
head_req = list_first_entry_or_null(&ring->execlist_queue,
struct intel_ctx_submit_request,
struct drm_i915_gem_request,
execlist_link);
 
if (head_req != NULL) {
475,19 → 492,19
}
 
/**
* intel_execlists_handle_ctx_events() - handle Context Switch interrupts
* intel_lrc_irq_handler() - handle Context Switch interrupts
* @ring: Engine Command Streamer to handle.
*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
*/
void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
void intel_lrc_irq_handler(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 status_pointer;
u8 read_pointer;
u8 write_pointer;
u32 status;
u32 status = 0;
u32 status_id;
u32 submit_contexts = 0;
 
494,19 → 511,20
status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
 
read_pointer = ring->next_context_status_buffer;
write_pointer = status_pointer & 0x07;
write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
if (read_pointer > write_pointer)
write_pointer += 6;
write_pointer += GEN8_CSB_ENTRIES;
 
spin_lock(&ring->execlist_lock);
 
while (read_pointer < write_pointer) {
read_pointer++;
status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
(read_pointer % 6) * 8);
status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
(read_pointer % 6) * 8 + 4);
status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % GEN8_CSB_ENTRIES));
status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % GEN8_CSB_ENTRIES));
 
if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
continue;
 
if (status & GEN8_CTX_STATUS_PREEMPTED) {
if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
if (execlists_check_remove_request(ring, status_id))
522,55 → 540,51
}
}
 
if (submit_contexts != 0)
if (disable_lite_restore_wa(ring)) {
/* Prevent a ctx to preempt itself */
if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
(submit_contexts != 0))
execlists_context_unqueue(ring);
} else if (submit_contexts != 0) {
execlists_context_unqueue(ring);
}
 
spin_unlock(&ring->execlist_lock);
 
WARN(submit_contexts > 2, "More than two context complete events?\n");
ring->next_context_status_buffer = write_pointer % 6;
ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
 
I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
((u32)ring->next_context_status_buffer & 0x07) << 8);
_MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
((u32)ring->next_context_status_buffer &
GEN8_CSB_PTR_MASK) << 8));
}
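 
/*
 * Worked example (illustrative): with GEN8_CSB_ENTRIES == 6, a
 * next_context_status_buffer of 5 and a hardware write pointer of 1 give
 * write_pointer = 1 + 6 = 7, so the loop above consumes entries
 * 6 % 6 = 0 and 7 % 6 = 1, i.e. exactly the two CSB entries written
 * since the previous interrupt.
 */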
 
static int execlists_context_queue(struct intel_engine_cs *ring,
struct intel_context *to,
u32 tail)
static int execlists_context_queue(struct drm_i915_gem_request *request)
{
struct intel_ctx_submit_request *req = NULL, *cursor;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
unsigned long flags;
struct intel_engine_cs *ring = request->ring;
struct drm_i915_gem_request *cursor;
int num_elements = 0;
 
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (req == NULL)
return -ENOMEM;
req->ctx = to;
i915_gem_context_reference(req->ctx);
if (request->ctx != ring->default_context)
intel_lr_context_pin(request);
 
if (to != ring->default_context)
intel_lr_context_pin(ring, to);
i915_gem_request_reference(request);
 
req->ring = ring;
req->tail = tail;
spin_lock_irq(&ring->execlist_lock);
 
intel_runtime_pm_get(dev_priv);
 
spin_lock_irqsave(&ring->execlist_lock, flags);
 
list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
if (++num_elements > 2)
break;
 
if (num_elements > 2) {
struct intel_ctx_submit_request *tail_req;
struct drm_i915_gem_request *tail_req;
 
tail_req = list_last_entry(&ring->execlist_queue,
struct intel_ctx_submit_request,
struct drm_i915_gem_request,
execlist_link);
 
if (to == tail_req->ctx) {
if (request->ctx == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
list_del(&tail_req->execlist_link);
579,18 → 593,18
}
}
 
list_add_tail(&req->execlist_link, &ring->execlist_queue);
list_add_tail(&request->execlist_link, &ring->execlist_queue);
if (num_elements == 0)
execlists_context_unqueue(ring);
 
spin_unlock_irqrestore(&ring->execlist_lock, flags);
spin_unlock_irq(&ring->execlist_lock);
 
return 0;
}
 
static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = ringbuf->ring;
struct intel_engine_cs *ring = req->ring;
uint32_t flush_domains;
int ret;
 
598,7 → 612,7
if (ring->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
 
ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
 
606,10 → 620,10
return 0;
}
 
static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
struct intel_engine_cs *ring = ringbuf->ring;
const unsigned other_rings = ~intel_ring_flag(req->ring);
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
618,9 → 632,11
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
 
ret = i915_gem_object_sync(obj, ring);
if (obj->active & other_rings) {
ret = i915_gem_object_sync(obj, req->ring, &req);
if (ret)
return ret;
}
 
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
flush_chipset |= i915_gem_clflush_object(obj, false);
634,10 → 650,205
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
return logical_ring_invalidate_all_caches(ringbuf);
return logical_ring_invalidate_all_caches(req);
}
 
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
int ret;
 
request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
 
if (request->ctx != request->ring->default_context) {
ret = intel_lr_context_pin(request);
if (ret)
return ret;
}
 
return 0;
}
 
static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
int bytes)
{
struct intel_ringbuffer *ringbuf = req->ringbuf;
struct intel_engine_cs *ring = req->ring;
struct drm_i915_gem_request *target;
unsigned space;
int ret;
 
if (intel_ring_space(ringbuf) >= bytes)
return 0;
 
/* The whole point of reserving space is to not wait! */
WARN_ON(ringbuf->reserved_in_use);
 
list_for_each_entry(target, &ring->request_list, list) {
/*
* The request queue is per-engine, so it can contain requests
* from multiple ringbuffers. Here, we must ignore any that
* aren't from the ringbuffer we're considering.
*/
if (target->ringbuf != ringbuf)
continue;
 
/* Would completion of this request free enough space? */
space = __intel_ring_space(target->postfix, ringbuf->tail,
ringbuf->size);
if (space >= bytes)
break;
}
 
if (WARN_ON(&target->list == &ring->request_list))
return -ENOSPC;
 
ret = i915_wait_request(target);
if (ret)
return ret;
 
ringbuf->space = space;
return 0;
}
 
/*
* intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
* @request: Request to advance the logical ringbuffer of.
*
* The tail is updated in our logical ringbuffer struct, not in the actual context. What
* really happens during submission is that the context and current tail will be placed
* on a queue waiting for the ELSP to be ready to accept a new context submission. At that
* point, the tail *inside* the context is updated and the ELSP written to.
*/
static void
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *ring = request->ring;
struct drm_i915_private *dev_priv = request->i915;
 
intel_logical_ring_advance(request->ringbuf);
 
request->tail = request->ringbuf->tail;
 
if (intel_ring_stopped(ring))
return;
 
if (dev_priv->guc.execbuf_client)
i915_guc_submit(dev_priv->guc.execbuf_client, request);
else
execlists_context_queue(request);
}
 
static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
{
uint32_t __iomem *virt;
int rem = ringbuf->size - ringbuf->tail;
 
virt = ringbuf->virtual_start + ringbuf->tail;
rem /= 4;
while (rem--)
iowrite32(MI_NOOP, virt++);
 
ringbuf->tail = 0;
intel_ring_update_space(ringbuf);
}
 
static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
{
struct intel_ringbuffer *ringbuf = req->ringbuf;
int remain_usable = ringbuf->effective_size - ringbuf->tail;
int remain_actual = ringbuf->size - ringbuf->tail;
int ret, total_bytes, wait_bytes = 0;
bool need_wrap = false;
 
if (ringbuf->reserved_in_use)
total_bytes = bytes;
else
total_bytes = bytes + ringbuf->reserved_size;
 
if (unlikely(bytes > remain_usable)) {
/*
* Not enough space for the basic request. So need to flush
* out the remainder and then wait for base + reserved.
*/
wait_bytes = remain_actual + total_bytes;
need_wrap = true;
} else {
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
* falls off the end. So we only need to wait for the
* reserved size after flushing out the remainder.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
}
}
 
if (wait_bytes) {
ret = logical_ring_wait_for_space(req, wait_bytes);
if (unlikely(ret))
return ret;
 
if (need_wrap)
__wrap_ring_buffer(ringbuf);
}
 
return 0;
}
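 
/*
 * Worked example (illustrative, assuming effective_size == size here,
 * reserved_size = 0 and no reservation in use): with size = 16 KiB and
 * tail = 16 KiB - 128, remain_usable = remain_actual = 128. A request for
 * bytes = 256 does not fit contiguously, so we wait for remain_actual +
 * total_bytes and then NOP-fill the last 128 bytes via
 * __wrap_ring_buffer() before emitting from offset 0.
 */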
 
/**
* intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
*
* @req: The request to start some new work for
* @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
*
* The ringbuffer might not be ready to accept the commands right away (maybe it needs to
* be wrapped, or to wait a bit for the tail to be updated). This function takes care of that
* and also preallocates a request (every workload submission is still mediated through
* requests, just as with legacy ringbuffer submission).
*
* Return: non-zero if the ringbuffer is not ready to be written to.
*/
int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
struct drm_i915_private *dev_priv;
int ret;
 
WARN_ON(req == NULL);
dev_priv = req->ring->dev->dev_private;
 
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
return ret;
 
ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
if (ret)
return ret;
 
req->ringbuf->space -= num_dwords * sizeof(uint32_t);
return 0;
}
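 
/*
 * Minimal usage sketch (hypothetical helper): the begin/emit/advance
 * pattern that every LRC-mode emitter follows, as seen in
 * intel_logical_ring_workarounds_emit() below.
 */
static int __maybe_unused example_emit_two_noops(struct drm_i915_gem_request *req)
{
	struct intel_ringbuffer *ringbuf = req->ringbuf;
	int ret;
 
	ret = intel_logical_ring_begin(req, 2);	/* make room for 2 DWORDs */
	if (ret)
		return ret;
 
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);	/* publish the new tail */
 
	return 0;
}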
 
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
{
/*
* The first call merely notes the reserve request and is common for
* all back ends. The subsequent localised _begin() call actually
* ensures that the reservation is available. Without the begin, if
* the request creator immediately submitted the request without
* adding any commands to it then there might not actually be
* sufficient room for the submission commands.
*/
intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
 
return intel_logical_ring_begin(request, 0);
}
 
/**
* execlists_submission() - submit a batchbuffer for execution, Execlists style
* @dev: DRM device.
* @file: DRM file.
647,7 → 858,7
* @vmas: list of vmas.
* @batch_obj: the batchbuffer to submit.
* @exec_start: batchbuffer start virtual address pointer.
* @flags: translated execbuffer call flags.
* @dispatch_flags: translated execbuffer call flags.
*
* This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
* away the submission details of the execbuffer ioctl call.
654,16 → 865,15
*
* Return: non-zero if the submission fails.
*/
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags)
struct list_head *vmas)
{
struct drm_device *dev = params->dev;
struct intel_engine_cs *ring = params->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
u64 exec_start;
int instp_mode;
u32 instp_mask;
int ret;
694,33 → 904,18
return -EINVAL;
}
 
if (args->num_cliprects != 0) {
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
return -EINVAL;
} else {
if (args->DR4 == 0xffffffff) {
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
args->DR4 = 0;
}
 
if (args->DR1 || args->DR4 || args->cliprects_ptr) {
DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
return -EINVAL;
}
}
 
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
DRM_DEBUG("sol reset is gen7 only\n");
return -EINVAL;
}
 
ret = execlists_move_to_gpu(ringbuf, vmas);
ret = execlists_move_to_gpu(params->request, vmas);
if (ret)
return ret;
 
if (ring == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
ret = intel_logical_ring_begin(ringbuf, 4);
ret = intel_logical_ring_begin(params->request, 4);
if (ret)
return ret;
 
733,21 → 928,24
dev_priv->relative_constants_mode = instp_mode;
}
 
ret = ring->emit_bb_start(ringbuf, exec_start, flags);
exec_start = params->batch_obj_vm_offset +
args->batch_start_offset;
 
ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
if (ret)
return ret;
 
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
i915_gem_execbuffer_move_to_active(vmas, params->request);
i915_gem_execbuffer_retire_commands(params);
 
return 0;
}
 
void intel_execlists_retire_requests(struct intel_engine_cs *ring)
{
struct intel_ctx_submit_request *req, *tmp;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
unsigned long flags;
struct drm_i915_gem_request *req, *tmp;
struct list_head retired_list;
 
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
755,9 → 953,9
return;
 
INIT_LIST_HEAD(&retired_list);
spin_lock_irqsave(&ring->execlist_lock, flags);
spin_lock_irq(&ring->execlist_lock);
list_replace_init(&ring->execlist_retired_req_list, &retired_list);
spin_unlock_irqrestore(&ring->execlist_lock, flags);
spin_unlock_irq(&ring->execlist_lock);
 
list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
struct intel_context *ctx = req->ctx;
765,11 → 963,9
ctx->engine[ring->id].state;
 
if (ctx_obj && (ctx != ring->default_context))
intel_lr_context_unpin(ring, ctx);
intel_runtime_pm_put(dev_priv);
i915_gem_context_unreference(req->ctx);
intel_lr_context_unpin(req);
list_del(&req->execlist_link);
kfree(req);
i915_gem_request_unreference(req);
}
}
 
795,15 → 991,15
I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}
 
int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = ringbuf->ring;
struct intel_engine_cs *ring = req->ring;
int ret;
 
if (!ring->gpu_caches_dirty)
return 0;
 
ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
 
811,66 → 1007,66
return 0;
}
 
/**
* intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
* @ringbuf: Logical Ringbuffer to advance.
*
* The tail is updated in our logical ringbuffer struct, not in the actual context. What
* really happens during submission is that the context and current tail will be placed
* on a queue waiting for the ELSP to be ready to accept a new context submission. At that
* point, the tail *inside* the context is updated and the ELSP written to.
*/
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
struct drm_i915_gem_object *ctx_obj,
struct intel_ringbuffer *ringbuf)
{
struct intel_engine_cs *ring = ringbuf->ring;
struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;
 
intel_logical_ring_advance(ringbuf);
 
if (intel_ring_stopped(ring))
return;
 
execlists_context_queue(ring, ctx, ringbuf->tail);
}
 
static int intel_lr_context_pin(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
 
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
if (ctx->engine[ring->id].unpin_count++ == 0) {
ret = i915_gem_obj_ggtt_pin(ctx_obj,
GEN8_LR_CONTEXT_ALIGN, 0);
ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (ret)
goto reset_unpin_count;
return ret;
 
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret)
goto unpin_ctx_obj;
}
 
ctx_obj->dirty = true;
 
/* Invalidate GuC TLB. */
if (i915.enable_guc_submission)
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 
return ret;
 
unpin_ctx_obj:
i915_gem_object_ggtt_unpin(ctx_obj);
reset_unpin_count:
ctx->engine[ring->id].unpin_count = 0;
 
return ret;
}
 
void intel_lr_context_unpin(struct intel_engine_cs *ring,
struct intel_context *ctx)
static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
{
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
int ret = 0;
struct intel_engine_cs *ring = rq->ring;
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
if (rq->ctx->engine[ring->id].pin_count++ == 0) {
ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
if (ret)
goto reset_pin_count;
}
return ret;
 
reset_pin_count:
rq->ctx->engine[ring->id].pin_count = 0;
return ret;
}
 
void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
{
struct intel_engine_cs *ring = rq->ring;
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
if (ctx_obj) {
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
if (--ctx->engine[ring->id].unpin_count == 0) {
if (--rq->ctx->engine[ring->id].pin_count == 0) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
877,165 → 1073,307
}
}
 
static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
struct intel_context *ctx)
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret;
int ret, i;
struct intel_engine_cs *ring = req->ring;
struct intel_ringbuffer *ringbuf = req->ringbuf;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
 
if (ring->outstanding_lazy_seqno)
if (WARN_ON_ONCE(w->count == 0))
return 0;
 
if (ring->preallocated_lazy_request == NULL) {
struct drm_i915_gem_request *request;
ring->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(req);
if (ret)
return ret;
 
request = kmalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;
ret = intel_logical_ring_begin(req, w->count * 2 + 2);
if (ret)
return ret;
 
if (ctx != ring->default_context) {
ret = intel_lr_context_pin(ring, ctx);
if (ret) {
kfree(request);
intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
intel_logical_ring_emit(ringbuf, w->reg[i].addr);
intel_logical_ring_emit(ringbuf, w->reg[i].value);
}
intel_logical_ring_emit(ringbuf, MI_NOOP);
 
intel_logical_ring_advance(ringbuf);
 
ring->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(req);
if (ret)
return ret;
 
return 0;
}
}
 
/* Hold a reference to the context this request belongs to
* (we will need it when the time comes to emit/retire the
* request).
#define wa_ctx_emit(batch, index, cmd) \
do { \
int __index = (index)++; \
if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
return -ENOSPC; \
} \
batch[__index] = (cmd); \
} while (0)
 
 
/*
* In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
* PIPE_CONTROL instruction. This is required for the flush to happen correctly
* but there is a slight complication as this is applied in WA batch where the
* values are only initialized once, so we cannot read the register value at the
* beginning and reuse it later; hence we save its value to memory, upload a
* constant value with bit21 set and then restore it from the saved value.
* To simplify the WA, a constant value is formed from the default value
* of this register. This shouldn't be a problem because we are only modifying
* it for a short period and this batch is non-preemptible. We could of course
* use additional instructions that read the actual value of the register
* at that time and set our bit of interest but it makes the WA complicated.
*
* This WA is also required for Gen9 so extracting as a function avoids
* code duplication.
*/
request->ctx = ctx;
i915_gem_context_reference(request->ctx);
static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
uint32_t *const batch,
uint32_t index)
{
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
 
ring->preallocated_lazy_request = request;
/*
* WaDisableLSQCROPERFforOCL:skl
* This WA is implemented in skl_init_clock_gating() but since
* this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush.
*/
if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
wa_ctx_emit(batch, index, 0);
 
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
wa_ctx_emit(batch, index, l3sqc4_flush);
 
wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_DC_FLUSH_ENABLE));
wa_ctx_emit(batch, index, 0);
wa_ctx_emit(batch, index, 0);
wa_ctx_emit(batch, index, 0);
wa_ctx_emit(batch, index, 0);
 
wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
wa_ctx_emit(batch, index, 0);
 
return index;
}
 
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
uint32_t offset,
uint32_t start_alignment)
{
return wa_ctx->offset = ALIGN(offset, start_alignment);
}
 
static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
int bytes)
static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
uint32_t offset,
uint32_t size_alignment)
{
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_i915_gem_request *request;
u32 seqno = 0;
int ret;
wa_ctx->size = offset - wa_ctx->offset;
 
if (ringbuf->last_retired_head != -1) {
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
 
ringbuf->space = intel_ring_space(ringbuf);
if (ringbuf->space >= bytes)
WARN(wa_ctx->size % size_alignment,
"wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
wa_ctx->size, size_alignment);
return 0;
}
 
list_for_each_entry(request, &ring->request_list, list) {
if (__intel_ring_space(request->tail, ringbuf->tail,
ringbuf->size) >= bytes) {
seqno = request->seqno;
break;
}
}
/**
* gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
*
* @ring: only applicable for RCS
* @wa_ctx: structure representing wa_ctx
* offset: specifies start of the batch, should be cache-aligned. This is updated
* with the offset value received as input.
* size: size of the batch in DWORDS but HW expects in terms of cachelines
* @batch: page in which WA are loaded
* @offset: This field specifies the start of the batch, it should be
* cache-aligned otherwise it is adjusted accordingly.
* Typically we only have one indirect_ctx and per_ctx batch buffer, which are
* initialized at the beginning and shared across all contexts, but this field
* helps us to have multiple batches at different offsets and select them based
* on some criteria. At the moment this batch always starts at the beginning of
* the page and at this point we don't have multiple wa_ctx batch buffers.
*
* The number of WAs applied is not known at the beginning; we use this field
* to return the number of DWORDs written.
*
* It is to be noted that this batch does not contain MI_BATCH_BUFFER_END,
* so NOOPs are added as padding to make it cacheline aligned.
* MI_BATCH_BUFFER_END will be added to the perctx batch, and the two of
* them together make a complete batch buffer.
*
* Return: non-zero if we exceed the PAGE_SIZE limit.
*/
 
if (seqno == 0)
return -ENOSPC;
static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
struct i915_wa_ctx_bb *wa_ctx,
uint32_t *const batch,
uint32_t *offset)
{
uint32_t scratch_addr;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
ret = i915_wait_seqno(ring, seqno);
if (ret)
return ret;
/* WaDisableCtxRestoreArbitration:bdw,chv */
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
i915_gem_retire_requests_ring(ring);
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
 
ringbuf->space = intel_ring_space(ringbuf);
return 0;
/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
if (IS_BROADWELL(ring->dev)) {
int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index);
if (rc < 0)
return rc;
index = rc;
}
 
static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
int bytes)
{
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long end;
int ret;
/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
/* Actual scratch location is at 128 bytes offset */
scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES;
 
ret = logical_ring_wait_request(ringbuf, bytes);
if (ret != -ENOSPC)
return ret;
wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE));
wa_ctx_emit(batch, index, scratch_addr);
wa_ctx_emit(batch, index, 0);
wa_ctx_emit(batch, index, 0);
wa_ctx_emit(batch, index, 0);
 
/* Force the context submission in case we have been skipping it */
intel_logical_ring_advance_and_submit(ringbuf);
/* Pad to end of cacheline */
while (index % CACHELINE_DWORDS)
wa_ctx_emit(batch, index, MI_NOOP);
 
/* With GEM the hangcheck timer should kick us out of the loop,
* leaving it early runs the risk of corrupting GEM state (due
* to running on almost untested codepaths). But on resume
* timers don't work yet, so prevent a complete hang in that
* case by choosing an insanely large timeout. */
end = jiffies + 60 * HZ;
/*
* MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
* execution depends on the length specified in terms of cache lines
* in the register CTX_RCS_INDIRECT_CTX
*/
 
do {
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->space = intel_ring_space(ringbuf);
if (ringbuf->space >= bytes) {
ret = 0;
break;
return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}
 
msleep(1);
/**
* gen8_init_perctx_bb() - initialize per ctx batch with WA
*
* @ring: only applicable for RCS
* @wa_ctx: structure representing wa_ctx
* offset: specifies start of the batch, should be cache-aligned.
* size: size of the batch in DWORDS but HW expects in terms of cachelines
* @batch: page in which WA are loaded
* @offset: This field specifies the start of this batch.
* This batch is started immediately after indirect_ctx batch. Since we ensure
* that indirect_ctx ends on a cacheline this batch is aligned automatically.
*
* The number of DWORDs written is returned using this field.
*
* This batch is terminated with MI_BATCH_BUFFER_END, so we need not add
* padding to align it to a cacheline; padding after MI_BATCH_BUFFER_END is
* redundant.
*/
static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
struct i915_wa_ctx_bb *wa_ctx,
uint32_t *const batch,
uint32_t *offset)
{
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
break;
/* WaDisableCtxRestoreArbitration:bdw,chv */
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 
if (time_after(jiffies, end)) {
ret = -EBUSY;
break;
}
} while (1);
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
 
return ret;
return wa_ctx_end(wa_ctx, *offset = index, 1);
}
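/*
 * Sketch of the offset bookkeeping (illustration only, mirroring how
 * intel_init_workaround_bb() below drives these helpers): *offset carries
 * the running write position within the shared WA page between sub-batches:
 *
 *	uint32_t offset = 0;
 *	gen8_init_indirectctx_bb(ring, &wa_ctx->indirect_ctx, batch, &offset);
 *	gen8_init_perctx_bb(ring, &wa_ctx->per_ctx, batch, &offset);
 *
 * Afterwards indirect_ctx.offset is 0, indirect_ctx.size is a whole number
 * of cachelines, and per_ctx.offset starts on that cacheline boundary.
 */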
 
static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
struct i915_wa_ctx_bb *wa_ctx,
uint32_t *const batch,
uint32_t *offset)
{
uint32_t __iomem *virt;
int rem = ringbuf->size - ringbuf->tail;
int ret;
struct drm_device *dev = ring->dev;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
if (ringbuf->space < rem) {
int ret = logical_ring_wait_for_space(ringbuf, rem);
/* WaDisableCtxRestoreArbitration:skl,bxt */
if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
(IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
if (ret)
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
ret = gen8_emit_flush_coherentl3_wa(ring, batch, index);
if (ret < 0)
return ret;
index = ret;
 
/* Pad to end of cacheline */
while (index % CACHELINE_DWORDS)
wa_ctx_emit(batch, index, MI_NOOP);
 
return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}
 
virt = ringbuf->virtual_start + ringbuf->tail;
rem /= 4;
while (rem--)
iowrite32(MI_NOOP, virt++);
static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
struct i915_wa_ctx_bb *wa_ctx,
uint32_t *const batch,
uint32_t *offset)
{
struct drm_device *dev = ring->dev;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
ringbuf->tail = 0;
ringbuf->space = intel_ring_space(ringbuf);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) ||
(IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
wa_ctx_emit(batch, index,
_MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
wa_ctx_emit(batch, index, MI_NOOP);
}
 
return 0;
/* WaDisableCtxRestoreArbitration:skl,bxt */
if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
(IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
 
return wa_ctx_end(wa_ctx, *offset = index, 1);
}
 
static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
{
int ret;
 
if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
ret = logical_ring_wrap_buffer(ringbuf);
if (unlikely(ret))
return ret;
ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
if (!ring->wa_ctx.obj) {
DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
return -ENOMEM;
}
 
if (unlikely(ringbuf->space < bytes)) {
ret = logical_ring_wait_for_space(ringbuf, bytes);
if (unlikely(ret))
ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0);
if (ret) {
DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
ret);
drm_gem_object_unreference(&ring->wa_ctx.obj->base);
return ret;
}
 
1042,80 → 1380,84
return 0;
}
 
/**
* intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
*
* @ringbuf: Logical ringbuffer.
* @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
*
* The ringbuffer might not be ready to accept the commands right away (maybe it needs to
* be wrapped, or wait a bit for the tail to be updated). This function takes care of that
* and also preallocates a request (every workload submission is still mediated
* through requests, just as with legacy ringbuffer submission).
*
* Return: non-zero if the ringbuffer is not ready to be written to.
*/
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring)
{
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (ring->wa_ctx.obj) {
i915_gem_object_ggtt_unpin(ring->wa_ctx.obj);
drm_gem_object_unreference(&ring->wa_ctx.obj->base);
ring->wa_ctx.obj = NULL;
}
}
 
static int intel_init_workaround_bb(struct intel_engine_cs *ring)
{
int ret;
uint32_t *batch;
uint32_t offset;
struct page *page;
struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
 
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
return ret;
WARN_ON(ring->id != RCS);
 
ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
if (ret)
return ret;
/* update this when WAs for higher gens are added */
if (INTEL_INFO(ring->dev)->gen > 9) {
DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
INTEL_INFO(ring->dev)->gen);
return 0;
}
 
/* Preallocate the olr before touching the ring */
ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
if (ret)
/* some WAs perform writes to the scratch page, ensure it is valid */
if (ring->scratch.obj == NULL) {
DRM_ERROR("scratch page not allocated for %s\n", ring->name);
return -EINVAL;
}
 
ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
if (ret) {
DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
return ret;
 
ringbuf->space -= num_dwords * sizeof(uint32_t);
return 0;
}
 
static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
int ret, i;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
page = i915_gem_object_get_page(wa_ctx->obj, 0);
batch = kmap_atomic(page);
offset = 0;
 
if (WARN_ON(w->count == 0))
return 0;
if (INTEL_INFO(ring->dev)->gen == 8) {
ret = gen8_init_indirectctx_bb(ring,
&wa_ctx->indirect_ctx,
batch,
&offset);
if (ret)
goto out;
 
ring->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(ringbuf);
ret = gen8_init_perctx_bb(ring,
&wa_ctx->per_ctx,
batch,
&offset);
if (ret)
return ret;
goto out;
} else if (INTEL_INFO(ring->dev)->gen == 9) {
ret = gen9_init_indirectctx_bb(ring,
&wa_ctx->indirect_ctx,
batch,
&offset);
if (ret)
goto out;
 
ret = intel_logical_ring_begin(ringbuf, w->count * 2 + 2);
ret = gen9_init_perctx_bb(ring,
&wa_ctx->per_ctx,
batch,
&offset);
if (ret)
return ret;
 
intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
intel_logical_ring_emit(ringbuf, w->reg[i].addr);
intel_logical_ring_emit(ringbuf, w->reg[i].value);
goto out;
}
intel_logical_ring_emit(ringbuf, MI_NOOP);
 
intel_logical_ring_advance(ringbuf);
out:
kunmap_atomic(batch);
if (ret)
lrc_destroy_wa_ctx_obj(ring);
 
ring->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(ringbuf);
if (ret)
return ret;
 
return 0;
}
 
static int gen8_init_common_ring(struct intel_engine_cs *ring)
1122,14 → 1464,47
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u8 next_context_status_buffer_hw;
 
lrc_setup_hardware_status_page(ring,
ring->default_context->engine[ring->id].state);
 
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
 
if (ring->status_page.obj) {
I915_WRITE(RING_HWS_PGA(ring->mmio_base),
(u32)ring->status_page.gfx_addr);
POSTING_READ(RING_HWS_PGA(ring->mmio_base));
}
 
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
POSTING_READ(RING_MODE_GEN7(ring));
 
/*
* Instead of resetting the Context Status Buffer (CSB) read pointer to
* zero, we need to read the write pointer from hardware and use its
* value because "this register is power context save restored".
* Effectively, these states have been observed:
*
* | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
* BDW | CSB regs not reset | CSB regs reset |
* CHT | CSB regs not reset | CSB regs not reset |
*/
next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
& GEN8_CSB_PTR_MASK);
 
/*
* When the CSB registers are reset (also after power-up / gpu reset),
* the CSB write pointer is set to all 1's, which is not valid; use '5' in
* this special case so that the first element read is CSB[0].
*/
if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
 
ring->next_context_status_buffer = next_context_status_buffer_hw;
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
 
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
1155,27 → 1530,84
*/
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
ret = intel_init_pipe_control(ring);
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
return init_workarounds_ring(ring);
}
 
static int gen9_init_render_ring(struct intel_engine_cs *ring)
{
int ret;
 
ret = gen8_init_common_ring(ring);
if (ret)
return ret;
 
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
return init_workarounds_ring(ring);
}
 
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
u64 offset, unsigned flags)
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
{
bool ppgtt = !(flags & I915_DISPATCH_SECURE);
struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
struct intel_engine_cs *ring = req->ring;
struct intel_ringbuffer *ringbuf = req->ringbuf;
const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
int i, ret;
 
ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
if (ret)
return ret;
 
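/* DWORD budget reserved above: 1 (LRI header) + num_lri_cmds register/value
 * pairs (16 DWORDs for the four PDPs) + 1 (MI_NOOP) = num_lri_cmds * 2 + 2. */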
intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
}
 
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);
 
return 0;
}
 
static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, unsigned dispatch_flags)
{
struct intel_ringbuffer *ringbuf = req->ringbuf;
bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
 
ret = intel_logical_ring_begin(ringbuf, 4);
/* Don't rely on hw updating PDPs, especially in lite-restore.
* Ideally, we should set Force PD Restore in ctx descriptor,
* but we can't. Force Restore would be a second option, but
* it is unsafe in case of lite-restore (because the ctx is
* not idle). PML4 is allocated during ppgtt init, so this is
* not needed in 48-bit mode. */
if (req->ctx->ppgtt &&
(intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
if (!USES_FULL_48BIT_PPGTT(req->i915) &&
!intel_vgpu_active(req->i915->dev)) {
ret = intel_logical_ring_emit_pdps(req);
if (ret)
return ret;
}
 
req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
}
 
ret = intel_logical_ring_begin(req, 4);
if (ret)
return ret;
 
/* FIXME(BDW): Address space and security selectors. */
intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
(ppgtt<<8) |
(dispatch_flags & I915_DISPATCH_RS ?
MI_BATCH_RESOURCE_STREAMER : 0));
intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
intel_logical_ring_emit(ringbuf, MI_NOOP);
1217,10 → 1649,11
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
 
static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
static int gen8_emit_flush(struct drm_i915_gem_request *request,
u32 invalidate_domains,
u32 unused)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1227,21 → 1660,23
uint32_t cmd;
int ret;
 
ret = intel_logical_ring_begin(ringbuf, 4);
ret = intel_logical_ring_begin(request, 4);
if (ret)
return ret;
 
cmd = MI_FLUSH_DW + 1;
 
if (ring == &dev_priv->ring[VCS]) {
if (invalidate_domains & I915_GEM_GPU_DOMAINS)
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
MI_FLUSH_DW_STORE_INDEX |
MI_FLUSH_DW_OP_STOREDW;
} else {
if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
MI_FLUSH_DW_OP_STOREDW;
/* We always require a command barrier so that subsequent
* commands, such as breadcrumb interrupts, are strictly ordered
* wrt the contents of the write cache being flushed to memory
* (and thus being coherent from the CPU).
*/
cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 
if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
cmd |= MI_INVALIDATE_TLB;
if (ring == &dev_priv->ring[VCS])
cmd |= MI_INVALIDATE_BSD;
}
 
intel_logical_ring_emit(ringbuf, cmd);
1255,12 → 1690,14
return 0;
}
 
static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
u32 invalidate_domains,
u32 flush_domains)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
bool vf_flush_wa;
u32 flags = 0;
int ret;
 
1269,6 → 1706,7
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
 
if (invalidate_domains) {
1282,11 → 1720,27
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
}
 
ret = intel_logical_ring_begin(ringbuf, 6);
/*
* On GEN9+, before VF_CACHE_INVALIDATE we need to emit a NULL pipe
* control.
*/
vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
 
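/* Each GFX_OP_PIPE_CONTROL(6) is six DWORDs; the WA prepends a NULL pipe
 * control of the same size, hence 12 DWORDs instead of 6. */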
ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
if (ret)
return ret;
 
if (vf_flush_wa) {
intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, 0);
}
 
intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
intel_logical_ring_emit(ringbuf, flags);
intel_logical_ring_emit(ringbuf, scratch_addr);
intel_logical_ring_emit(ringbuf, 0);
1307,17 → 1761,51
intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}
 
static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
 
/*
* On BXT A steppings there is a HW coherency issue whereby the
* MI_STORE_DATA_IMM storing the completed request's seqno
* occasionally doesn't invalidate the CPU cache. Work around this by
* clflushing the corresponding cacheline whenever the caller wants
* the coherency to be guaranteed. Note that this cacheline is known
* to be clean at this point, since we only write it in
* bxt_a_set_seqno(), where we also do a clflush after the write. So
* this clflush in practice becomes an invalidate operation.
*/
 
if (!lazy_coherency)
intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
 
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
 
static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
 
/* See bxt_a_get_seqno() explaining the reason for the clflush. */
intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
}
 
static int gen8_emit_request(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
u32 cmd;
int ret;
 
ret = intel_logical_ring_begin(ringbuf, 6);
/*
* Reserve space for 2 NOOPs at the end of each request to be
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
ret = intel_logical_ring_begin(request, 8);
if (ret)
return ret;
 
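/* Of the 8 DWORDs reserved above, six form the request proper (store
 * command, address, 0, seqno, MI_USER_INTERRUPT, MI_NOOP) and two are the
 * WaIdleLiteRestore padding NOOPs emitted after the submit below. */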
cmd = MI_STORE_DWORD_IMM_GEN8;
cmd = MI_STORE_DWORD_IMM_GEN4;
cmd |= MI_GLOBAL_GTT;
 
intel_logical_ring_emit(ringbuf, cmd);
1325,14 → 1813,71
(ring->status_page.gfx_addr +
(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance_and_submit(ringbuf);
intel_logical_ring_advance_and_submit(request);
 
/*
* Here we add two extra NOOPs as padding to avoid
* lite restore of a context with HEAD==TAIL.
*/
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);
 
return 0;
}
 
static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
{
struct render_state so;
int ret;
 
ret = i915_gem_render_state_prepare(req->ring, &so);
if (ret)
return ret;
 
if (so.rodata == NULL)
return 0;
 
ret = req->ring->emit_bb_start(req, so.ggtt_offset,
I915_DISPATCH_SECURE);
if (ret)
goto out;
 
ret = req->ring->emit_bb_start(req,
(so.ggtt_offset + so.aux_batch_offset),
I915_DISPATCH_SECURE);
if (ret)
goto out;
 
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
 
out:
i915_gem_render_state_fini(&so);
return ret;
}
 
static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
{
int ret;
 
ret = intel_logical_ring_workarounds_emit(req);
if (ret)
return ret;
 
ret = intel_rcs_context_init_mocs(req);
/*
* Failing to program the MOCS is non-fatal. The system will not
* run at peak performance, so generate an error and carry on.
*/
if (ret)
DRM_ERROR("MOCS failed to program: expect performance issues.\n");
 
return intel_lr_context_render_state_init(req);
}
 
/**
* intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
*
1350,18 → 1895,19
 
intel_logical_ring_stop(ring);
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
 
if (ring->cleanup)
ring->cleanup(ring);
 
i915_cmd_parser_fini_ring(ring);
i915_gem_batch_pool_fini(&ring->batch_pool);
 
if (ring->status_page.obj) {
kunmap(sg_page(ring->status_page.obj->pages->sgl));
ring->status_page.obj = NULL;
}
 
lrc_destroy_wa_ctx_obj(ring);
}
 
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
1374,25 → 1920,33
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
i915_gem_batch_pool_init(dev, &ring->batch_pool);
init_waitqueue_head(&ring->irq_queue);
 
INIT_LIST_HEAD(&ring->execlist_queue);
INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock);
ring->next_context_status_buffer = 0;
 
ret = i915_cmd_parser_init_ring(ring);
if (ret)
return ret;
 
if (ring->init) {
ret = ring->init(ring);
ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
if (ret)
return ret;
 
/* As this is the default context, always pin it */
ret = intel_lr_context_do_pin(
ring,
ring->default_context->engine[ring->id].state,
ring->default_context->engine[ring->id].ringbuf);
if (ret) {
DRM_ERROR(
"Failed to pin and map ringbuffer %s: %d\n",
ring->name, ret);
return ret;
}
 
ret = intel_lr_context_deferred_create(ring->default_context, ring);
 
return ret;
}
 
1400,6 → 1954,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
 
ring->name = "render ring";
ring->id = RCS;
1411,11 → 1966,19
if (HAS_L3_DPF(dev))
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
ring->init = gen8_init_render_ring;
ring->init_context = intel_logical_ring_workarounds_emit;
if (INTEL_INFO(dev)->gen >= 9)
ring->init_hw = gen9_init_render_ring;
else
ring->init_hw = gen8_init_render_ring;
ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control;
if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
}
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush_render;
ring->irq_get = gen8_logical_ring_get_irq;
1422,9 → 1985,31
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
 
return logical_ring_init(dev, ring);
ring->dev = dev;
 
ret = intel_init_pipe_control(ring);
if (ret)
return ret;
 
ret = intel_init_workaround_bb(ring);
if (ret) {
/*
* We continue even if we fail to initialize the WA batch
* because we only expect rare glitches, nothing critical
* that would prevent us from using the GPU.
*/
DRM_ERROR("WA batch buffer initialization failed: %d\n",
ret);
}
 
ret = logical_ring_init(dev, ring);
if (ret) {
lrc_destroy_wa_ctx_obj(ring);
}
 
return ret;
}
 
static int logical_bsd_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
1438,9 → 2023,14
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 
ring->init = gen8_init_common_ring;
ring->init_hw = gen8_init_common_ring;
if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
}
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
1463,7 → 2053,7
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
 
ring->init = gen8_init_common_ring;
ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
1488,9 → 2078,14
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 
ring->init = gen8_init_common_ring;
ring->init_hw = gen8_init_common_ring;
if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
}
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
1513,9 → 2108,14
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 
ring->init = gen8_init_common_ring;
ring->init_hw = gen8_init_common_ring;
if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
}
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
1568,14 → 2168,8
goto cleanup_vebox_ring;
}
 
ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
if (ret)
goto cleanup_bsd2_ring;
 
return 0;
 
cleanup_bsd2_ring:
intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
cleanup_blt_ring:
1588,38 → 2182,49
return ret;
}
 
int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
struct intel_context *ctx)
static u32
make_rpcs(struct drm_device *dev)
{
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
struct render_state so;
struct drm_i915_file_private *file_priv = ctx->file_priv;
struct drm_file *file = file_priv ? file_priv->file : NULL;
int ret;
u32 rpcs = 0;
 
ret = i915_gem_render_state_prepare(ring, &so);
if (ret)
return ret;
 
if (so.rodata == NULL)
/*
* No explicit RPCS request is needed to ensure full
* slice/subslice/EU enablement prior to Gen9.
*/
if (INTEL_INFO(dev)->gen < 9)
return 0;
 
ret = ring->emit_bb_start(ringbuf,
so.ggtt_offset,
I915_DISPATCH_SECURE);
if (ret)
goto out;
/*
* Starting in Gen9, render power gating can leave
* slice/subslice/EU in a partially enabled state. We
* must make an explicit request through RPCS for full
* enablement.
*/
if (INTEL_INFO(dev)->has_slice_pg) {
rpcs |= GEN8_RPCS_S_CNT_ENABLE;
rpcs |= INTEL_INFO(dev)->slice_total <<
GEN8_RPCS_S_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
 
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
if (INTEL_INFO(dev)->has_subslice_pg) {
rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
GEN8_RPCS_SS_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
 
ret = __i915_add_request(ring, file, so.obj, NULL);
/* intel_logical_ring_add_request moves object to inactive if it
* fails */
out:
i915_gem_render_state_fini(&so);
return ret;
if (INTEL_INFO(dev)->has_eu_pg) {
rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
GEN8_RPCS_EU_MIN_SHIFT;
rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
GEN8_RPCS_EU_MAX_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
 
return rpcs;
}
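/*
 * Worked example (hypothetical part, for illustration only): with
 * slice_total = 1, subslice_per_slice = 3, eu_per_subslice = 8 and all
 * three *_pg flags set, make_rpcs() returns
 *
 *	GEN8_RPCS_ENABLE |
 *	GEN8_RPCS_S_CNT_ENABLE  | (1 << GEN8_RPCS_S_CNT_SHIFT) |
 *	GEN8_RPCS_SS_CNT_ENABLE | (3 << GEN8_RPCS_SS_CNT_SHIFT) |
 *	(8 << GEN8_RPCS_EU_MIN_SHIFT) | (8 << GEN8_RPCS_EU_MAX_SHIFT)
 */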
 
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
1650,7 → 2255,7
 
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
page = i915_gem_object_get_page(ctx_obj, 1);
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page);
 
/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
1665,7 → 2270,9
reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
reg_state[CTX_CONTEXT_CONTROL+1] =
_MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
CTX_CTRL_RS_CTX_ENABLE);
reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
1690,9 → 2297,6
reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
reg_state[CTX_SECOND_BB_STATE+1] = 0;
if (ring->id == RCS) {
/* TODO: according to BSpec, the register state context
* for CHV does not have these. OTOH, these registers do
* exist in CHV. I'm waiting for a clarification */
reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
1699,7 → 2303,22
reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
if (ring->wa_ctx.obj) {
struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
 
reg_state[CTX_RCS_INDIRECT_CTX+1] =
(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
 
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6;
 
reg_state[CTX_BB_PER_CTX_PTR+1] =
(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
0x01;
}
}
reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
1712,24 → 2331,34
reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
 
if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address of the PML4 and
* other PDP Descriptors are ignored.
*/
ASSIGN_CTX_PML4(ppgtt, reg_state);
} else {
/* 32b PPGTT
* PDP*_DESCRIPTOR contains the base address of the supported address
* space. With dynamic page allocation, PDPs may not be allocated at
* this point. Point the unallocated PDPs to the scratch page.
*/
ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}
 
if (ring->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
reg_state[CTX_R_PWR_CLK_STATE+1] = make_rpcs(dev);
}
 
kunmap_atomic(reg_state);
 
ctx_obj->dirty = 1;
// set_page_dirty(page);
i915_gem_object_unpin_pages(ctx_obj);
 
return 0;
1759,8 → 2388,8
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
intel_destroy_ringbuffer_obj(ringbuf);
kfree(ringbuf);
WARN_ON(ctx->engine[ring->id].pin_count);
intel_ringbuffer_free(ringbuf);
drm_gem_object_unreference(&ctx_obj->base);
}
}
1794,12 → 2423,13
struct drm_i915_gem_object *default_ctx_obj)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct page *page;
 
/* The status page is offset 0 from the default context object
* in LRC mode. */
ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
ring->status_page.page_addr =
kmap(sg_page(default_ctx_obj->pages->sgl));
/* The HWSP is part of the default context object in LRC mode. */
ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
+ LRC_PPHWSP_PN * PAGE_SIZE;
page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
ring->status_page.page_addr = kmap(page);
ring->status_page.obj = default_ctx_obj;
 
I915_WRITE(RING_HWS_PGA(ring->mmio_base),
1808,7 → 2438,7
}
 
/**
* intel_lr_context_deferred_create() - create the LRC specific bits of a context
* intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
* @ctx: LR context to create.
* @ring: engine to be used with the context.
*
1820,10 → 2450,10
*
* Return: non-zero on error.
*/
int intel_lr_context_deferred_create(struct intel_context *ctx,
 
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
const bool is_global_default_ctx = (ctx == ring->default_context);
struct drm_device *dev = ring->dev;
struct drm_i915_gem_object *ctx_obj;
uint32_t context_size;
1831,108 → 2461,96
int ret;
 
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
if (ctx->engine[ring->id].state)
return 0;
WARN_ON(ctx->engine[ring->id].state);
 
context_size = round_up(get_lr_context_size(ring), 4096);
 
ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
if (IS_ERR(ctx_obj)) {
ret = PTR_ERR(ctx_obj);
DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
return ret;
/* One extra page as the sharing data between driver and GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
 
ctx_obj = i915_gem_alloc_object(dev, context_size);
if (!ctx_obj) {
DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
return -ENOMEM;
}
 
if (is_global_default_ctx) {
ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
if (ret) {
DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
ret);
drm_gem_object_unreference(&ctx_obj->base);
return ret;
ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
if (IS_ERR(ringbuf)) {
ret = PTR_ERR(ringbuf);
goto error_deref_obj;
}
}
 
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
if (!ringbuf) {
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
ring->name);
ret = -ENOMEM;
goto error_unpin_ctx;
ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
if (ret) {
DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
goto error_ringbuf;
}
 
ringbuf->ring = ring;
ringbuf->FIXME_lrc_ctx = ctx;
ctx->engine[ring->id].ringbuf = ringbuf;
ctx->engine[ring->id].state = ctx_obj;
 
ringbuf->size = 32 * PAGE_SIZE;
ringbuf->effective_size = ringbuf->size;
ringbuf->head = 0;
ringbuf->tail = 0;
ringbuf->space = ringbuf->size;
ringbuf->last_retired_head = -1;
if (ctx != ring->default_context && ring->init_context) {
struct drm_i915_gem_request *req;
 
if (ringbuf->obj == NULL) {
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
ret = i915_gem_request_alloc(ring,
ctx, &req);
if (ret) {
DRM_DEBUG_DRIVER(
"Failed to allocate ringbuffer obj %s: %d\n",
ring->name, ret);
goto error_free_rbuf;
DRM_ERROR("ring create req: %d\n",
ret);
goto error_ringbuf;
}
 
if (is_global_default_ctx) {
ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
ret = ring->init_context(req);
if (ret) {
DRM_ERROR(
"Failed to pin and map ringbuffer %s: %d\n",
ring->name, ret);
goto error_destroy_rbuf;
DRM_ERROR("ring init context: %d\n",
ret);
i915_gem_request_cancel(req);
goto error_ringbuf;
}
i915_add_request_no_flush(req);
}
return 0;
 
error_ringbuf:
intel_ringbuffer_free(ringbuf);
error_deref_obj:
drm_gem_object_unreference(&ctx_obj->base);
ctx->engine[ring->id].ringbuf = NULL;
ctx->engine[ring->id].state = NULL;
return ret;
}
 
ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
if (ret) {
DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
goto error;
}
void intel_lr_context_reset(struct drm_device *dev,
struct intel_context *ctx)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
int i;
 
ctx->engine[ring->id].ringbuf = ringbuf;
ctx->engine[ring->id].state = ctx_obj;
for_each_ring(ring, dev_priv, i) {
struct drm_i915_gem_object *ctx_obj =
ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf =
ctx->engine[ring->id].ringbuf;
uint32_t *reg_state;
struct page *page;
 
if (ctx == ring->default_context)
lrc_setup_hardware_status_page(ring, ctx_obj);
if (!ctx_obj)
continue;
 
if (ring->id == RCS && !ctx->rcs_initialized) {
if (ring->init_context) {
ret = ring->init_context(ring, ctx);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
if (i915_gem_object_get_pages(ctx_obj)) {
WARN(1, "Failed get_pages for context obj\n");
continue;
}
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page);
 
ret = intel_lr_context_render_state_init(ring, ctx);
if (ret) {
DRM_ERROR("Init render state failed: %d\n", ret);
ctx->engine[ring->id].ringbuf = NULL;
ctx->engine[ring->id].state = NULL;
goto error;
}
ctx->rcs_initialized = true;
}
reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL+1] = 0;
 
return 0;
kunmap_atomic(reg_state);
 
error:
if (is_global_default_ctx)
intel_unpin_ringbuffer_obj(ringbuf);
error_destroy_rbuf:
intel_destroy_ringbuffer_obj(ringbuf);
error_free_rbuf:
kfree(ringbuf);
error_unpin_ctx:
if (is_global_default_ctx)
i915_gem_object_ggtt_unpin(ctx_obj);
drm_gem_object_unreference(&ctx_obj->base);
return ret;
ringbuf->head = 0;
ringbuf->tail = 0;
}
}
/drivers/video/drm/i915/intel_lrc.h
25,21 → 25,30
#define _INTEL_LRC_H_
 
#define GEN8_LR_CONTEXT_ALIGN 4096
#define GEN8_CSB_ENTRIES 6
#define GEN8_CSB_PTR_MASK 0x07
 
/* Execlists regs */
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
#define RING_EXECLIST_STATUS_LO(ring) ((ring)->mmio_base+0x234)
#define RING_EXECLIST_STATUS_HI(ring) ((ring)->mmio_base+0x234 + 4)
#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
#define RING_CONTEXT_STATUS_BUF_LO(ring, i) ((ring)->mmio_base+0x370 + (i) * 8)
#define RING_CONTEXT_STATUS_BUF_HI(ring, i) ((ring)->mmio_base+0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
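/*
 * Illustrative helper (an assumption, not part of this header): reading one
 * CSB entry as a status/context-ID pair via the _LO/_HI registers above.
 */
static inline u64 example_read_csb_entry(struct intel_engine_cs *ring, int i)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
	u32 ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));

	return ((u64)ctx_id << 32) | status;
}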
 
/* Logical Rings */
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
int intel_logical_rings_init(struct drm_device *dev);
int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
 
int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);
int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
/**
* intel_logical_ring_advance() - advance the ringbuffer tail
* @ringbuf: Ringbuffer to advance.
61,58 → 70,32
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);
 
/* Logical Ring Contexts */
int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
struct intel_context *ctx);
 
/* One extra page is added before LRC for GuC as shared data */
#define LRC_GUCSHR_PN (0)
#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1)
#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
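/*
 * Resulting page layout of the context object (from the defines above):
 *	page 0 (LRC_GUCSHR_PN): data shared with the GuC
 *	page 1 (LRC_PPHWSP_PN): per-process HW status page
 *	page 2 (LRC_STATE_PN):  register state context
 */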
 
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
struct intel_engine_cs *ring);
void intel_lr_context_unpin(struct intel_engine_cs *ring,
void intel_lr_context_unpin(struct drm_i915_gem_request *req);
void intel_lr_context_reset(struct drm_device *dev,
struct intel_context *ctx);
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *ring);
 
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct i915_execbuffer_params;
int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags);
struct list_head *vmas);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
 
/**
* struct intel_ctx_submit_request - queued context submission request
* @ctx: Context to submit to the ELSP.
* @ring: Engine to submit it to.
* @tail: how far in the context's ringbuffer this request goes to.
* @execlist_link: link in the submission queue.
* @work: workqueue for processing this request in a bottom half.
* @elsp_submitted: no. of times this request has been sent to the ELSP.
*
* The ELSP only accepts two elements at a time, so we queue context/tail
* pairs on a given queue (ring->execlist_queue) until the hardware is
* available. The queue serves a double purpose: we also use it to keep track
* of the up to 2 contexts currently in the hardware (usually one in execution
* and the other queued up by the GPU): We only remove elements from the head
* of the queue when the hardware informs us that an element has been
* completed.
*
* All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
*/
struct intel_ctx_submit_request {
struct intel_context *ctx;
struct intel_engine_cs *ring;
u32 tail;
 
struct list_head execlist_link;
 
int elsp_submitted;
};
 
void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);
 
#endif /* _INTEL_LRC_H_ */
/drivers/video/drm/i915/intel_lvds.c
32,6 → 32,7
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
93,19 → 94,15
}
 
static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 lvds_reg, tmp, flags = 0;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp, flags = 0;
int dotclock;
 
if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
 
tmp = I915_READ(lvds_reg);
tmp = I915_READ(lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NHSYNC;
else
115,7 → 112,7
else
flags |= DRM_MODE_FLAG_PVSYNC;
 
pipe_config->adjusted_mode.flags |= flags;
pipe_config->base.adjusted_mode.flags |= flags;
 
/* gen2/3 store dither state in pfit control, needs to match */
if (INTEL_INFO(dev)->gen < 4) {
129,7 → 126,7
if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
 
pipe_config->adjusted_mode.crtc_clock = dotclock;
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
 
static void intel_pre_enable_lvds(struct intel_encoder *encoder)
138,8 → 135,7
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode =
&crtc->config.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
int pipe = crtc->pipe;
u32 temp;
 
167,7 → 163,7
 
/* set the corresponding LVDS_BORDER bit */
temp &= ~LVDS_BORDER_ENABLE;
temp |= crtc->config.gmch_pfit.lvds_border_bits;
temp |= crtc->config->gmch_pfit.lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
190,7 → 186,7
if (INTEL_INFO(dev)->gen == 4) {
/* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels. */
if (crtc->config.dither && crtc->config.pipe_bpp == 18)
if (crtc->config->dither && crtc->config->pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
238,8 → 234,6
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, stat_reg;
 
251,8 → 245,6
stat_reg = PP_STATUS;
}
 
intel_panel_disable_backlight(intel_connector);
 
I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
261,6 → 253,31
POSTING_READ(lvds_encoder->reg);
}
 
static void gmch_disable_lvds(struct intel_encoder *encoder)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
 
intel_panel_disable_backlight(intel_connector);
 
intel_disable_lvds(encoder);
}
 
static void pch_disable_lvds(struct intel_encoder *encoder)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
 
intel_panel_disable_backlight(intel_connector);
}
 
static void pch_post_disable_lvds(struct intel_encoder *encoder)
{
intel_disable_lvds(encoder);
}
 
static enum drm_mode_status
intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
267,17 → 284,20
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
 
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
if (fixed_mode->clock > max_pixclk)
return MODE_CLOCK_HIGH;
 
return MODE_OK;
}
 
static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = intel_encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder =
284,8 → 304,8
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
unsigned int lvds_bpp;
 
/* Should never happen!! */
452,7 → 472,7
*/
if (!HAS_PCH_SPLIT(dev)) {
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
intel_display_resume(dev);
drm_modeset_unlock_all(dev);
}
 
508,7 → 528,7
intel_connector->panel.fitting_mode = value;
 
crtc = intel_attached_encoder(connector)->base.crtc;
if (crtc && crtc->enabled) {
if (crtc && crtc->state->enable) {
/*
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
527,11 → 547,14
};
 
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.dpms = intel_connector_dpms,
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_lvds_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
 
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
777,7 → 800,7
child->device_type != DEVICE_TYPE_LFP)
continue;
 
if (intel_gmbus_is_port_valid(child->i2c_pin))
if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin))
*i2c_pin = child->i2c_pin;
 
/* However, we cannot trust the BIOS writers to populate
809,12 → 832,28
static const struct dmi_system_id intel_dual_link_lvds[] = {
{
.callback = intel_dual_link_lvds_callback,
.ident = "Apple MacBook Pro (Core i5/i7 Series)",
.ident = "Apple MacBook Pro 15\" (2010)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"),
},
},
{
.callback = intel_dual_link_lvds_callback,
.ident = "Apple MacBook Pro 15\" (2011)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
},
},
{
.callback = intel_dual_link_lvds_callback,
.ident = "Apple MacBook Pro 15\" (2012)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"),
},
},
{ } /* terminating entry */
};
 
844,6 → 883,11
if (i915.lvds_channel_mode > 0)
return i915.lvds_channel_mode == 2;
 
/* single channel LVDS is limited to 112 MHz */
if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
> 112999)
return true;
 
if (dmi_check_system(intel_dual_link_lvds))
return true;
 
895,6 → 939,7
struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
struct drm_crtc *crtc;
u32 lvds_reg;
u32 lvds;
int pipe;
u8 pin;
906,7 → 951,7
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_PP_CONTROL,
I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
} else {
} else if (INTEL_INFO(dev_priv)->gen < 5) {
I915_WRITE(PP_CONTROL,
I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
917,14 → 962,15
if (dmi_check_system(intel_no_lvds))
return;
 
pin = GMBUS_PORT_PANEL;
if (!lvds_is_present_in_vbt(dev, &pin)) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return;
}
if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
 
lvds = I915_READ(lvds_reg);
 
if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
if ((lvds & LVDS_DETECTED) == 0)
return;
if (dev_priv->vbt.edp_support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
932,6 → 978,27
}
}
 
pin = GMBUS_PIN_PANEL;
if (!lvds_is_present_in_vbt(dev, &pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return;
}
DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
}
 
/* Set the Panel Power On/Off timings if uninitialized. */
if (INTEL_INFO(dev_priv)->gen < 5 &&
I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
/* Set T2 to 40ms and T5 to 200ms */
I915_WRITE(PP_ON_DELAYS, 0x019007d0);
 
/* Set T3 to 35ms and Tx to 200ms */
I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
 
DRM_DEBUG_KMS("Panel power timings uninitialized, setting defaults\n");
}
 
lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
if (!lvds_encoder)
return;
942,6 → 1009,12
return;
}
 
if (intel_connector_init(&lvds_connector->base) < 0) {
kfree(lvds_connector);
kfree(lvds_encoder);
return;
}
 
lvds_encoder->attached_connector = lvds_connector;
 
intel_encoder = &lvds_encoder->base;
957,7 → 1030,12
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
intel_encoder->compute_config = intel_lvds_compute_config;
intel_encoder->disable = intel_disable_lvds;
if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_lvds;
intel_encoder->post_disable = pch_post_disable_lvds;
} else {
intel_encoder->disable = gmch_disable_lvds;
}
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
intel_connector->get_hw_state = intel_connector_get_hw_state;
979,11 → 1057,7
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
 
if (HAS_PCH_SPLIT(dev)) {
lvds_encoder->reg = PCH_LVDS;
} else {
lvds_encoder->reg = LVDS;
}
lvds_encoder->reg = lvds_reg;
 
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
1037,26 → 1111,10
drm_mode_debug_printmodeline(scan);
 
fixed_mode = drm_mode_duplicate(dev, scan);
if (fixed_mode) {
downclock_mode =
intel_find_panel_downclock(dev,
fixed_mode, connector);
if (downclock_mode != NULL &&
i915.lvds_downclock) {
/* We found the downclock for LVDS. */
dev_priv->lvds_downclock_avail = true;
dev_priv->lvds_downclock =
downclock_mode->clock;
DRM_DEBUG_KMS("LVDS downclock is found"
" in EDID. Normal clock %dKhz, "
"downclock %dKhz\n",
fixed_mode->clock,
dev_priv->lvds_downclock);
}
if (fixed_mode)
goto out;
}
}
}
 
/* Failed to get EDID, what about VBT? */
if (dev_priv->vbt.lfp_lvds_vbt_mode) {
1080,7 → 1138,6
if (HAS_PCH_SPLIT(dev))
goto failed;
 
lvds = I915_READ(LVDS);
pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
crtc = intel_get_crtc_for_pipe(dev, pipe);
 
1101,6 → 1158,8
out:
mutex_unlock(&dev->mode_config.mutex);
 
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
 
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
1110,7 → 1169,6
 
drm_connector_register(connector);
 
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector, INVALID_PIPE);
 
return;
/drivers/video/drm/i915/intel_mocs.c
0,0 → 1,335
/*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
 
#include "intel_mocs.h"
#include "intel_lrc.h"
#include "intel_ringbuffer.h"
 
/* Structures required to describe a platform's MOCS tables */
struct drm_i915_mocs_entry {
u32 control_value;
u16 l3cc_value;
};
 
struct drm_i915_mocs_table {
u32 size;
const struct drm_i915_mocs_entry *table;
};
 
/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
#define LE_CACHEABILITY(value) ((value) << 0)
#define LE_TGT_CACHE(value) ((value) << 2)
#define LE_LRUM(value) ((value) << 4)
#define LE_AOM(value) ((value) << 6)
#define LE_RSC(value) ((value) << 7)
#define LE_SCC(value) ((value) << 8)
#define LE_PFM(value) ((value) << 11)
#define LE_SCF(value) ((value) << 14)
 
/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
#define L3_ESC(value) ((value) << 0)
#define L3_SCC(value) ((value) << 1)
#define L3_CACHEABILITY(value) ((value) << 4)
 
/* Helper defines */
#define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - the last two entries are reserved. */
 
/* (e)LLC caching options */
#define LE_PAGETABLE 0
#define LE_UC 1
#define LE_WT 2
#define LE_WB 3
 
/* L3 caching options */
#define L3_DIRECT 0
#define L3_UC 1
#define L3_RESERVED 2
#define L3_WB 3
 
/* Target cache */
#define ELLC 0
#define LLC 1
#define LLC_ELLC 2
 
/*
* MOCS tables
*
* These are the MOCS tables that are programmed across all the rings.
* The control value is programmed to all the rings that support the
* MOCS registers, while the l3cc values are programmed only to the
* LNCFCMOCS0 - LNCFCMOCS32 registers.
*
* These tables are intended to be kept reasonably consistent across
* platforms. However some of the fields are not applicable to all of
* them.
*
* Entries not part of the following tables are undefined as far as
* userspace is concerned and shouldn't be relied upon. For the time
* being they will be implicitly initialized to the strictest caching
* configuration (uncached) to guarantee forwards compatibility with
* userspace programs written against more recent kernels providing
* additional MOCS entries.
*
* NOTE: These tables MUST begin with an uncached entry, and their length
* MUST be less than 63 as the last two registers are reserved
* by the hardware. These tables are part of the kernel ABI and
* may only be updated incrementally by adding entries at the
* end.
*/
static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
/* { 0x00000009, 0x0010 } */
{ (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
(L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
/* { 0x00000038, 0x0030 } */
{ (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
(L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
/* { 0x0000003b, 0x0030 } */
{ (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
(L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
};
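As a quick sanity check of the field encodings, the standalone sketch below (not driver code; the shift values are copied from the defines above) recomputes the raw numbers quoted in the entry comments:

#include <stdio.h>
#include <stdint.h>

/* Field encodings copied from the defines above. */
#define LE_CACHEABILITY(value)  ((value) << 0)
#define LE_TGT_CACHE(value)     ((value) << 2)
#define LE_LRUM(value)          ((value) << 4)
#define L3_ESC(value)           ((value) << 0)
#define L3_SCC(value)           ((value) << 1)
#define L3_CACHEABILITY(value)  ((value) << 4)

int main(void)
{
        /* Third entry above: LE_WB = 3, LLC_ELLC = 2, LRUM = 3, L3_WB = 3. */
        uint32_t control = LE_CACHEABILITY(3) | LE_TGT_CACHE(2) | LE_LRUM(3);
        uint16_t l3cc = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(3);

        /* Prints control=0x0000003b l3cc=0x0030, matching the comment. */
        printf("control=0x%08x l3cc=0x%04x\n", (unsigned)control, (unsigned)l3cc);
        return 0;
}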
 
/* NOTE: the LE_TGT_CACHE is not used on Broxton */
static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
/* { 0x00000009, 0x0010 } */
{ (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
(L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
/* { 0x00000038, 0x0030 } */
{ (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
(L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
/* { 0x0000003b, 0x0030 } */
{ (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
(L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
};
 
/**
* get_mocs_settings()
* @dev: DRM device.
* @table: Output table that will be made to point at appropriate
* MOCS values for the device.
*
* This function looks up the MOCS table that needs to be programmed for
* the platform and reports whether the platform has applicable MOCS
* settings at all.
*
* Return: true if there are applicable MOCS settings for the device.
*/
static bool get_mocs_settings(struct drm_device *dev,
struct drm_i915_mocs_table *table)
{
bool result = false;
 
if (IS_SKYLAKE(dev)) {
table->size = ARRAY_SIZE(skylake_mocs_table);
table->table = skylake_mocs_table;
result = true;
} else if (IS_BROXTON(dev)) {
table->size = ARRAY_SIZE(broxton_mocs_table);
table->table = broxton_mocs_table;
result = true;
} else {
WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
"Platform that should have a MOCS table does not.\n");
}
 
return result;
}
 
/**
* emit_mocs_control_table() - emit the mocs control table
* @req: Request to set up the MOCS table for.
* @table: The values to program into the control regs.
* @reg_base: The base for the engine that needs to be programmed.
*
* This function simply emits a MI_LOAD_REGISTER_IMM command for the
* given table starting at the given address.
*
* Return: 0 on success, otherwise the error status.
*/
static int emit_mocs_control_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table,
u32 reg_base)
{
struct intel_ringbuffer *ringbuf = req->ringbuf;
unsigned int index;
int ret;
 
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
 
ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
if (ret) {
DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
return ret;
}
 
intel_logical_ring_emit(ringbuf,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
for (index = 0; index < table->size; index++) {
intel_logical_ring_emit(ringbuf, reg_base + index * 4);
intel_logical_ring_emit(ringbuf,
table->table[index].control_value);
}
 
/*
* Ok, now set the unused entries to uncached. These entries
* are officially undefined and no contract is given for their
* contents or settings.
*
* Entry 0 in the table is uncached - so we are just writing
* that value to all the unused entries.
*/
for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
intel_logical_ring_emit(ringbuf, reg_base + index * 4);
intel_logical_ring_emit(ringbuf, table->table[0].control_value);
}
 
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);
 
return 0;
}
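For reference, the dword stream emitted above for a full 62-entry programming looks like this (a sketch; offsets follow from the reg_base + index * 4 addressing in the loop):

/*
 *   MI_LOAD_REGISTER_IMM(62)
 *   reg_base + 0x00, table[0].control_value
 *   reg_base + 0x04, table[1].control_value
 *   ...
 *   reg_base + 0xf4, table[0].control_value   (unused -> uncached)
 *   MI_NOOP
 *
 * for a total of 1 + 2 * 62 + 1 = 2 + 2 * GEN9_NUM_MOCS_ENTRIES dwords,
 * matching the space reserved via intel_logical_ring_begin() above.
 */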
 
/**
* emit_mocs_l3cc_table() - emit the mocs l3cc table
* @req: Request to set up the MOCS table for.
* @table: The values to program into the control regs.
*
* This function simply emits a MI_LOAD_REGISTER_IMM command for the
* given table starting at the given address. This register set is
* programmed in pairs.
*
* Return: 0 on success, otherwise the error status.
*/
static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
struct intel_ringbuffer *ringbuf = req->ringbuf;
unsigned int count;
unsigned int i;
u32 value;
u32 filler = (table->table[0].l3cc_value & 0xffff) |
((table->table[0].l3cc_value & 0xffff) << 16);
int ret;
 
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
 
ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
if (ret) {
DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
return ret;
}
 
intel_logical_ring_emit(ringbuf,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
 
for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
value = (table->table[count].l3cc_value & 0xffff) |
((table->table[count + 1].l3cc_value & 0xffff) << 16);
 
intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
intel_logical_ring_emit(ringbuf, value);
}
 
if (table->size & 0x01) {
/* Odd table size - pair the leftover entry with entry 0 (uncached) */
value = (table->table[count].l3cc_value & 0xffff) |
((table->table[0].l3cc_value & 0xffff) << 16);
} else {
value = filler;
}
 
/*
* Now set the rest of the table to uncached - use entry 0 as
* this will be uncached. Leave the last pair uninitialised as
* they are reserved by the hardware.
*/
for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
intel_logical_ring_emit(ringbuf, value);
 
value = filler;
}
 
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);
 
return 0;
}
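The loop above packs two 16-bit l3cc values into each 32-bit LNCFCMOCS register; a minimal helper expressing the same packing (illustrative only, not part of the driver):

/* Illustrative only: entry 2n lands in the low half-word of LNCFCMOCS[n],
 * entry 2n+1 in the high half-word. */
static inline u32 l3cc_pack_pair(u16 even_entry, u16 odd_entry)
{
        return (u32)even_entry | ((u32)odd_entry << 16);
}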
 
/**
* intel_rcs_context_init_mocs() - program the MOCS register.
* @req: Request to set up the MOCS tables for.
*
* This function will emit a batch buffer with the values required for
* programming the MOCS register values for all the currently supported
* rings.
*
* These registers are partially stored in the RCS context, so they are
* emitted at the same time so that when a context is created these registers
* are set up. These registers have to be emitted into the start of the
* context as setting the ELSP will re-init some of these registers back
* to the hw values.
*
* Return: 0 on success, otherwise the error status.
*/
int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
{
struct drm_i915_mocs_table t;
int ret;
 
if (get_mocs_settings(req->ring->dev, &t)) {
/* Program the control registers */
ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0);
if (ret)
return ret;
 
ret = emit_mocs_control_table(req, &t, GEN9_MFX0_MOCS_0);
if (ret)
return ret;
 
ret = emit_mocs_control_table(req, &t, GEN9_MFX1_MOCS_0);
if (ret)
return ret;
 
ret = emit_mocs_control_table(req, &t, GEN9_VEBOX_MOCS_0);
if (ret)
return ret;
 
ret = emit_mocs_control_table(req, &t, GEN9_BLT_MOCS_0);
if (ret)
return ret;
 
/* Now program the l3cc registers */
ret = emit_mocs_l3cc_table(req, &t);
if (ret)
return ret;
}
 
return 0;
}
/drivers/video/drm/i915/intel_mocs.h
0,0 → 1,57
/*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
 
#ifndef INTEL_MOCS_H
#define INTEL_MOCS_H
 
/**
* DOC: Memory Objects Control State (MOCS)
*
* Motivation:
* In previous gens the MOCS setting was a value written by userspace as
* part of the batch. In gen9 this has changed to a single table (per ring)
* that all batches now reference by index instead of programming the MOCS
* directly.
*
* The one wrinkle in this is that only PART of the MOCS tables is included
* in the context (the GFX_MOCS_0 - GFX_MOCS_64 and the LNCFCMOCS0 -
* LNCFCMOCS32 registers); the settings for the other rings are not.
*
* This table needs to be set at system start-up because of the way the
* table interacts with the contexts and the GmmLib interface.
*
* Implementation:
*
* The tables (one per supported platform) are defined in intel_mocs.c
* and are programmed in the first batch after the context is loaded
* (with the hardware workarounds). This will then let the usual
* context handling keep the MOCS in step.
*/
 
#include <drm/drmP.h>
#include "i915_drv.h"
 
int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
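Following the description above, the tables are emitted from the render ring's context-init path, right after the hardware workarounds; a minimal sketch of such a caller (the callback name and surrounding ordering are assumed, not taken from this header):

/* Sketch of a caller (names assumed): runs once per newly created context. */
static int render_ring_init_context(struct drm_i915_gem_request *req)
{
        int ret;

        /* Hardware workarounds are emitted first... */
        ret = intel_logical_ring_workarounds_emit(req);
        if (ret)
                return ret;

        /* ...then the MOCS tables, so the context records them. */
        return intel_rcs_context_init_mocs(req);
}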
 
#endif
/drivers/video/drm/i915/intel_modes.c
126,3 → 126,12
 
drm_object_attach_property(&connector->base, prop, 0);
}
 
void
intel_attach_aspect_ratio_property(struct drm_connector *connector)
{
if (!drm_mode_create_aspect_ratio_property(connector->dev))
drm_object_attach_property(&connector->base,
connector->dev->mode_config.aspect_ratio_property,
DRM_MODE_PICTURE_ASPECT_NONE);
}
/drivers/video/drm/i915/intel_opregion.c
53,6 → 53,7
#define MBOX_ACPI (1<<0)
#define MBOX_SWSCI (1<<1)
#define MBOX_ASLE (1<<2)
#define MBOX_ASLE_EXT (1<<4)
 
struct opregion_header {
u8 signature[16];
62,7 → 63,10
u8 vbios_ver[16];
u8 driver_ver[16];
u32 mboxes;
u8 reserved[164];
u32 driver_model;
u32 pcon;
u8 dver[32];
u8 rsvd[124];
} __packed;
 
/* OpRegion mailbox #1: public ACPI methods */
84,7 → 88,9
u32 evts; /* ASL supported events */
u32 cnot; /* current OS notification */
u32 nrdy; /* driver status */
u8 rsvd2[60];
u32 did2[7]; /* extended supported display devices ID list */
u32 cpd2[7]; /* extended attached display devices list */
u8 rsvd2[4];
} __packed;
 
/* OpRegion mailbox #2: SWSCI */
113,7 → 119,10
u32 pcft; /* power conservation features */
u32 srot; /* supported rotation angles */
u32 iuer; /* IUER events */
u8 rsvd[86];
u64 fdss;
u32 fdsp;
u32 stat;
u8 rsvd[70];
} __packed;
 
/* Driver readiness indicator */
232,7 → 241,7
static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_swsci __iomem *swsci = dev_priv->opregion.swsci;
struct opregion_swsci *swsci = dev_priv->opregion.swsci;
u32 main_function, sub_function, scic;
u16 pci_swsci;
u32 dslp;
257,7 → 266,7
}
 
/* Driver sleep timeout in ms. */
dslp = ioread32(&swsci->dslp);
dslp = swsci->dslp;
if (!dslp) {
/* The spec says 2ms should be the default, but it's too small
* for some machines. */
270,7 → 279,7
}
 
/* The spec tells us to do this, but we are the only user... */
scic = ioread32(&swsci->scic);
scic = swsci->scic;
if (scic & SWSCI_SCIC_INDICATOR) {
DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
return -EBUSY;
278,8 → 287,8
 
scic = function | SWSCI_SCIC_INDICATOR;
 
iowrite32(parm, &swsci->parm);
iowrite32(scic, &swsci->scic);
swsci->parm = parm;
swsci->scic = scic;
 
/* Ensure SCI event is selected and event trigger is cleared. */
pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
294,7 → 303,7
pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
 
/* Poll for the result. */
#define C (((scic = ioread32(&swsci->scic)) & SWSCI_SCIC_INDICATOR) == 0)
#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
if (wait_for(C, dslp)) {
DRM_DEBUG_DRIVER("SWSCI request timed out\n");
return -ETIMEDOUT;
310,7 → 319,7
}
 
if (parm_out)
*parm_out = ioread32(&swsci->parm);
*parm_out = swsci->parm;
 
return 0;
 
334,7 → 343,11
if (!HAS_DDI(dev))
return 0;
 
if (intel_encoder->type == INTEL_OUTPUT_DSI)
port = 0;
else
port = intel_ddi_get_encoder_port(intel_encoder);
 
if (port == PORT_E) {
port = 0;
} else {
356,6 → 369,7
type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
break;
case INTEL_OUTPUT_EDP:
case INTEL_OUTPUT_DSI:
type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
break;
default:
396,25 → 410,15
return -EINVAL;
}
 
/*
* If the vendor backlight interface is not in use and ACPI backlight interface
* is broken, do not bother processing backlight change requests from firmware.
*/
static bool should_ignore_backlight_request(void)
{
return acpi_video_backlight_support() &&
!acpi_video_verify_backlight_support();
}
 
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *intel_connector;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
struct opregion_asle *asle = dev_priv->opregion.asle;
 
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
if (should_ignore_backlight_request()) {
if (acpi_video_get_backlight_type() == acpi_backlight_native) {
DRM_DEBUG_KMS("opregion backlight request ignored\n");
return 0;
}
435,7 → 439,7
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
intel_panel_set_backlight_acpi(intel_connector, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
 
drm_modeset_unlock(&dev->mode_config.connection_mutex);
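/*
 * Worked example (input value assumed): if firmware requests bclp = 128
 * out of 255, then DIV_ROUND_UP(128 * 100, 255) = 51, so 51% is reported
 * back in cblv with the ASLE_CBLV_VALID bit set on top.
 */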
 
522,7 → 526,7
struct drm_i915_private *dev_priv =
container_of(opregion, struct drm_i915_private, opregion);
struct drm_device *dev = dev_priv->dev;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
struct opregion_asle *asle = dev_priv->opregion.asle;
u32 aslc_stat = 0;
u32 aslc_req;
 
529,7 → 533,7
if (!asle)
return;
 
aslc_req = ioread32(&asle->aslc);
aslc_req = asle->aslc;
 
if (!(aslc_req & ASLC_REQ_MSK)) {
DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
538,34 → 542,34
}
 
if (aslc_req & ASLC_SET_ALS_ILLUM)
aslc_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
aslc_stat |= asle_set_als_illum(dev, asle->alsi);
 
if (aslc_req & ASLC_SET_BACKLIGHT)
aslc_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
aslc_stat |= asle_set_backlight(dev, asle->bclp);
 
if (aslc_req & ASLC_SET_PFIT)
aslc_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
aslc_stat |= asle_set_pfit(dev, asle->pfit);
 
if (aslc_req & ASLC_SET_PWM_FREQ)
aslc_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
aslc_stat |= asle_set_pwm_freq(dev, asle->pfmb);
 
if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
aslc_stat |= asle_set_supported_rotation_angles(dev,
ioread32(&asle->srot));
asle->srot);
 
if (aslc_req & ASLC_BUTTON_ARRAY)
aslc_stat |= asle_set_button_array(dev, ioread32(&asle->iuer));
aslc_stat |= asle_set_button_array(dev, asle->iuer);
 
if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
aslc_stat |= asle_set_convertible(dev, ioread32(&asle->iuer));
aslc_stat |= asle_set_convertible(dev, asle->iuer);
 
if (aslc_req & ASLC_DOCKING_INDICATOR)
aslc_stat |= asle_set_docking(dev, ioread32(&asle->iuer));
aslc_stat |= asle_set_docking(dev, asle->iuer);
 
if (aslc_req & ASLC_ISCT_STATE_CHANGE)
aslc_stat |= asle_isct_state(dev);
 
iowrite32(aslc_stat, &asle->aslc);
asle->aslc = aslc_stat;
}
 
void intel_opregion_asle_intr(struct drm_device *dev)
590,8 → 594,8
Linux, these are handled by the dock, button and video drivers.
*/
 
struct opregion_acpi __iomem *acpi;
struct acpi_bus_event *event = data;
struct opregion_acpi *acpi;
int ret = NOTIFY_OK;
 
if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
602,11 → 606,10
 
acpi = system_opregion->acpi;
 
if (event->type == 0x80 &&
(ioread32(&acpi->cevt) & 1) == 0)
if (event->type == 0x80 && ((acpi->cevt & 1) == 0))
ret = NOTIFY_BAD;
 
iowrite32(0, &acpi->csts);
acpi->csts = 0;
 
return ret;
}
621,6 → 624,38
* (version 3)
*/
 
static u32 get_did(struct intel_opregion *opregion, int i)
{
u32 did;
 
if (i < ARRAY_SIZE(opregion->acpi->didl)) {
did = opregion->acpi->didl[i];
} else {
i -= ARRAY_SIZE(opregion->acpi->didl);
 
if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
return 0;
 
did = opregion->acpi->did2[i];
}
 
return did;
}
 
static void set_did(struct intel_opregion *opregion, int i, u32 val)
{
if (i < ARRAY_SIZE(opregion->acpi->didl)) {
opregion->acpi->didl[i] = val;
} else {
i -= ARRAY_SIZE(opregion->acpi->didl);
 
if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
return;
 
opregion->acpi->did2[i] = val;
}
}
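To make the two-array indexing concrete (assuming the usual eight-entry didl alongside the seven-entry did2 declared above):

/*
 * Index 5 resolves to acpi->didl[5]; index 9 wraps past the base array
 * and resolves to acpi->did2[9 - 8] = acpi->did2[1]. Together the two
 * arrays address 15 outputs.
 */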
 
static void intel_didl_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
630,7 → 665,7
struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
unsigned long long device_id;
acpi_status status;
u32 temp;
u32 temp, max_outputs;
int i = 0;
 
handle = ACPI_HANDLE(&dev->pdev->dev);
649,32 → 684,41
}
 
if (!acpi_video_bus) {
pr_warn("No ACPI video bus found\n");
DRM_ERROR("No ACPI video bus found\n");
return;
}
 
/*
* In theory, did2, the extended didl, gets added at opregion version
* 3.0. In practice, however, we're supposed to set it for earlier
* versions as well, since a BIOS that doesn't understand did2 should
* not look at it anyway. Use a variable so we can tweak this if a need
* arises later.
*/
max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
ARRAY_SIZE(opregion->acpi->did2);
 
list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
if (i >= 8) {
dev_dbg(&dev->pdev->dev,
"More than 8 outputs detected via ACPI\n");
if (i >= max_outputs) {
DRM_DEBUG_KMS("More than %u outputs detected via ACPI\n",
max_outputs);
return;
}
status =
acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
NULL, &device_id);
if (ACPI_SUCCESS(status)) {
if (!device_id)
goto blind_set;
iowrite32((u32)(device_id & 0x0f0f),
&opregion->acpi->didl[i]);
i++;
set_did(opregion, i++, (u32)(device_id & 0x0f0f));
}
}
 
end:
/* If fewer than 8 outputs, the list must be null terminated */
if (i < 8)
iowrite32(0, &opregion->acpi->didl[i]);
DRM_DEBUG_KMS("%d outputs detected\n", i);
 
/* If fewer than max outputs, the list must be null terminated */
if (i < max_outputs)
set_did(opregion, i, 0);
return;
 
blind_set:
681,9 → 725,9
i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
int output_type = ACPI_OTHER_OUTPUT;
if (i >= 8) {
dev_dbg(&dev->pdev->dev,
"More than 8 outputs in connector list\n");
if (i >= max_outputs) {
DRM_DEBUG_KMS("More than %u outputs in connector list\n",
max_outputs);
return;
}
switch (connector->connector_type) {
708,9 → 752,8
output_type = ACPI_LVDS_OUTPUT;
break;
}
temp = ioread32(&opregion->acpi->didl[i]);
iowrite32(temp | (1<<31) | output_type | i,
&opregion->acpi->didl[i]);
temp = get_did(opregion, i);
set_did(opregion, i, temp | (1 << 31) | output_type | i);
i++;
}
goto end;
730,8 → 773,8
* display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
* there are fewer than eight devices. */
do {
disp_id = ioread32(&opregion->acpi->didl[i]);
iowrite32(disp_id, &opregion->acpi->cadl[i]);
disp_id = get_did(opregion, i);
opregion->acpi->cadl[i] = disp_id;
} while (++i < 8 && disp_id != 0);
}
 
744,16 → 787,14
return;
 
if (opregion->acpi) {
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_didl_outputs(dev);
intel_setup_cadls(dev);
}
 
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
* We don't actually need to do anything with them. */
iowrite32(0, &opregion->acpi->csts);
iowrite32(1, &opregion->acpi->drdy);
opregion->acpi->csts = 0;
opregion->acpi->drdy = 1;
 
system_opregion = opregion;
register_acpi_notifier(&intel_opregion_notifier);
760,8 → 801,8
}
 
if (opregion->asle) {
iowrite32(ASLE_TCHE_BLC_EN, &opregion->asle->tche);
iowrite32(ASLE_ARDY_READY, &opregion->asle->ardy);
opregion->asle->tche = ASLE_TCHE_BLC_EN;
opregion->asle->ardy = ASLE_ARDY_READY;
}
}
 
774,12 → 815,12
return;
 
if (opregion->asle)
iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
 
cancel_work_sync(&dev_priv->opregion.asle_work);
 
if (opregion->acpi) {
iowrite32(0, &opregion->acpi->drdy);
opregion->acpi->drdy = 0;
 
system_opregion = NULL;
unregister_acpi_notifier(&intel_opregion_notifier);
786,7 → 827,7
}
 
/* just clear all opregion memory pointers now */
iounmap(opregion->header);
memunmap(opregion->header);
opregion->header = NULL;
opregion->acpi = NULL;
opregion->swsci = NULL;
859,11 → 900,16
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
void __iomem *base;
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
void *base;
 
BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
 
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
875,11 → 921,11
INIT_WORK(&opregion->asle_work, asle_work);
#endif
 
base = acpi_os_ioremap(asls, OPREGION_SIZE);
base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB);
if (!base)
return -ENOMEM;
 
memcpy_fromio(buf, base, sizeof(buf));
memcpy(buf, base, sizeof(buf));
 
if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
DRM_DEBUG_DRIVER("opregion signature mismatch\n");
891,7 → 937,7
 
opregion->lid_state = base + ACPI_CLID;
 
mboxes = ioread32(&opregion->header->mboxes);
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
906,12 → 952,12
DRM_DEBUG_DRIVER("ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
 
iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
}
 
return 0;
 
err_out:
iounmap(base);
memunmap(base);
return err;
}
/drivers/video/drm/i915/intel_panel.c
30,9 → 30,13
 
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/pwm.h>
#include "intel_drv.h"
 
#define CRC_PMIC_PWM_PERIOD_NS 21333
 
void
intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
98,19 → 102,15
/* adjusted_mode has been preset to be the panel's fixed mode */
void
intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config,
struct intel_crtc_state *pipe_config,
int fitting_mode)
{
struct drm_display_mode *adjusted_mode;
int x, y, width, height;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int x = 0, y = 0, width = 0, height = 0;
 
adjusted_mode = &pipe_config->adjusted_mode;
 
x = y = width = height = 0;
 
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
adjusted_mode->vdisplay == pipe_config->pipe_src_h)
if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h)
goto done;
 
switch (fitting_mode) {
117,35 → 117,35
case DRM_MODE_SCALE_CENTER:
width = pipe_config->pipe_src_w;
height = pipe_config->pipe_src_h;
x = (adjusted_mode->hdisplay - width + 1)/2;
y = (adjusted_mode->vdisplay - height + 1)/2;
x = (adjusted_mode->crtc_hdisplay - width + 1)/2;
y = (adjusted_mode->crtc_vdisplay - height + 1)/2;
break;
 
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
{
u32 scaled_width = adjusted_mode->hdisplay
u32 scaled_width = adjusted_mode->crtc_hdisplay
* pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w
* adjusted_mode->vdisplay;
* adjusted_mode->crtc_vdisplay;
if (scaled_width > scaled_height) { /* pillar */
width = scaled_height / pipe_config->pipe_src_h;
if (width & 1)
width++;
x = (adjusted_mode->hdisplay - width + 1) / 2;
x = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
y = 0;
height = adjusted_mode->vdisplay;
height = adjusted_mode->crtc_vdisplay;
} else if (scaled_width < scaled_height) { /* letter */
height = scaled_width / pipe_config->pipe_src_w;
if (height & 1)
height++;
y = (adjusted_mode->vdisplay - height + 1) / 2;
y = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
x = 0;
width = adjusted_mode->hdisplay;
width = adjusted_mode->crtc_hdisplay;
} else {
x = y = 0;
width = adjusted_mode->hdisplay;
height = adjusted_mode->vdisplay;
width = adjusted_mode->crtc_hdisplay;
height = adjusted_mode->crtc_vdisplay;
}
}
break;
152,8 → 152,8
 
case DRM_MODE_SCALE_FULLSCREEN:
x = y = 0;
width = adjusted_mode->hdisplay;
height = adjusted_mode->vdisplay;
width = adjusted_mode->crtc_hdisplay;
height = adjusted_mode->crtc_vdisplay;
break;
 
default:
168,46 → 168,46
}
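To see the aspect-preserving branch in action, here is a worked example with assumed numbers (a 1024x768 source on a 1920x1080 panel):

/*
 *   scaled_width  = 1920 * 768  = 1474560
 *   scaled_height = 1024 * 1080 = 1105920
 *
 * scaled_width > scaled_height, so the pillar branch is taken:
 *
 *   width  = 1105920 / 768 = 1440   (already even)
 *   x      = (1920 - 1440 + 1) / 2 = 240
 *   height = 1080, y = 0
 *
 * i.e. a centred 1440x1080 image with 240-pixel borders left and right.
 */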
 
static void
centre_horizontally(struct drm_display_mode *mode,
centre_horizontally(struct drm_display_mode *adjusted_mode,
int width)
{
u32 border, sync_pos, blank_width, sync_width;
 
/* keep the hsync and hblank widths constant */
sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
sync_width = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
blank_width = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
sync_pos = (blank_width - sync_width + 1) / 2;
 
border = (mode->hdisplay - width + 1) / 2;
border = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
border += border & 1; /* make the border even */
 
mode->crtc_hdisplay = width;
mode->crtc_hblank_start = width + border;
mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
adjusted_mode->crtc_hdisplay = width;
adjusted_mode->crtc_hblank_start = width + border;
adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_start + blank_width;
 
mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hblank_start + sync_pos;
adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + sync_width;
}
 
static void
centre_vertically(struct drm_display_mode *mode,
centre_vertically(struct drm_display_mode *adjusted_mode,
int height)
{
u32 border, sync_pos, blank_width, sync_width;
 
/* keep the vsync and vblank widths constant */
sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
sync_width = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
blank_width = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start;
sync_pos = (blank_width - sync_width + 1) / 2;
 
border = (mode->vdisplay - height + 1) / 2;
border = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
 
mode->crtc_vdisplay = height;
mode->crtc_vblank_start = height + border;
mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
adjusted_mode->crtc_vdisplay = height;
adjusted_mode->crtc_vblank_start = height + border;
adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vblank_start + blank_width;
 
mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vblank_start + sync_pos;
adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width;
}
 
static inline u32 panel_fitter_scaling(u32 source, u32 target)
223,14 → 223,14
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}
 
static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
u32 *pfit_control)
{
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
u32 scaled_width = adjusted_mode->hdisplay *
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
u32 scaled_width = adjusted_mode->crtc_hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
adjusted_mode->vdisplay;
adjusted_mode->crtc_vdisplay;
 
/* 965+ is easy, it does everything in hw */
if (scaled_width > scaled_height)
239,19 → 239,19
else if (scaled_width < scaled_height)
*pfit_control |= PFIT_ENABLE |
PFIT_SCALING_LETTER;
else if (adjusted_mode->hdisplay != pipe_config->pipe_src_w)
else if (adjusted_mode->crtc_hdisplay != pipe_config->pipe_src_w)
*pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
}
 
static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
u32 *pfit_control, u32 *pfit_pgm_ratios,
u32 *border)
{
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
u32 scaled_width = adjusted_mode->hdisplay *
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
u32 scaled_width = adjusted_mode->crtc_hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
adjusted_mode->vdisplay;
adjusted_mode->crtc_vdisplay;
u32 bits;
 
/*
265,9 → 265,9
pipe_config->pipe_src_h);
 
*border = LVDS_BORDER_ENABLE;
if (pipe_config->pipe_src_h != adjusted_mode->vdisplay) {
if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay) {
bits = panel_fitter_scaling(pipe_config->pipe_src_h,
adjusted_mode->vdisplay);
adjusted_mode->crtc_vdisplay);
 
*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
bits << PFIT_VERT_SCALE_SHIFT);
281,9 → 281,9
pipe_config->pipe_src_w);
 
*border = LVDS_BORDER_ENABLE;
if (pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
if (pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
bits = panel_fitter_scaling(pipe_config->pipe_src_w,
adjusted_mode->hdisplay);
adjusted_mode->crtc_hdisplay);
 
*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
bits << PFIT_VERT_SCALE_SHIFT);
301,18 → 301,16
}
 
void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_config *pipe_config,
struct intel_crtc_state *pipe_config,
int fitting_mode)
{
struct drm_device *dev = intel_crtc->base.dev;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *adjusted_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 
adjusted_mode = &pipe_config->adjusted_mode;
 
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
adjusted_mode->vdisplay == pipe_config->pipe_src_h)
if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h)
goto out;
 
switch (fitting_mode) {
338,8 → 336,8
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
if (pipe_config->pipe_src_h != adjusted_mode->vdisplay ||
pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay ||
pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
pfit_control |= PFIT_ENABLE;
if (INTEL_INFO(dev)->gen >= 4)
pfit_control |= PFIT_SCALING_AUTO;
383,7 → 381,7
 
/* Assume that the BIOS does not lie through the OpRegion... */
if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
return *dev_priv->opregion.lid_state & 0x1 ?
connector_status_connected :
connector_status_disconnected;
}
480,7 → 478,7
return val;
}
 
static u32 bdw_get_backlight(struct intel_connector *connector)
static u32 lpt_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
535,6 → 533,24
return _vlv_get_backlight(dev, pipe);
}
 
static u32 bxt_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct intel_panel *panel = &connector->panel;
struct drm_i915_private *dev_priv = dev->dev_private;
 
return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller));
}
 
static u32 pwm_get_backlight(struct intel_connector *connector)
{
struct intel_panel *panel = &connector->panel;
int duty_ns;
 
duty_ns = pwm_get_duty_cycle(panel->backlight.pwm);
return DIV_ROUND_UP(duty_ns * 100, CRC_PMIC_PWM_PERIOD_NS);
}
 
static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
545,7 → 561,7
mutex_lock(&dev_priv->backlight_lock);
 
if (panel->backlight.enabled) {
val = dev_priv->display.get_backlight(connector);
val = panel->backlight.get(connector);
val = intel_panel_compute_brightness(connector, val);
}
 
555,7 → 571,7
return val;
}
 
static void bdw_set_backlight(struct intel_connector *connector, u32 level)
static void lpt_set_backlight(struct intel_connector *connector, u32 level)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
615,16 → 631,32
I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
}
 
static void
intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
static void bxt_set_backlight(struct intel_connector *connector, u32 level)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
 
I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
}
 
static void pwm_set_backlight(struct intel_connector *connector, u32 level)
{
struct intel_panel *panel = &connector->panel;
int duty_ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
 
pwm_config(panel->backlight.pwm, duty_ns, CRC_PMIC_PWM_PERIOD_NS);
}
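With CRC_PMIC_PWM_PERIOD_NS = 21333 (roughly a 46.9 kHz PWM), the two conversions above round as follows (input level assumed):

/*
 * Setting a 50% level programs DIV_ROUND_UP(50 * 21333, 100) = 10667 ns;
 * reading it back via pwm_get_backlight() yields
 * DIV_ROUND_UP(10667 * 100, 21333) = 51, one step above 50 because both
 * conversions round up.
 */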
 
static void
intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
{
struct intel_panel *panel = &connector->panel;
 
DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
 
level = intel_panel_compute_brightness(connector, level);
dev_priv->display.set_backlight(connector, level);
panel->backlight.set(connector, level);
}
 
/* set backlight brightness to level in range [0..max], scaling wrt hw min */
680,13 → 712,35
hw_level = clamp_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
 
mutex_unlock(&dev_priv->backlight_lock);
}
 
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(connector, hw_level);
static void lpt_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
 
mutex_unlock(&dev_priv->backlight_lock);
intel_panel_actually_set_backlight(connector, 0);
 
/*
* Although we don't support or enable CPU PWM with LPT/SPT based
* systems, it may have been enabled prior to loading the
* driver. Disable to avoid warnings on LCPLL disable.
*
* This needs rework if we need to add support for CPU PWM on PCH split
* platforms.
*/
tmp = I915_READ(BLC_PWM_CPU_CTL2);
if (tmp & BLM_PWM_ENABLE) {
DRM_DEBUG_KMS("cpu backlight was enabled, disabling\n");
I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
}
 
tmp = I915_READ(BLC_PWM_PCH_CTL1);
I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
 
static void pch_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
735,6 → 789,36
I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
}
 
static void bxt_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
u32 tmp, val;
 
intel_panel_actually_set_backlight(connector, 0);
 
tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
tmp & ~BXT_BLC_PWM_ENABLE);
 
if (panel->backlight.controller == 1) {
val = I915_READ(UTIL_PIN_CTL);
val &= ~UTIL_PIN_ENABLE;
I915_WRITE(UTIL_PIN_CTL, val);
}
}
 
static void pwm_disable_backlight(struct intel_connector *connector)
{
struct intel_panel *panel = &connector->panel;
 
/* Disable the backlight */
pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS);
usleep_range(2000, 3000);
pwm_disable(panel->backlight.pwm);
}
 
void intel_panel_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
745,7 → 829,7
return;
 
/*
* Do not disable backlight on the vgaswitcheroo path. When switching
* Do not disable backlight on the vga_switcheroo path. When switching
* away from i915, the other client may depend on i915 to handle the
* backlight. This will leave the backlight on unnecessarily when
* another client is not activated.
758,12 → 842,12
mutex_lock(&dev_priv->backlight_lock);
 
panel->backlight.enabled = false;
dev_priv->display.disable_backlight(connector);
panel->backlight.disable(connector);
 
mutex_unlock(&dev_priv->backlight_lock);
}
 
static void bdw_enable_backlight(struct intel_connector *connector)
static void lpt_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
871,6 → 955,14
 
/* XXX: combine this into above write? */
intel_panel_actually_set_backlight(connector, panel->backlight.level);
 
/*
* Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
* 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
* that has backlight.
*/
if (IS_GEN2(dev))
I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
}
 
static void i965_enable_backlight(struct intel_connector *connector)
939,6 → 1031,64
I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
}
 
static void bxt_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 pwm_ctl, val;
 
/*
* To use the 2nd set of backlight registers, the utility pin has to be
* enabled in PWM mode. The mode field may only be changed while the
* utility pin is disabled.
*/
if (panel->backlight.controller == 1) {
val = I915_READ(UTIL_PIN_CTL);
if (val & UTIL_PIN_ENABLE) {
DRM_DEBUG_KMS("util pin already enabled\n");
val &= ~UTIL_PIN_ENABLE;
I915_WRITE(UTIL_PIN_CTL, val);
}
 
val = 0;
if (panel->backlight.util_pin_active_low)
val |= UTIL_PIN_POLARITY;
I915_WRITE(UTIL_PIN_CTL, val | UTIL_PIN_PIPE(pipe) |
UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
}
 
pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
DRM_DEBUG_KMS("backlight already enabled\n");
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl);
}
 
I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller),
panel->backlight.max);
 
intel_panel_actually_set_backlight(connector, panel->backlight.level);
 
pwm_ctl = 0;
if (panel->backlight.active_low_pwm)
pwm_ctl |= BXT_BLC_PWM_POLARITY;
 
I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl | BXT_BLC_PWM_ENABLE);
}
 
static void pwm_enable_backlight(struct intel_connector *connector)
{
struct intel_panel *panel = &connector->panel;
 
pwm_enable(panel->backlight.pwm);
intel_panel_actually_set_backlight(connector, panel->backlight.level);
}
 
void intel_panel_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
955,11 → 1105,7
 
WARN_ON(panel->backlight.max == 0);
 
if (panel->backlight.level == 0) {
panel->backlight.level = panel->backlight.max;
}
 
dev_priv->display.enable_backlight(connector);
panel->backlight.enable(connector);
panel->backlight.enabled = true;
 
mutex_unlock(&dev_priv->backlight_lock);
977,6 → 1123,23
bd->props.brightness, bd->props.max_brightness);
intel_panel_set_backlight(connector, bd->props.brightness,
bd->props.max_brightness);
 
/*
* Allow flipping bl_power as a sub-state of enabled. Sadly the
* backlight class device does not make it easy to differentiate
* between callbacks for brightness and bl_power, so our backlight_power
* callback needs to take this into account.
*/
if (panel->backlight.enabled) {
if (panel->backlight.power) {
bool enable = bd->props.power == FB_BLANK_UNBLANK &&
bd->props.brightness != 0;
panel->backlight.power(connector, enable);
}
} else {
bd->props.power = FB_BLANK_POWERDOWN;
}
 
drm_modeset_unlock(&dev->mode_config.connection_mutex);
return 0;
}
1079,11 → 1242,151
#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
 
/*
* Note: The setup hooks can't assume pipe is set!
*/

/*
* SPT: This value represents the period of the PWM stream in clock periods
* multiplied by 16 (default increment) or 128 (alternate increment selected in
* SCHICKEN_1 bit 0). PWM clock is 24 MHz.
*/
static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 mul, clock;
 
if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
mul = 128;
else
mul = 16;
 
clock = MHz(24);
 
return clock / (pwm_freq_hz * mul);
}
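Plugging in typical numbers (the requested frequency is assumed): with the default 16x increment and a 200 Hz backlight,

/*
 *   24,000,000 / (200 * 16) = 7500
 *
 * so 7500 is the period value that get_backlight_max_vbt() below would
 * adopt as panel->backlight.max on SPT.
 */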
 
/*
* LPT: This value represents the period of the PWM stream in clock periods
* multiplied by 128 (default increment) or 16 (alternate increment, selected in
* LPT SOUTH_CHICKEN2 register bit 5).
*/
static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 mul, clock;
 
if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY)
mul = 16;
else
mul = 128;
 
if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
clock = MHz(135); /* LPT:H */
else
clock = MHz(24); /* LPT:LP */
 
return clock / (pwm_freq_hz * mul);
}
 
/*
* ILK/SNB/IVB: This value represents the period of the PWM stream in PCH
* display raw clocks multiplied by 128.
*/
static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_device *dev = connector->base.dev;
int clock = MHz(intel_pch_rawclk(dev));
 
return clock / (pwm_freq_hz * 128);
}
 
/*
* Gen2: This field determines the number of time base events (display core
* clock frequency/32) in total for a complete cycle of modulated backlight
* control.
*
* Gen3: A time base event equals the display core clock ([DevPNV] HRAW clock)
* divided by 32.
*
* XXX: Query mode clock or hardware clock and program PWM modulation frequency
* appropriately when it's 0. Use VBT and/or sane defaults.
*/
static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int clock;
 
if (IS_PINEVIEW(dev))
clock = intel_hrawclk(dev);
else
clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
 
return clock / (pwm_freq_hz * 32);
}
 
/*
* Gen4: This value represents the period of the PWM stream in display core
* clocks multiplied by 128.
*/
static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
 
return clock / (pwm_freq_hz * 128);
}
 
/*
* VLV: This value represents the period of the PWM stream in display core
* clocks ([DevCTG] 200MHz HRAW clocks) multiplied by 128 or 25MHz S0IX clocks
* multiplied by 16. CHV uses a 19.2MHz S0IX clock.
*/
static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int clock;
 
if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
if (IS_CHERRYVIEW(dev))
return KHz(19200) / (pwm_freq_hz * 16);
else
return MHz(25) / (pwm_freq_hz * 16);
} else {
clock = intel_hrawclk(dev);
return MHz(clock) / (pwm_freq_hz * 128);
}
}
 
static u32 get_backlight_max_vbt(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
u32 pwm;
 
if (!pwm_freq_hz) {
DRM_DEBUG_KMS("backlight frequency not specified in VBT\n");
return 0;
}
 
if (!panel->backlight.hz_to_pwm) {
DRM_DEBUG_KMS("backlight frequency setting from VBT currently not supported on this platform\n");
return 0;
}
 
pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
if (!pwm) {
DRM_DEBUG_KMS("backlight frequency conversion failed\n");
return 0;
}
 
DRM_DEBUG_KMS("backlight frequency %u Hz from VBT\n", pwm_freq_hz);
 
return pwm;
}
 
/*
* Note: The setup hooks can't assume pipe is set!
*/
static u32 get_backlight_min_vbt(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
1110,7 → 1413,7
return scale(min, 0, 255, 0, panel->backlight.max);
}
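The VBT minimum is an 8-bit value rescaled onto the hardware range; for instance (values assumed):

/*
 * A VBT minimum of 10 (out of 255) with a hardware maximum of 7500
 * scales to roughly 10 * 7500 / 255 = 294, so levels below ~294 are
 * never programmed.
 */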
 
static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unused)
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1122,12 → 1425,16
 
pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
panel->backlight.max = pch_ctl2 >> 16;
 
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
 
if (!panel->backlight.max)
return -ENODEV;
 
panel->backlight.min = get_backlight_min_vbt(connector);
 
val = bdw_get_backlight(connector);
val = lpt_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
1148,7 → 1455,11
 
pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
panel->backlight.max = pch_ctl2 >> 16;
 
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
 
if (!panel->backlight.max)
return -ENODEV;
 
panel->backlight.min = get_backlight_min_vbt(connector);
1179,12 → 1490,18
panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
 
panel->backlight.max = ctl >> 17;
if (panel->backlight.combination_mode)
panel->backlight.max *= 0xff;
 
if (!panel->backlight.max) {
panel->backlight.max = get_backlight_max_vbt(connector);
panel->backlight.max >>= 1;
}
 
if (!panel->backlight.max)
return -ENODEV;
 
if (panel->backlight.combination_mode)
panel->backlight.max *= 0xff;
 
panel->backlight.min = get_backlight_min_vbt(connector);
 
val = i9xx_get_backlight(connector);
1208,12 → 1525,16
 
ctl = I915_READ(BLC_PWM_CTL);
panel->backlight.max = ctl >> 16;
if (panel->backlight.combination_mode)
panel->backlight.max *= 0xff;
 
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
 
if (!panel->backlight.max)
return -ENODEV;
 
if (panel->backlight.combination_mode)
panel->backlight.max *= 0xff;
 
panel->backlight.min = get_backlight_min_vbt(connector);
 
val = i9xx_get_backlight(connector);
1230,21 → 1551,8
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe p;
u32 ctl, ctl2, val;
 
for_each_pipe(dev_priv, p) {
u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(p));
 
/* Skip if the modulation freq is already set */
if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
continue;
 
cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(VLV_BLC_PWM_CTL(p), (0xf42 << 16) |
cur_val);
}
 
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return -ENODEV;
 
1253,7 → 1561,11
 
ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
panel->backlight.max = ctl >> 16;
 
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
 
if (!panel->backlight.max)
return -ENODEV;
 
panel->backlight.min = get_backlight_min_vbt(connector);
1267,6 → 1579,86
return 0;
}
 
static int
bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl, val;
 
/*
* For BXT the backlight controller is hard coded to 0.
* TODO: read the controller value from VBT and generalize.
*/
panel->backlight.controller = 0;
 
pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
 
/*
* Keep the check for whether controller 1 is to be programmed.
* This will come into effect once the VBT parsing is fixed for
* controller selection, and controller 1 is used for a particular
* display configuration.
*/
if (panel->backlight.controller == 1) {
val = I915_READ(UTIL_PIN_CTL);
panel->backlight.util_pin_active_low =
val & UTIL_PIN_POLARITY;
}
 
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
panel->backlight.max =
I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller));
 
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
 
if (!panel->backlight.max)
return -ENODEV;
 
val = bxt_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
panel->backlight.enabled = (pwm_ctl & BXT_BLC_PWM_ENABLE) &&
panel->backlight.level != 0;
 
return 0;
}
 
static int pwm_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
struct drm_device *dev = connector->base.dev;
struct intel_panel *panel = &connector->panel;
int retval;
 
/* Get the PWM chip for backlight control */
panel->backlight.pwm = pwm_get(dev->dev, "pwm_backlight");
if (IS_ERR(panel->backlight.pwm)) {
DRM_ERROR("Failed to own the pwm chip\n");
panel->backlight.pwm = NULL;
return -ENODEV;
}
 
retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
CRC_PMIC_PWM_PERIOD_NS);
if (retval < 0) {
DRM_ERROR("Failed to configure the pwm chip\n");
pwm_put(panel->backlight.pwm);
panel->backlight.pwm = NULL;
return retval;
}
 
panel->backlight.min = 0; /* 0% */
panel->backlight.max = 100; /* 100% */
panel->backlight.level = DIV_ROUND_UP(
pwm_get_duty_cycle(panel->backlight.pwm) * 100,
CRC_PMIC_PWM_PERIOD_NS);
panel->backlight.enabled = panel->backlight.level != 0;
 
return 0;
}
 
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->dev;
1284,9 → 1676,13
}
}
 
/* ensure intel_panel has been initialized first */
if (WARN_ON(!panel->backlight.setup))
return -ENODEV;
 
/* set level and max in panel struct */
mutex_lock(&dev_priv->backlight_lock);
ret = dev_priv->display.setup_backlight(intel_connector, pipe);
ret = panel->backlight.setup(intel_connector, pipe);
mutex_unlock(&dev_priv->backlight_lock);
 
if (ret) {
1310,44 → 1706,74
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_panel *panel = &intel_connector->panel;
 
/* dispose of the pwm */
if (panel->backlight.pwm)
pwm_put(panel->backlight.pwm);
 
panel->backlight.present = false;
}
 
/* Set up chip specific backlight functions */
void intel_panel_init_backlight_funcs(struct drm_device *dev)
static void
intel_panel_init_backlight_funcs(struct intel_panel *panel)
{
struct intel_connector *intel_connector =
container_of(panel, struct intel_connector, panel);
struct drm_device *dev = intel_connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) {
dev_priv->display.setup_backlight = bdw_setup_backlight;
dev_priv->display.enable_backlight = bdw_enable_backlight;
dev_priv->display.disable_backlight = pch_disable_backlight;
dev_priv->display.set_backlight = bdw_set_backlight;
dev_priv->display.get_backlight = bdw_get_backlight;
if (IS_BROXTON(dev)) {
panel->backlight.setup = bxt_setup_backlight;
panel->backlight.enable = bxt_enable_backlight;
panel->backlight.disable = bxt_disable_backlight;
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
} else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) {
panel->backlight.setup = lpt_setup_backlight;
panel->backlight.enable = lpt_enable_backlight;
panel->backlight.disable = lpt_disable_backlight;
panel->backlight.set = lpt_set_backlight;
panel->backlight.get = lpt_get_backlight;
if (HAS_PCH_LPT(dev))
panel->backlight.hz_to_pwm = lpt_hz_to_pwm;
else
panel->backlight.hz_to_pwm = spt_hz_to_pwm;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.setup_backlight = pch_setup_backlight;
dev_priv->display.enable_backlight = pch_enable_backlight;
dev_priv->display.disable_backlight = pch_disable_backlight;
dev_priv->display.set_backlight = pch_set_backlight;
dev_priv->display.get_backlight = pch_get_backlight;
panel->backlight.setup = pch_setup_backlight;
panel->backlight.enable = pch_enable_backlight;
panel->backlight.disable = pch_disable_backlight;
panel->backlight.set = pch_set_backlight;
panel->backlight.get = pch_get_backlight;
panel->backlight.hz_to_pwm = pch_hz_to_pwm;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.setup_backlight = vlv_setup_backlight;
dev_priv->display.enable_backlight = vlv_enable_backlight;
dev_priv->display.disable_backlight = vlv_disable_backlight;
dev_priv->display.set_backlight = vlv_set_backlight;
dev_priv->display.get_backlight = vlv_get_backlight;
if (dev_priv->vbt.has_mipi) {
panel->backlight.setup = pwm_setup_backlight;
panel->backlight.enable = pwm_enable_backlight;
panel->backlight.disable = pwm_disable_backlight;
panel->backlight.set = pwm_set_backlight;
panel->backlight.get = pwm_get_backlight;
} else {
panel->backlight.setup = vlv_setup_backlight;
panel->backlight.enable = vlv_enable_backlight;
panel->backlight.disable = vlv_disable_backlight;
panel->backlight.set = vlv_set_backlight;
panel->backlight.get = vlv_get_backlight;
panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
}
} else if (IS_GEN4(dev)) {
dev_priv->display.setup_backlight = i965_setup_backlight;
dev_priv->display.enable_backlight = i965_enable_backlight;
dev_priv->display.disable_backlight = i965_disable_backlight;
dev_priv->display.set_backlight = i9xx_set_backlight;
dev_priv->display.get_backlight = i9xx_get_backlight;
panel->backlight.setup = i965_setup_backlight;
panel->backlight.enable = i965_enable_backlight;
panel->backlight.disable = i965_disable_backlight;
panel->backlight.set = i9xx_set_backlight;
panel->backlight.get = i9xx_get_backlight;
panel->backlight.hz_to_pwm = i965_hz_to_pwm;
} else {
dev_priv->display.setup_backlight = i9xx_setup_backlight;
dev_priv->display.enable_backlight = i9xx_enable_backlight;
dev_priv->display.disable_backlight = i9xx_disable_backlight;
dev_priv->display.set_backlight = i9xx_set_backlight;
dev_priv->display.get_backlight = i9xx_get_backlight;
panel->backlight.setup = i9xx_setup_backlight;
panel->backlight.enable = i9xx_enable_backlight;
panel->backlight.disable = i9xx_disable_backlight;
panel->backlight.set = i9xx_set_backlight;
panel->backlight.get = i9xx_get_backlight;
panel->backlight.hz_to_pwm = i9xx_hz_to_pwm;
}
}
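/* Illustrative note (assumed from context, not part of this patch): with the
* per-panel vtable filled in above, the generic backlight code can dispatch
* without platform checks, e.g. panel->backlight.setup(connector, pipe) from
* intel_panel_setup_backlight(). Hook signatures are inferred, not quoted.
*/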
 
1355,6 → 1781,8
struct drm_display_mode *fixed_mode,
struct drm_display_mode *downclock_mode)
{
intel_panel_init_backlight_funcs(panel);
 
panel->fixed_mode = fixed_mode;
panel->downclock_mode = downclock_mode;
 
/drivers/video/drm/i915/intel_pm.c
28,28 → 28,14
//#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/math64.h>
//#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
 
#include <drm/i915_powerwell.h>
 
#define FORCEWAKE_ACK_TIMEOUT_MS 2
 
void getrawmonotonic(struct timespec *ts);
 
union ktime {
s64 tv64;
};
 
typedef union ktime ktime_t; /* Kill this */
 
#define ktime_to_ns(kt) ((kt).tv64)
 
static inline u64 ktime_get_raw_ns(void)
{
return 0; //ktime_to_ns(ktime_get_raw());
}
/**
* RC6 is a special power stage which allows the GPU to enter a very
* low-voltage mode when idle, using as little as 0V while in this stage. This
71,649 → 57,22
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
 
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
* framebuffer contents in-memory, aiming at reducing the required bandwidth
* during in-memory transfers and, therefore, the power consumption.
*
* The benefits of FBC are mostly visible with solid backgrounds and
* variation-less patterns.
*
* FBC-related functionality can be enabled by means of the
* i915.enable_fbc parameter
*/
 
static void gen9_init_clock_gating(struct drm_device *dev)
static void bxt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/*
* WaDisableSDEUnitClockGating:skl
* This seems to be a pre-production w/a.
*/
/* WaDisableSDEUnitClockGating:bxt */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
/*
* WaDisableDgMirrorFixInHalfSliceChicken5:skl
* This is a pre-production w/a.
* FIXME:
* GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
*/
I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
~GEN9_DG_MIRROR_FIX_ENABLE);
 
/* Wa4x4STCOptimizationDisable:skl */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
}
 
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 fbc_ctl;
 
dev_priv->fbc.enabled = false;
 
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
return;
 
fbc_ctl &= ~FBC_CTL_EN;
I915_WRITE(FBC_CONTROL, fbc_ctl);
 
/* Wait for compressing bit to clear */
if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
 
DRM_DEBUG_KMS("disabled FBC\n");
}
 
static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
int i;
u32 fbc_ctl;
 
dev_priv->fbc.enabled = true;
 
cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
 
/* FBC_CTL wants 32B or 64B units */
if (IS_GEN2(dev))
cfb_pitch = (cfb_pitch / 32) - 1;
else
cfb_pitch = (cfb_pitch / 64) - 1;
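/* Illustrative: with the 64B unit used on non-gen2, a 4096-byte pitch is
* programmed as 4096/64 - 1 = 63. */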
 
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG + (i * 4), 0);
 
if (IS_GEN4(dev)) {
u32 fbc_ctl2;
 
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
}
 
/* enable it... */
fbc_ctl = I915_READ(FBC_CONTROL);
fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
 
DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
 
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
 
static void g4x_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
 
dev_priv->fbc.enabled = true;
 
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
 
I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
 
/* enable it... */
I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
 
static void g4x_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
 
dev_priv->fbc.enabled = false;
 
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
 
DRM_DEBUG_KMS("disabled FBC\n");
}
}
 
static bool g4x_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
 
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 blt_ecoskpd;
 
/* Make sure blitter notifies FBC of writes */
 
/* Blitter is part of the Media powerwell on VLV. This parameter has
* no impact on other platforms for now */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
 
blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT;
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT);
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
POSTING_READ(GEN6_BLITTER_ECOSKPD);
 
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
 
static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
 
dev_priv->fbc.enabled = true;
 
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
 
switch (dev_priv->fbc.threshold) {
case 4:
case 3:
dpfc_ctl |= DPFC_CTL_LIMIT_4X;
break;
case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
break;
case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
break;
}
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev))
dpfc_ctl |= obj->fence_reg;
 
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
if (IS_GEN6(dev)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
sandybridge_blit_fbc_update(dev);
}
 
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
 
static void ironlake_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
 
dev_priv->fbc.enabled = false;
 
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
 
DRM_DEBUG_KMS("disabled FBC\n");
}
}
 
static bool ironlake_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
 
static void gen7_enable_fbc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
 
dev_priv->fbc.enabled = true;
 
dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
 
switch (dev_priv->fbc.threshold) {
case 4:
case 3:
dpfc_ctl |= DPFC_CTL_LIMIT_4X;
break;
case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
break;
case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
break;
}
 
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
 
if (dev_priv->fbc.false_color)
dpfc_ctl |= FBC_CTL_FALSE_COLOR;
 
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
if (IS_IVYBRIDGE(dev)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
} else {
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
HSW_FBCQ_DIS);
}
 
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
 
sandybridge_blit_fbc_update(dev);
 
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
 
bool intel_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
return dev_priv->fbc.enabled;
}
 
void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!IS_GEN8(dev))
return;
 
if (!intel_fbc_enabled(dev))
return;
 
I915_WRITE(MSG_FBC_REND_STATE, value);
}
 
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct intel_fbc_work *work =
container_of(to_delayed_work(__work),
struct intel_fbc_work, work);
struct drm_device *dev = work->crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
mutex_lock(&dev->struct_mutex);
if (work == dev_priv->fbc.fbc_work) {
/* Double check that we haven't switched fb without cancelling
* the prior work.
*/
if (work->crtc->primary->fb == work->fb) {
dev_priv->display.enable_fbc(work->crtc);
 
dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
dev_priv->fbc.y = work->crtc->y;
}
 
dev_priv->fbc.fbc_work = NULL;
}
mutex_unlock(&dev->struct_mutex);
 
kfree(work);
}
 
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
if (dev_priv->fbc.fbc_work == NULL)
return;
 
DRM_DEBUG_KMS("cancelling pending FBC enable\n");
 
/* Synchronisation is provided by struct_mutex and checking of
* dev_priv->fbc.fbc_work, so we can perform the cancellation
* entirely asynchronously.
*/
if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
/* tasklet was killed before being run, clean up */
kfree(dev_priv->fbc.fbc_work);
 
/* Mark the work as no longer wanted so that if it does
* wake up (because the work was already running and waiting
* for our mutex), it will discover that it is no longer
* necessary to run.
*/
dev_priv->fbc.fbc_work = NULL;
}
 
static void intel_enable_fbc(struct drm_crtc *crtc)
{
struct intel_fbc_work *work;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!dev_priv->display.enable_fbc)
return;
 
intel_cancel_fbc_work(dev_priv);
 
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL) {
DRM_ERROR("Failed to allocate FBC work structure\n");
dev_priv->display.enable_fbc(crtc);
return;
}
 
work->crtc = crtc;
work->fb = crtc->primary->fb;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
dev_priv->fbc.fbc_work = work;
 
/* Delay the actual enabling to let pageflipping cease and the
* display to settle before starting the compression. Note that
* this delay also serves a second purpose: it allows for a
* vblank to pass after disabling the FBC before we attempt
* to modify the control registers.
*
* A more complicated solution would involve tracking vblanks
* following the termination of the page-flipping sequence
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
*
* WaFbcWaitForVBlankBeforeEnable:ilk,snb
*/
schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
 
void intel_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
intel_cancel_fbc_work(dev_priv);
 
if (!dev_priv->display.disable_fbc)
return;
 
dev_priv->display.disable_fbc(dev);
dev_priv->fbc.plane = -1;
}
 
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
enum no_fbc_reason reason)
{
if (dev_priv->fbc.no_fbc_reason == reason)
return false;
 
dev_priv->fbc.no_fbc_reason = reason;
return true;
}
 
/**
* intel_update_fbc - enable/disable FBC as needed
* @dev: the drm_device
*
* Set up the framebuffer compression hardware at mode set time. We
* enable it if possible:
* - plane A only (on pre-965)
* - no pixel multiply/line duplication
* - no alpha buffer discard
* - no dual wide
* - framebuffer <= max_hdisplay in width, max_vdisplay in height
*
* We can't assume that any compression will take place (worst case),
* so the compressed buffer has to be the same size as the uncompressed
* one. It also must reside (along with the line length buffer) in
* stolen memory.
*
* We need to enable/disable FBC on a global basis.
*/
void intel_update_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = NULL, *tmp_crtc;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;
 
if (!HAS_FBC(dev)) {
set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
return;
}
 
if (!i915.powersave) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
return;
}
 
/*
* If FBC is already on, we just have to verify that we can
* keep it that way...
* Need to disable if:
* - more than one pipe is active
* - changing FBC params (stride, fence, mode)
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
for_each_crtc(dev, tmp_crtc) {
if (intel_crtc_active(tmp_crtc) &&
to_intel_crtc(tmp_crtc)->primary_enabled) {
if (crtc) {
if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
goto out_disable;
}
crtc = tmp_crtc;
}
}
 
if (!crtc || crtc->primary->fb == NULL) {
if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
DRM_DEBUG_KMS("no output, disabling\n");
goto out_disable;
}
 
intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;
obj = intel_fb_obj(fb);
adjusted_mode = &intel_crtc->config.adjusted_mode;
 
if (i915.enable_fbc < 0) {
if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
DRM_DEBUG_KMS("disabled per chip default\n");
goto out_disable;
}
if (!i915.enable_fbc) {
if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
DRM_DEBUG_KMS("fbc disabled per module param\n");
goto out_disable;
}
if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
DRM_DEBUG_KMS("mode incompatible with compression, "
"disabling\n");
goto out_disable;
}
 
if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
max_width = 4096;
max_height = 4096;
} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
max_width = 4096;
max_height = 2048;
} else {
max_width = 2048;
max_height = 1536;
}
if (intel_crtc->config.pipe_src_w > max_width ||
intel_crtc->config.pipe_src_h > max_height) {
if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
}
if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
intel_crtc->plane != PLANE_A) {
if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
DRM_DEBUG_KMS("plane not A, disabling compression\n");
goto out_disable;
}
 
/* The use of a CPU fence is mandatory in order to detect writes
* by the CPU to the scanout and trigger updates to the FBC.
*/
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
goto out_disable;
}
if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
goto out_disable;
}
 
/* If the kernel debugger is active, always disable compression */
if (in_dbg_master())
goto out_disable;
 
if (i915_gem_stolen_setup_compression(dev, obj->base.size,
drm_format_plane_cpp(fb->pixel_format, 0))) {
if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
goto out_disable;
}
 
/* If the scanout has not changed, don't modify the FBC settings.
* Note that we make the fundamental assumption that the fb->obj
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
if (dev_priv->fbc.plane == intel_crtc->plane &&
dev_priv->fbc.fb_id == fb->base.id &&
dev_priv->fbc.y == crtc->y)
return;
 
if (intel_fbc_enabled(dev)) {
/* We update FBC along two paths, after changing fb/crtc
* configuration (modeswitching) and after page-flipping
* finishes. For the latter, we know that not only did
* we disable the FBC at the start of the page-flip
* sequence, but also more than one vblank has passed.
*
* For the former case of modeswitching, it is possible
* to switch between two FBC valid configurations
* instantaneously so we do need to disable the FBC
* before we can modify its control registers. We also
* have to wait for the next vblank for that to take
* effect. However, since we delay enabling FBC we can
* assume that a vblank has passed since disabling and
* that we can safely alter the registers in the deferred
* callback.
*
* In the scenario that we go from a valid to invalid
* and then back to valid FBC configuration we have
* no strict enforcement that a vblank occurred since
* disabling the FBC. However, along all current pipe
* disabling paths we do need to wait for a vblank at
* some point. And we wait before enabling FBC anyway.
*/
DRM_DEBUG_KMS("disabling active FBC for update\n");
intel_disable_fbc(dev);
}
 
intel_enable_fbc(crtc);
dev_priv->fbc.no_fbc_reason = FBC_OK;
return;
 
out_disable:
/* Multiple disables should be harmless */
if (intel_fbc_enabled(dev)) {
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
intel_disable_fbc(dev);
}
i915_gem_stolen_cleanup_compression(dev);
}
 
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
883,6 → 242,47
return NULL;
}
 
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
u32 val;
 
mutex_lock(&dev_priv->rps.hw_lock);
 
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
if (enable)
val &= ~FORCE_DDR_HIGH_FREQ;
else
val |= FORCE_DDR_HIGH_FREQ;
val &= ~FORCE_DDR_LOW_FREQ;
val |= FORCE_DDR_FREQ_REQ_ACK;
vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
 
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
 
mutex_unlock(&dev_priv->rps.hw_lock);
}
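/* Note (assumption based on i915's wait_for() helper): the wait above polls
* until the Punit clears FORCE_DDR_FREQ_REQ_ACK, giving up after 3 ms; the
* timeout argument is in milliseconds. */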
 
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
u32 val;
 
mutex_lock(&dev_priv->rps.hw_lock);
 
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
if (enable)
val |= DSP_MAXFIFO_PM5_ENABLE;
else
val &= ~DSP_MAXFIFO_PM5_ENABLE;
vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
 
mutex_unlock(&dev_priv->rps.hw_lock);
}
 
#define FW_WM(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
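/* Illustrative: FW_WM(0x12, SR) expands to
* (0x12 << DSPFW_SR_SHIFT) & DSPFW_SR_MASK, i.e. the value shifted into the
* SR field and clamped to that field's mask. */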
 
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
struct drm_device *dev = dev_priv->dev;
890,20 → 290,26
 
if (IS_VALLEYVIEW(dev)) {
I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
POSTING_READ(FW_BLC_SELF_VLV);
dev_priv->wm.vlv.cxsr = enable;
} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
POSTING_READ(FW_BLC_SELF);
} else if (IS_PINEVIEW(dev)) {
val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
I915_WRITE(DSPFW3, val);
POSTING_READ(DSPFW3);
} else if (IS_I945G(dev) || IS_I945GM(dev)) {
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
I915_WRITE(FW_BLC_SELF, val);
POSTING_READ(FW_BLC_SELF);
} else if (IS_I915GM(dev)) {
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
I915_WRITE(INSTPM, val);
POSTING_READ(INSTPM);
} else {
return;
}
912,6 → 318,7
enable ? "enabled" : "disabled");
}
 
 
/*
* Latency for FIFO fetches is dependent on several factors:
* - memory configuration (speed, channels)
928,6 → 335,61
*/
static const int pessimal_latency_ns = 5000;
 
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
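/* Illustrative: for pipe A sprite0, VLV_FIFO_START(dsparb, dsparb2, 0, 0)
* combines bits 7:0 of DSPARB with bit 0 of DSPARB2 as bit 8, yielding a
* 9-bit FIFO start offset in the 0-511 range. */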
 
static int vlv_get_fifo_size(struct drm_device *dev,
enum pipe pipe, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int sprite0_start, sprite1_start, size;
 
switch (pipe) {
uint32_t dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ(DSPARB);
dsparb2 = I915_READ(DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
break;
case PIPE_B:
dsparb = I915_READ(DSPARB);
dsparb2 = I915_READ(DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
break;
case PIPE_C:
dsparb2 = I915_READ(DSPARB2);
dsparb3 = I915_READ(DSPARB3);
sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
break;
default:
return 0;
}
 
switch (plane) {
case 0:
size = sprite0_start;
break;
case 1:
size = sprite1_start - sprite0_start;
break;
case 2:
size = 512 - 1 - sprite1_start;
break;
default:
return 0;
}
 
DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
pipe_name(pipe), plane == 0 ? "primary" : "sprite",
plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
size);
 
return size;
}
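/* Illustrative: if sprite0_start = 256 and sprite1_start = 384, the primary
* plane owns 256 entries, sprite0 owns 384 - 256 = 128, and sprite1 owns
* 512 - 1 - 384 = 127 (one entry of the 512 is not usable). */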
 
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
1172,13 → 634,10
 
crtc = single_enabled_crtc(dev);
if (crtc) {
const struct drm_display_mode *adjusted_mode;
int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
int clock;
const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
int clock = adjusted_mode->crtc_clock;
 
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
clock = adjusted_mode->crtc_clock;
 
/* Display SR */
wm = intel_calculate_wm(clock, &pineview_display_wm,
pineview_display_wm.fifo_size,
1185,7 → 644,7
pixel_size, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
reg |= wm << DSPFW_SR_SHIFT;
reg |= FW_WM(wm, SR);
I915_WRITE(DSPFW1, reg);
DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
 
1195,7 → 654,7
pixel_size, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
reg |= FW_WM(wm, CURSOR_SR);
I915_WRITE(DSPFW3, reg);
 
/* Display HPLL off SR */
1204,7 → 663,7
pixel_size, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
reg |= wm & DSPFW_HPLL_SR_MASK;
reg |= FW_WM(wm, HPLL_SR);
I915_WRITE(DSPFW3, reg);
 
/* cursor HPLL off SR */
1213,7 → 672,7
pixel_size, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
reg |= FW_WM(wm, HPLL_CURSOR);
I915_WRITE(DSPFW3, reg);
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
 
1245,11 → 704,11
return false;
}
 
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->primary->fb->bits_per_pixel / 8;
hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
 
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1264,7 → 723,7
/* Use the large buffer method to calculate cursor watermark */
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
1332,11 → 791,11
}
 
crtc = intel_get_crtc_for_plane(dev, plane);
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
pixel_size = crtc->primary->fb->bits_per_pixel / 8;
hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
 
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
1350,7 → 809,7
*display_wm = entries + display->guard_size;
 
/* calculate the self-refresh watermark for display cursor */
entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
 
1359,270 → 818,546
display, cursor);
}
 
static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
int pixel_size,
int *prec_mult,
int *drain_latency)
#define FW_WM_VLV(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
 
static void vlv_write_wm_values(struct intel_crtc *crtc,
const struct vlv_wm_values *wm)
{
struct drm_device *dev = crtc->dev;
int entries;
int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
 
if (WARN(clock == 0, "Pixel clock is zero!\n"))
return false;
I915_WRITE(VLV_DDL(pipe),
(wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
(wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
(wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
(wm->ddl[pipe].primary << DDL_PLANE_SHIFT));
 
if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
return false;
I915_WRITE(DSPFW1,
FW_WM(wm->sr.plane, SR) |
FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
I915_WRITE(DSPFW2,
FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
I915_WRITE(DSPFW3,
FW_WM(wm->sr.cursor, CURSOR_SR));
 
entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
if (IS_CHERRYVIEW(dev))
*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
DRAIN_LATENCY_PRECISION_16;
else
*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
DRAIN_LATENCY_PRECISION_32;
*drain_latency = (64 * (*prec_mult) * 4) / entries;
if (IS_CHERRYVIEW(dev_priv)) {
I915_WRITE(DSPFW7_CHV,
FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
I915_WRITE(DSPFW8_CHV,
FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
I915_WRITE(DSPFW9_CHV,
FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
I915_WRITE(DSPHOWM,
FW_WM(wm->sr.plane >> 9, SR_HI) |
FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
} else {
I915_WRITE(DSPFW7,
FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
I915_WRITE(DSPHOWM,
FW_WM(wm->sr.plane >> 9, SR_HI) |
FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
}
 
if (*drain_latency > DRAIN_LATENCY_MASK)
*drain_latency = DRAIN_LATENCY_MASK;
/* zero (unused) WM1 watermarks */
I915_WRITE(DSPFW4, 0);
I915_WRITE(DSPFW5, 0);
I915_WRITE(DSPFW6, 0);
I915_WRITE(DSPHOWM1, 0);
 
return true;
POSTING_READ(DSPFW1);
}
 
#undef FW_WM_VLV
 
enum vlv_wm_level {
VLV_WM_LEVEL_PM2,
VLV_WM_LEVEL_PM5,
VLV_WM_LEVEL_DDR_DVFS,
};
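/* Illustrative mapping, per vlv_setup_wm_latency() below: PM2 tolerates
* roughly 3 us of memory latency, PM5 about 12 us, and DDR DVFS about 33 us
* (the latter two levels exist on CHV only). */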
 
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
unsigned int pipe_htotal,
unsigned int horiz_pixels,
unsigned int bytes_per_pixel,
unsigned int latency)
{
unsigned int ret;
 
ret = (latency * pixel_rate) / (pipe_htotal * 10000);
ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
ret = DIV_ROUND_UP(ret, 64);
 
return ret;
}
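/* Illustrative: pixel_rate = 148500 kHz, htotal = 2200, 1920 pixels wide,
* 4 bytes/pixel, latency = 30 (i.e. 3 us): 30 * 148500 / (2200 * 10000)
* truncates to 0, so (0 + 1) * 1920 * 4 / 64 rounds up to 120 FIFO blocks. */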
 
static void vlv_setup_wm_latency(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* all latencies in usec */
dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
 
dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
 
if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
 
dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
}
}
 
static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
struct intel_crtc *crtc,
const struct intel_plane_state *state,
int level)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int clock, htotal, pixel_size, width, wm;
 
if (dev_priv->wm.pri_latency[level] == 0)
return USHRT_MAX;
 
if (!state->visible)
return 0;
 
pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
clock = crtc->config->base.adjusted_mode.crtc_clock;
htotal = crtc->config->base.adjusted_mode.crtc_htotal;
width = crtc->config->pipe_src_w;
if (WARN_ON(htotal == 0))
htotal = 1;
 
if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
/*
* Update drain latency registers of memory arbiter
*
* Valleyview SoC has a new memory arbiter and needs drain latency registers
* to be programmed. Each plane has a drain latency multiplier and a drain
* latency value.
* FIXME the formula gives values that are
* too big for the cursor FIFO, and hence we
* would never be able to use cursors. For
* now just hardcode the watermark.
*/
wm = 63;
} else {
wm = vlv_wm_method2(clock, htotal, width, pixel_size,
dev_priv->wm.pri_latency[level] * 10);
}
 
static void vlv_update_drain_latency(struct drm_crtc *crtc)
return min_t(int, wm, USHRT_MAX);
}
 
static void vlv_compute_fifo(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pixel_size;
int drain_latency;
enum pipe pipe = intel_crtc->pipe;
int plane_prec, prec_mult, plane_dl;
const int high_precision = IS_CHERRYVIEW(dev) ?
DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
struct drm_device *dev = crtc->base.dev;
struct vlv_wm_state *wm_state = &crtc->wm_state;
struct intel_plane *plane;
unsigned int total_rate = 0;
const int fifo_size = 512 - 1;
int fifo_extra, fifo_left = fifo_size;
 
plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
(DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
for_each_intel_plane_on_crtc(dev, crtc, plane) {
struct intel_plane_state *state =
to_intel_plane_state(plane->base.state);
 
if (!intel_crtc_active(crtc)) {
I915_WRITE(VLV_DDL(pipe), plane_dl);
return;
if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
continue;
 
if (state->visible) {
wm_state->num_active_planes++;
total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
}
}
 
/* Primary plane Drain Latency */
pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
plane_prec = (prec_mult == high_precision) ?
DDL_PLANE_PRECISION_HIGH :
DDL_PLANE_PRECISION_LOW;
plane_dl |= plane_prec | drain_latency;
for_each_intel_plane_on_crtc(dev, crtc, plane) {
struct intel_plane_state *state =
to_intel_plane_state(plane->base.state);
unsigned int rate;
 
if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
plane->wm.fifo_size = 63;
continue;
}
 
/* Cursor Drain Latency
* BPP is always 4 for cursor
*/
pixel_size = 4;
if (!state->visible) {
plane->wm.fifo_size = 0;
continue;
}
 
/* Program cursor DL only if it is enabled */
if (intel_crtc->cursor_base &&
vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
plane_prec = (prec_mult == high_precision) ?
DDL_CURSOR_PRECISION_HIGH :
DDL_CURSOR_PRECISION_LOW;
plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
plane->wm.fifo_size = fifo_size * rate / total_rate;
fifo_left -= plane->wm.fifo_size;
}
 
I915_WRITE(VLV_DDL(pipe), plane_dl);
fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);
 
/* spread the remainder evenly */
for_each_intel_plane_on_crtc(dev, crtc, plane) {
int plane_extra;
 
if (fifo_left == 0)
break;
 
if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
continue;
 
/* give it all to the first plane if none are active */
if (plane->wm.fifo_size == 0 &&
wm_state->num_active_planes)
continue;
 
plane_extra = min(fifo_extra, fifo_left);
plane->wm.fifo_size += plane_extra;
fifo_left -= plane_extra;
}
 
#define single_plane_enabled(mask) is_power_of_2(mask)
WARN_ON(fifo_left != 0);
}
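/* Illustrative: with a 4 byte/pixel primary and one 2 byte/pixel sprite
* visible, total_rate = 6, so the primary gets 511 * 4 / 6 = 340 entries
* and the sprite 511 * 2 / 6 = 170; the single leftover entry is then
* handed back out by the remainder loop above. */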
 
static void valleyview_update_wm(struct drm_crtc *crtc)
static void vlv_invert_wms(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
int ignore_plane_sr, ignore_cursor_sr;
unsigned int enabled = 0;
bool cxsr_enabled;
struct vlv_wm_state *wm_state = &crtc->wm_state;
int level;
 
vlv_update_drain_latency(crtc);
for (level = 0; level < wm_state->num_levels; level++) {
struct drm_device *dev = crtc->base.dev;
const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
struct intel_plane *plane;
 
if (g4x_compute_wm0(dev, PIPE_A,
&valleyview_wm_info, pessimal_latency_ns,
&valleyview_cursor_wm_info, pessimal_latency_ns,
&planea_wm, &cursora_wm))
enabled |= 1 << PIPE_A;
wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;
 
if (g4x_compute_wm0(dev, PIPE_B,
&valleyview_wm_info, pessimal_latency_ns,
&valleyview_cursor_wm_info, pessimal_latency_ns,
&planeb_wm, &cursorb_wm))
enabled |= 1 << PIPE_B;
for_each_intel_plane_on_crtc(dev, crtc, plane) {
switch (plane->base.type) {
int sprite;
case DRM_PLANE_TYPE_CURSOR:
wm_state->wm[level].cursor = plane->wm.fifo_size -
wm_state->wm[level].cursor;
break;
case DRM_PLANE_TYPE_PRIMARY:
wm_state->wm[level].primary = plane->wm.fifo_size -
wm_state->wm[level].primary;
break;
case DRM_PLANE_TYPE_OVERLAY:
sprite = plane->plane;
wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
wm_state->wm[level].sprite[sprite];
break;
}
}
}
}
 
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&valleyview_wm_info,
&valleyview_cursor_wm_info,
&plane_sr, &ignore_cursor_sr) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
2*sr_latency_ns,
&valleyview_wm_info,
&valleyview_cursor_wm_info,
&ignore_plane_sr, &cursor_sr)) {
cxsr_enabled = true;
} else {
cxsr_enabled = false;
intel_set_memory_cxsr(dev_priv, false);
plane_sr = cursor_sr = 0;
static void vlv_compute_wm(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct vlv_wm_state *wm_state = &crtc->wm_state;
struct intel_plane *plane;
int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
int level;
 
memset(wm_state, 0, sizeof(*wm_state));
 
wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
wm_state->num_levels = to_i915(dev)->wm.max_level + 1;
 
wm_state->num_active_planes = 0;
 
vlv_compute_fifo(crtc);
 
if (wm_state->num_active_planes != 1)
wm_state->cxsr = false;
 
if (wm_state->cxsr) {
for (level = 0; level < wm_state->num_levels; level++) {
wm_state->sr[level].plane = sr_fifo_size;
wm_state->sr[level].cursor = 63;
}
}
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
"B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
planeb_wm, cursorb_wm,
plane_sr, cursor_sr);
for_each_intel_plane_on_crtc(dev, crtc, plane) {
struct intel_plane_state *state =
to_intel_plane_state(plane->base.state);
 
I915_WRITE(DSPFW1,
(plane_sr << DSPFW_SR_SHIFT) |
(cursorb_wm << DSPFW_CURSORB_SHIFT) |
(planeb_wm << DSPFW_PLANEB_SHIFT) |
(planea_wm << DSPFW_PLANEA_SHIFT));
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
if (!state->visible)
continue;
 
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
/* normal watermarks */
for (level = 0; level < wm_state->num_levels; level++) {
int wm = vlv_compute_wm_level(plane, crtc, state, level);
int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;
 
/* hack */
if (WARN_ON(level == 0 && wm > max_wm))
wm = max_wm;
 
if (wm > plane->wm.fifo_size)
break;
 
switch (plane->base.type) {
int sprite;
case DRM_PLANE_TYPE_CURSOR:
wm_state->wm[level].cursor = wm;
break;
case DRM_PLANE_TYPE_PRIMARY:
wm_state->wm[level].primary = wm;
break;
case DRM_PLANE_TYPE_OVERLAY:
sprite = plane->plane;
wm_state->wm[level].sprite[sprite] = wm;
break;
}
}
 
static void cherryview_update_wm(struct drm_crtc *crtc)
wm_state->num_levels = level;
 
if (!wm_state->cxsr)
continue;
 
/* maxfifo watermarks */
switch (plane->base.type) {
int sprite, level;
case DRM_PLANE_TYPE_CURSOR:
for (level = 0; level < wm_state->num_levels; level++)
wm_state->sr[level].cursor =
wm_state->wm[level].cursor;
break;
case DRM_PLANE_TYPE_PRIMARY:
for (level = 0; level < wm_state->num_levels; level++)
wm_state->sr[level].plane =
min(wm_state->sr[level].plane,
wm_state->wm[level].primary);
break;
case DRM_PLANE_TYPE_OVERLAY:
sprite = plane->plane;
for (level = 0; level < wm_state->num_levels; level++)
wm_state->sr[level].plane =
min(wm_state->sr[level].plane,
wm_state->wm[level].sprite[sprite]);
break;
}
}
 
/* clear any (partially) filled invalid levels */
for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
}
 
vlv_invert_wms(crtc);
}
 
#define VLV_FIFO(plane, value) \
(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
 
static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, planec_wm;
int cursora_wm, cursorb_wm, cursorc_wm;
int plane_sr, cursor_sr;
int ignore_plane_sr, ignore_cursor_sr;
unsigned int enabled = 0;
bool cxsr_enabled;
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *plane;
int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;
 
vlv_update_drain_latency(crtc);
for_each_intel_plane_on_crtc(dev, crtc, plane) {
if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
WARN_ON(plane->wm.fifo_size != 63);
continue;
}
 
if (g4x_compute_wm0(dev, PIPE_A,
&valleyview_wm_info, pessimal_latency_ns,
&valleyview_cursor_wm_info, pessimal_latency_ns,
&planea_wm, &cursora_wm))
enabled |= 1 << PIPE_A;
if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
sprite0_start = plane->wm.fifo_size;
else if (plane->plane == 0)
sprite1_start = sprite0_start + plane->wm.fifo_size;
else
fifo_size = sprite1_start + plane->wm.fifo_size;
}
 
if (g4x_compute_wm0(dev, PIPE_B,
&valleyview_wm_info, pessimal_latency_ns,
&valleyview_cursor_wm_info, pessimal_latency_ns,
&planeb_wm, &cursorb_wm))
enabled |= 1 << PIPE_B;
WARN_ON(fifo_size != 512 - 1);
 
if (g4x_compute_wm0(dev, PIPE_C,
&valleyview_wm_info, pessimal_latency_ns,
&valleyview_cursor_wm_info, pessimal_latency_ns,
&planec_wm, &cursorc_wm))
enabled |= 1 << PIPE_C;
DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
pipe_name(crtc->pipe), sprite0_start,
sprite1_start, fifo_size);
 
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&valleyview_wm_info,
&valleyview_cursor_wm_info,
&plane_sr, &ignore_cursor_sr) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
2*sr_latency_ns,
&valleyview_wm_info,
&valleyview_cursor_wm_info,
&ignore_plane_sr, &cursor_sr)) {
cxsr_enabled = true;
} else {
cxsr_enabled = false;
intel_set_memory_cxsr(dev_priv, false);
plane_sr = cursor_sr = 0;
switch (crtc->pipe) {
uint32_t dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ(DSPARB);
dsparb2 = I915_READ(DSPARB2);
 
dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
VLV_FIFO(SPRITEB, 0xff));
dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
VLV_FIFO(SPRITEB, sprite1_start));
 
dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
VLV_FIFO(SPRITEB_HI, 0x1));
dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
 
I915_WRITE(DSPARB, dsparb);
I915_WRITE(DSPARB2, dsparb2);
break;
case PIPE_B:
dsparb = I915_READ(DSPARB);
dsparb2 = I915_READ(DSPARB2);
 
dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
VLV_FIFO(SPRITED, 0xff));
dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
VLV_FIFO(SPRITED, sprite1_start));
 
dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
VLV_FIFO(SPRITED_HI, 0xff));
dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
 
I915_WRITE(DSPARB, dsparb);
I915_WRITE(DSPARB2, dsparb2);
break;
case PIPE_C:
dsparb3 = I915_READ(DSPARB3);
dsparb2 = I915_READ(DSPARB2);
 
dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
VLV_FIFO(SPRITEF, 0xff));
dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
VLV_FIFO(SPRITEF, sprite1_start));
 
dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
VLV_FIFO(SPRITEF_HI, 0xff));
dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
 
I915_WRITE(DSPARB3, dsparb3);
I915_WRITE(DSPARB2, dsparb2);
break;
default:
break;
}
}
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
"B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
"SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
planeb_wm, cursorb_wm,
planec_wm, cursorc_wm,
plane_sr, cursor_sr);
#undef VLV_FIFO
 
I915_WRITE(DSPFW1,
(plane_sr << DSPFW_SR_SHIFT) |
(cursorb_wm << DSPFW_CURSORB_SHIFT) |
(planeb_wm << DSPFW_PLANEB_SHIFT) |
(planea_wm << DSPFW_PLANEA_SHIFT));
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
I915_WRITE(DSPFW9_CHV,
(I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
DSPFW_CURSORC_MASK)) |
(planec_wm << DSPFW_PLANEC_SHIFT) |
(cursorc_wm << DSPFW_CURSORC_SHIFT));
static void vlv_merge_wm(struct drm_device *dev,
struct vlv_wm_values *wm)
{
struct intel_crtc *crtc;
int num_active_crtcs = 0;
 
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
wm->level = to_i915(dev)->wm.max_level;
wm->cxsr = true;
 
for_each_intel_crtc(dev, crtc) {
const struct vlv_wm_state *wm_state = &crtc->wm_state;
 
if (!crtc->active)
continue;
 
if (!wm_state->cxsr)
wm->cxsr = false;
 
num_active_crtcs++;
wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
}
 
static void valleyview_update_sprite_wm(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width,
uint32_t sprite_height,
int pixel_size,
bool enabled, bool scaled)
if (num_active_crtcs != 1)
wm->cxsr = false;
 
if (num_active_crtcs > 1)
wm->level = VLV_WM_LEVEL_PM2;
 
for_each_intel_crtc(dev, crtc) {
struct vlv_wm_state *wm_state = &crtc->wm_state;
enum pipe pipe = crtc->pipe;
 
if (!crtc->active)
continue;
 
wm->pipe[pipe] = wm_state->wm[wm->level];
if (wm->cxsr)
wm->sr = wm_state->sr[wm->level];
 
wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
}
}
 
static void vlv_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = to_intel_plane(plane)->pipe;
int sprite = to_intel_plane(plane)->plane;
int drain_latency;
int plane_prec;
int sprite_dl;
int prec_mult;
const int high_precision = IS_CHERRYVIEW(dev) ?
DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct vlv_wm_values wm = {};
 
sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
(DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
vlv_compute_wm(intel_crtc);
vlv_merge_wm(dev, &wm);
 
if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
&drain_latency)) {
plane_prec = (prec_mult == high_precision) ?
DDL_SPRITE_PRECISION_HIGH(sprite) :
DDL_SPRITE_PRECISION_LOW(sprite);
sprite_dl |= plane_prec |
(drain_latency << DDL_SPRITE_SHIFT(sprite));
if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
/* FIXME should be part of crtc atomic commit */
vlv_pipe_set_fifo_size(intel_crtc);
return;
}
 
I915_WRITE(VLV_DDL(pipe), sprite_dl);
if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
chv_set_memory_dvfs(dev_priv, false);
 
if (wm.level < VLV_WM_LEVEL_PM5 &&
dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
chv_set_memory_pm5(dev_priv, false);
 
if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
intel_set_memory_cxsr(dev_priv, false);
 
/* FIXME should be part of crtc atomic commit */
vlv_pipe_set_fifo_size(intel_crtc);
 
vlv_write_wm_values(intel_crtc, &wm);
 
DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
"sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);
 
if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
intel_set_memory_cxsr(dev_priv, true);
 
if (wm.level >= VLV_WM_LEVEL_PM5 &&
dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
chv_set_memory_pm5(dev_priv, true);
 
if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
chv_set_memory_dvfs(dev_priv, true);
 
dev_priv->wm.vlv = wm;
}
 
#define single_plane_enabled(mask) is_power_of_2(mask)
 
static void g4x_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
1665,17 → 1400,17
plane_sr, cursor_sr);
 
I915_WRITE(DSPFW1,
(plane_sr << DSPFW_SR_SHIFT) |
(cursorb_wm << DSPFW_CURSORB_SHIFT) |
(planeb_wm << DSPFW_PLANEB_SHIFT) |
(planea_wm << DSPFW_PLANEA_SHIFT));
FW_WM(plane_sr, SR) |
FW_WM(cursorb_wm, CURSORB) |
FW_WM(planeb_wm, PLANEB) |
FW_WM(planea_wm, PLANEA));
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
FW_WM(cursora_wm, CURSORA));
/* HPLL off in SR has some issues on G4x... disable it */
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
FW_WM(cursor_sr, CURSOR_SR));
 
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
1695,12 → 1430,11
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
const struct drm_display_mode *adjusted_mode =
&to_intel_crtc(crtc)->config.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
 
1718,7 → 1452,7
entries, srwm);
 
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * to_intel_crtc(crtc)->cursor_width;
pixel_size * crtc->cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
1741,19 → 1475,21
srwm);
 
/* 965 has limitations... */
I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
(8 << DSPFW_CURSORB_SHIFT) |
(8 << DSPFW_PLANEB_SHIFT) |
(8 << DSPFW_PLANEA_SHIFT));
I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
(8 << DSPFW_PLANEC_SHIFT_OLD));
I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
FW_WM(8, CURSORB) |
FW_WM(8, PLANEB) |
FW_WM(8, PLANEA));
I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
FW_WM(8, PLANEC_OLD));
/* update cursor SR watermark */
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
 
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
}
 
#undef FW_WM
 
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
1777,11 → 1513,11
crtc = intel_get_crtc_for_plane(dev, 0);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
int cpp = crtc->primary->fb->bits_per_pixel / 8;
int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
 
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
1799,11 → 1535,11
crtc = intel_get_crtc_for_plane(dev, 1);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
int cpp = crtc->primary->fb->bits_per_pixel / 8;
int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
 
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
1822,7 → 1558,7
if (IS_I915GM(dev) && enabled) {
struct drm_i915_gem_object *obj;
 
obj = intel_fb_obj(enabled->primary->fb);
obj = intel_fb_obj(enabled->primary->state->fb);
 
/* self-refresh seems busted with untiled */
if (obj->tiling_mode == I915_TILING_NONE)
1841,12 → 1577,11
if (HAS_FW_BLC(dev) && enabled) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
const struct drm_display_mode *adjusted_mode =
&to_intel_crtc(enabled)->config.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
 
1898,7 → 1633,7
if (crtc == NULL)
return;
 
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
&i845_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
1911,23 → 1646,22
I915_WRITE(FW_BLC, fwater_lo);
}
 
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
struct drm_crtc *crtc)
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pixel_rate;
 
pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
 
/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
* adjust the pixel_rate here. */
 
if (intel_crtc->config.pch_pfit.enabled) {
if (pipe_config->pch_pfit.enabled) {
uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
uint32_t pfit_size = pipe_config->pch_pfit.size;
 
pipe_w = intel_crtc->config.pipe_src_w;
pipe_h = intel_crtc->config.pipe_src_h;
pipe_w = pipe_config->pipe_src_w;
pipe_h = pipe_config->pipe_src_h;
 
pfit_w = (pfit_size >> 16) & 0xFFFF;
pfit_h = pfit_size & 0xFFFF;
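/* Illustrative: pfit_size packs the scaled width in bits 31:16 and the
* height in bits 15:0, so 0x07800438 decodes to 1920x1080. */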
if (pipe_w < pfit_w)
1984,18 → 1718,8
uint32_t pipe_htotal;
uint32_t pixel_rate; /* in KHz */
struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
struct intel_plane_wm_parameters cursor;
};
 
struct ilk_pipe_wm_parameters {
bool active;
uint32_t pipe_htotal;
uint32_t pixel_rate;
struct intel_plane_wm_parameters pri;
struct intel_plane_wm_parameters spr;
struct intel_plane_wm_parameters cur;
};
 
struct ilk_wm_maximums {
uint16_t pri;
uint16_t spr;
2014,26 → 1738,26
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t mem_value,
bool is_lp)
{
int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
uint32_t method1, method2;
 
if (!params->active || !params->pri.enabled)
if (!cstate->base.active || !pstate->visible)
return 0;
 
method1 = ilk_wm_method1(params->pixel_rate,
params->pri.bytes_per_pixel,
mem_value);
method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
 
if (!is_lp)
return method1;
 
method2 = ilk_wm_method2(params->pixel_rate,
params->pipe_htotal,
params->pri.horiz_pixels,
params->pri.bytes_per_pixel,
method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
drm_rect_width(&pstate->dst),
bpp,
mem_value);
 
return min(method1, method2);
2043,22 → 1767,22
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t mem_value)
{
int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
uint32_t method1, method2;
 
if (!params->active || !params->spr.enabled)
if (!cstate->base.active || !pstate->visible)
return 0;
 
method1 = ilk_wm_method1(params->pixel_rate,
params->spr.bytes_per_pixel,
method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
drm_rect_width(&pstate->dst),
bpp,
mem_value);
method2 = ilk_wm_method2(params->pixel_rate,
params->pipe_htotal,
params->spr.horiz_pixels,
params->spr.bytes_per_pixel,
mem_value);
return min(method1, method2);
}
 
2066,29 → 1790,33
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t mem_value)
{
if (!params->active || !params->cur.enabled)
int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
 
if (!cstate->base.active || !pstate->visible)
return 0;
 
return ilk_wm_method2(params->pixel_rate,
params->pipe_htotal,
params->cur.horiz_pixels,
params->cur.bytes_per_pixel,
return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
drm_rect_width(&pstate->dst),
bpp,
mem_value);
}
 
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t pri_val)
{
if (!params->active || !params->pri.enabled)
int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
 
if (!cstate->base.active || !pstate->visible)
return 0;
 
return ilk_wm_fbc(pri_val,
params->pri.horiz_pixels,
params->pri.bytes_per_pixel);
return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp);
}
 
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
2253,10 → 1981,12
}
 
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_crtc *intel_crtc,
int level,
const struct ilk_pipe_wm_parameters *p,
struct intel_crtc_state *cstate,
struct intel_wm_level *result)
{
struct intel_plane *intel_plane;
uint16_t pri_latency = dev_priv->wm.pri_latency[level];
uint16_t spr_latency = dev_priv->wm.spr_latency[level];
uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2268,10 → 1998,29
cur_latency *= 5;
}
 
result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
result->spr_val = ilk_compute_spr_wm(p, spr_latency);
result->cur_val = ilk_compute_cur_wm(p, cur_latency);
result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
for_each_intel_plane_on_crtc(dev_priv->dev, intel_crtc, intel_plane) {
struct intel_plane_state *pstate =
to_intel_plane_state(intel_plane->base.state);
 
switch (intel_plane->base.type) {
case DRM_PLANE_TYPE_PRIMARY:
result->pri_val = ilk_compute_pri_wm(cstate, pstate,
pri_latency,
level);
result->fbc_val = ilk_compute_fbc_wm(cstate, pstate,
result->pri_val);
break;
case DRM_PLANE_TYPE_OVERLAY:
result->spr_val = ilk_compute_spr_wm(cstate, pstate,
spr_latency);
break;
case DRM_PLANE_TYPE_CURSOR:
result->cur_val = ilk_compute_cur_wm(cstate, pstate,
cur_latency);
break;
}
}
 
result->enable = true;
}
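
/*
 * Illustrative sketch, not driver code: the ilk_compute_*_wm()
 * helpers above expect mem_value in 0.1us units. WM0 latencies are
 * already stored that way; the elided branch above scales WM1+
 * latencies (stored in coarser 0.5us units) by 5, hence the
 * "cur_latency *= 5". The register values below are invented.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t wm0_latency = 7;	/* 0.7us, already in 0.1us units */
	uint16_t wm2_latency = 4;	/* 2.0us, raw 0.5us units */

	printf("WM0 mem_value = %u\n", wm0_latency);
	printf("WM2 mem_value = %u\n", wm2_latency * 5);
	return 0;
}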
 
2280,19 → 2029,19
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
u32 linetime, ips_linetime;
 
if (!intel_crtc_active(crtc))
if (!intel_crtc->active)
return 0;
 
/* The WMs are computed based on how long it takes to fill a single
* row at the given clock rate, multiplied by 8.
*/
linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
mode->crtc_clock);
ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
intel_ddi_get_cdclk_freq(dev_priv));
linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
adjusted_mode->crtc_clock);
ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
dev_priv->cdclk_freq);
 
return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
PIPE_WM_LINETIME_TIME(linetime);
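
/*
 * Worked example for the linetime formula above (timings invented,
 * DIV_ROUND_CLOSEST re-declared locally): a 1080p60 mode with
 * crtc_htotal = 2200 and crtc_clock = 148500 kHz has a line time of
 * ~14.81us, stored in 1/8 us units; the same formula runs a second
 * time against the CD clock for the IPS linetime.
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int crtc_htotal = 2200;	/* example timing */
	unsigned int crtc_clock = 148500;	/* kHz */

	/* 14.81us * 8 ~= 119 */
	printf("linetime = %u\n",
	       DIV_ROUND_CLOSEST(crtc_htotal * 1000 * 8, crtc_clock));
	return 0;
}
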
2349,6 → 2098,8
GEN9_MEM_LATENCY_LEVEL_MASK;
 
/*
* WaWmMemoryReadLatency:skl
*
* punit doesn't take into account the read latency so we need
* to add 2us to the various latency levels we retrieve from
* the punit.
2421,7 → 2172,7
int ilk_wm_max_level(const struct drm_device *dev)
{
/* how many WM levels are we expecting */
if (IS_GEN9(dev))
if (INTEL_INFO(dev)->gen >= 9)
return 7;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
return 4;
2528,38 → 2279,6
intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
}
 
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
struct ilk_pipe_wm_parameters *p)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
 
if (!intel_crtc_active(crtc))
return;
 
p->active = true;
p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
p->cur.bytes_per_pixel = 4;
p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
p->cur.horiz_pixels = intel_crtc->cursor_width;
/* TODO: for now, assume primary and cursor planes are always enabled. */
p->pri.enabled = true;
p->cur.enabled = true;
 
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
struct intel_plane *intel_plane = to_intel_plane(plane);
 
if (intel_plane->pipe == pipe) {
p->spr = intel_plane->wm;
break;
}
}
}
 
static void ilk_compute_wm_config(struct drm_device *dev,
struct intel_wm_config *config)
{
2579,34 → 2298,47
}
 
/* Compute new watermarks for the pipe */
static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
const struct ilk_pipe_wm_parameters *params,
static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate,
struct intel_pipe_wm *pipe_wm)
{
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_device *dev = crtc->dev;
const struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane;
struct intel_plane_state *sprstate = NULL;
int level, max_level = ilk_wm_max_level(dev);
/* LP0 watermark maximums depend on this pipe alone */
struct intel_wm_config config = {
.num_pipes_active = 1,
.sprites_enabled = params->spr.enabled,
.sprites_scaled = params->spr.scaled,
};
struct ilk_wm_maximums max;
 
pipe_wm->pipe_enabled = params->active;
pipe_wm->sprites_enabled = params->spr.enabled;
pipe_wm->sprites_scaled = params->spr.scaled;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) {
sprstate = to_intel_plane_state(intel_plane->base.state);
break;
}
}
 
config.sprites_enabled = sprstate->visible;
config.sprites_scaled = sprstate->visible &&
(drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
 
pipe_wm->pipe_enabled = cstate->base.active;
pipe_wm->sprites_enabled = sprstate->visible;
pipe_wm->sprites_scaled = config.sprites_scaled;
 
/* ILK/SNB: LP2+ watermarks only w/o sprites */
if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
if (INTEL_INFO(dev)->gen <= 6 && sprstate->visible)
max_level = 1;
 
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
if (params->spr.scaled)
if (config.sprites_scaled)
max_level = 0;
 
ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, &pipe_wm->wm[0]);
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2623,7 → 2355,7
for (level = 1; level <= max_level; level++) {
struct intel_wm_level wm = {};
 
ilk_compute_wm_level(dev_priv, level, params, &wm);
ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, &wm);
 
/*
* Disable any watermark level that exceeds the
2680,6 → 2412,7
const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int level, max_level = ilk_wm_max_level(dev);
int last_enabled_level = max_level;
 
2720,7 → 2453,8
* What we should check here is whether FBC can be
* enabled sometime later.
*/
if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
intel_fbc_enabled(dev_priv)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
 
3024,6 → 2758,7
*/
 
#define SKL_DDB_SIZE 896 /* in blocks */
#define BXT_DDB_SIZE 512
 
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
3042,6 → 2777,9
return;
}
 
if (IS_BROXTON(dev))
ddb_size = BXT_DDB_SIZE;
else
ddb_size = SKL_DDB_SIZE;
 
ddb_size -= 4; /* 4 blocks for bypass path allocation */
3048,7 → 2786,7
 
nth_active_pipe = 0;
for_each_crtc(dev, crtc) {
if (!intel_crtc_active(crtc))
if (!to_intel_crtc(crtc)->active)
continue;
 
if (crtc == for_crtc)
3081,13 → 2819,17
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */)
{
struct drm_device *dev = dev_priv->dev;
enum pipe pipe;
int plane;
u32 val;
 
memset(ddb, 0, sizeof(*ddb));
 
for_each_pipe(dev_priv, pipe) {
for_each_plane(pipe, plane) {
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe)))
continue;
 
for_each_plane(dev_priv, pipe, plane) {
val = I915_READ(PLANE_BUF_CFG(pipe, plane));
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
val);
3094,13 → 2836,24
}
 
val = I915_READ(CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
val);
}
}
 
static unsigned int
skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p)
skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y)
{
 
/* for planar format */
if (p->y_bytes_per_pixel) {
if (y) /* y-plane data rate */
return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel;
else /* uv-plane data rate */
return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel;
}
 
/* for packed formats */
return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
}
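
/*
 * A worked example of the planar split above, assuming a 1920x1080
 * NV12 framebuffer (y_bytes_per_pixel = 1, uv bytes_per_pixel = 2):
 * the chroma plane is subsampled 2x2, so its data rate is a quarter
 * of the pixel count at twice the cpp.
 */
#include <stdio.h>

int main(void)
{
	unsigned int w = 1920, h = 1080;

	printf("y-plane  rate = %u\n", w * h * 1);		/* 2073600 */
	printf("uv-plane rate = %u\n", (w / 2) * (h / 2) * 2);	/* 1036800 */
	return 0;
}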
 
3123,8 → 2876,11
if (!p->enabled)
continue;
 
total_data_rate += skl_plane_relative_data_rate(p);
total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */
if (p->y_bytes_per_pixel) {
total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */
}
}
 
return total_data_rate;
}
3136,10 → 2892,13
struct skl_ddb_allocation *ddb /* out */)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
uint16_t alloc_size, start, cursor_blocks;
uint16_t minimum[I915_MAX_PLANES];
uint16_t y_minimum[I915_MAX_PLANES];
unsigned int total_data_rate;
int plane;
 
3147,20 → 2906,35
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0) {
memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
memset(&ddb->plane[pipe][PLANE_CURSOR], 0,
sizeof(ddb->plane[pipe][PLANE_CURSOR]));
return;
}
 
cursor_blocks = skl_cursor_allocation(config);
ddb->cursor[pipe].start = alloc->end - cursor_blocks;
ddb->cursor[pipe].end = alloc->end;
ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
 
alloc_size -= cursor_blocks;
alloc->end -= cursor_blocks;
 
/* 1. Allocate the minimum required blocks for each active plane */
for_each_plane(dev_priv, pipe, plane) {
const struct intel_plane_wm_parameters *p;
 
p = &params->plane[plane];
if (!p->enabled)
continue;
 
minimum[plane] = 8;
alloc_size -= minimum[plane];
y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0;
alloc_size -= y_minimum[plane];
}
 
/*
* Each active plane gets a portion of the remaining space, in
* proportion to the amount of data it needs to fetch from memory.
* 2. Distribute the remaining space in proportion to the amount of
* data each plane needs to fetch from memory.
*
* FIXME: we may not allocate every single block here.
*/
3169,20 → 2943,22
start = alloc->start;
for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
const struct intel_plane_wm_parameters *p;
unsigned int data_rate;
uint16_t plane_blocks;
unsigned int data_rate, y_data_rate;
uint16_t plane_blocks, y_plane_blocks = 0;
 
p = &params->plane[plane];
if (!p->enabled)
continue;
 
data_rate = skl_plane_relative_data_rate(p);
data_rate = skl_plane_relative_data_rate(p, 0);
 
/*
* allocation for (packed formats) or (uv-plane part of planar format):
* promote the expression to 64 bits to avoid overflowing; the
* result is < alloc_size because data_rate / total_data_rate < 1
*/
plane_blocks = div_u64((uint64_t)alloc_size * data_rate,
plane_blocks = minimum[plane];
plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
total_data_rate);
 
ddb->plane[pipe][plane].start = start;
3189,14 → 2965,30
ddb->plane[pipe][plane].end = start + plane_blocks;
 
start += plane_blocks;
 
/*
* allocation for y_plane part of planar format:
*/
if (p->y_bytes_per_pixel) {
y_data_rate = skl_plane_relative_data_rate(p, 1);
y_plane_blocks = y_minimum[plane];
y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
total_data_rate);
 
ddb->y_plane[pipe][plane].start = start;
ddb->y_plane[pipe][plane].end = start + y_plane_blocks;
 
start += y_plane_blocks;
}
 
}
 
static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_config *config)
}
 
static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
{
/* TODO: Take into account the scalers once we support them */
return config->adjusted_mode.crtc_clock;
return config->base.adjusted_mode.crtc_clock;
}
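
/*
 * Sketch of the proportional DDB split in skl_allocate_pipe_ddb
 * above, with invented numbers: two enabled planes, 800 blocks left
 * after the per-plane minima, the primary fetching 3x the data of
 * the sprite. Each plane gets its 8-block minimum plus a share of
 * the rest weighted by data rate (608 and 208 blocks here).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t alloc_size = 800;
	uint32_t rate[2] = { 6220800, 2073600 };	/* bytes per frame */
	uint32_t total = rate[0] + rate[1];
	uint16_t start = 0;
	int i;

	for (i = 0; i < 2; i++) {
		uint16_t blocks = 8 +
			(uint16_t)(((uint64_t)alloc_size * rate[i]) / total);

		printf("plane %d: [%u, %u)\n", i, start, start + blocks);
		start += blocks;
	}
	return 0;
}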
 
/*
3213,7 → 3005,7
if (latency == 0)
return UINT_MAX;
 
wm_intermediate_val = latency * pixel_rate * bytes_per_pixel;
wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
 
return ret;
3221,17 → 3013,29
 
static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
uint32_t horiz_pixels, uint8_t bytes_per_pixel,
uint32_t latency)
uint64_t tiling, uint32_t latency)
{
uint32_t ret, plane_bytes_per_line, wm_intermediate_val;
uint32_t ret;
uint32_t plane_bytes_per_line, plane_blocks_per_line;
uint32_t wm_intermediate_val;
 
if (latency == 0)
return UINT_MAX;
 
plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
 
if (tiling == I915_FORMAT_MOD_Y_TILED ||
tiling == I915_FORMAT_MOD_Yf_TILED) {
plane_bytes_per_line *= 4;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
plane_blocks_per_line /= 4;
} else {
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
}
 
wm_intermediate_val = latency * pixel_rate;
ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
plane_bytes_per_line;
plane_blocks_per_line;
 
return ret;
}
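
/*
 * Worked example of the blocks-per-line math above (sizes invented):
 * a 130-pixel-wide, 4-Bpp plane needs 520 bytes per line. The linear
 * path rounds each line up to whole 512-byte blocks; the Y-tile path
 * rounds four lines at a time and divides back, which wastes less.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int bytes_per_line = 130 * 4;

	printf("linear : %u\n", DIV_ROUND_UP(bytes_per_line, 512));	/* 2 */
	printf("y-tiled: %u\n",
	       DIV_ROUND_UP(bytes_per_line * 4, 512) / 4);		/* 1 */
	return 0;
}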
3248,8 → 3052,8
sizeof(new_ddb->plane[pipe])))
return true;
 
if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
sizeof(new_ddb->cursor[pipe])))
if (memcmp(&new_ddb->plane[pipe][PLANE_CURSOR], &cur_ddb->plane[pipe][PLANE_CURSOR],
sizeof(new_ddb->plane[pipe][PLANE_CURSOR])))
return true;
 
return false;
3262,7 → 3066,7
struct drm_plane *plane;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
config->num_pipes_active += intel_crtc_active(crtc);
config->num_pipes_active += to_intel_crtc(crtc)->active;
 
/* FIXME: I don't think we need those two global parameters on SKL */
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
3280,71 → 3084,129
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
struct drm_framebuffer *fb;
int i = 1; /* Index at which sprite planes start */
 
p->active = intel_crtc_active(crtc);
p->active = intel_crtc->active;
if (p->active) {
p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
p->pixel_rate = skl_pipe_pixel_rate(&intel_crtc->config);
p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
 
/*
* For now, assume primary and cursor planes are always enabled.
*/
fb = crtc->primary->state->fb;
/* For planar: Bpp is for uv plane, y_Bpp is for y plane */
if (fb) {
p->plane[0].enabled = true;
p->plane[0].bytes_per_pixel =
crtc->primary->fb->bits_per_pixel / 8;
p->plane[0].horiz_pixels = intel_crtc->config.pipe_src_w;
p->plane[0].vert_pixels = intel_crtc->config.pipe_src_h;
p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
drm_format_plane_cpp(fb->pixel_format, 1) :
drm_format_plane_cpp(fb->pixel_format, 0);
p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
drm_format_plane_cpp(fb->pixel_format, 0) : 0;
p->plane[0].tiling = fb->modifier[0];
} else {
p->plane[0].enabled = false;
p->plane[0].bytes_per_pixel = 0;
p->plane[0].y_bytes_per_pixel = 0;
p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
}
p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
p->plane[0].rotation = crtc->primary->state->rotation;
 
p->cursor.enabled = true;
p->cursor.bytes_per_pixel = 4;
p->cursor.horiz_pixels = intel_crtc->cursor_width ?
intel_crtc->cursor_width : 64;
fb = crtc->cursor->state->fb;
p->plane[PLANE_CURSOR].y_bytes_per_pixel = 0;
if (fb) {
p->plane[PLANE_CURSOR].enabled = true;
p->plane[PLANE_CURSOR].bytes_per_pixel = fb->bits_per_pixel / 8;
p->plane[PLANE_CURSOR].horiz_pixels = crtc->cursor->state->crtc_w;
p->plane[PLANE_CURSOR].vert_pixels = crtc->cursor->state->crtc_h;
} else {
p->plane[PLANE_CURSOR].enabled = false;
p->plane[PLANE_CURSOR].bytes_per_pixel = 0;
p->plane[PLANE_CURSOR].horiz_pixels = 64;
p->plane[PLANE_CURSOR].vert_pixels = 64;
}
}
 
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
struct intel_plane *intel_plane = to_intel_plane(plane);
 
if (intel_plane->pipe == pipe)
if (intel_plane->pipe == pipe &&
plane->type == DRM_PLANE_TYPE_OVERLAY)
p->plane[i++] = intel_plane->wm;
}
}
 
static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p,
static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
struct skl_pipe_wm_parameters *p,
struct intel_plane_wm_parameters *p_params,
uint16_t ddb_allocation,
uint32_t mem_value,
int level,
uint16_t *out_blocks, /* out */
uint8_t *out_lines /* out */)
{
uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines;
uint32_t result_bytes;
uint32_t latency = dev_priv->wm.skl_latency[level];
uint32_t method1, method2;
uint32_t plane_bytes_per_line, plane_blocks_per_line;
uint32_t res_blocks, res_lines;
uint32_t selected_result;
uint8_t bytes_per_pixel;
 
if (mem_value == 0 || !p->active || !p_params->enabled)
if (latency == 0 || !p->active || !p_params->enabled)
return false;
 
bytes_per_pixel = p_params->y_bytes_per_pixel ?
p_params->y_bytes_per_pixel :
p_params->bytes_per_pixel;
method1 = skl_wm_method1(p->pixel_rate,
p_params->bytes_per_pixel,
mem_value);
bytes_per_pixel,
latency);
method2 = skl_wm_method2(p->pixel_rate,
p->pipe_htotal,
p_params->horiz_pixels,
p_params->bytes_per_pixel,
mem_value);
bytes_per_pixel,
p_params->tiling,
latency);
 
plane_bytes_per_line = p_params->horiz_pixels *
p_params->bytes_per_pixel;
plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 
/* For now xtile and linear */
if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1)
result_bytes = min(method1, method2);
if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
uint32_t min_scanlines = 4;
uint32_t y_tile_minimum;
if (intel_rotation_90_or_270(p_params->rotation)) {
switch (p_params->bytes_per_pixel) {
case 1:
min_scanlines = 16;
break;
case 2:
min_scanlines = 8;
break;
case 8:
WARN(1, "Unsupported pixel depth for rotation");
}
}
y_tile_minimum = plane_blocks_per_line * min_scanlines;
selected_result = max(method2, y_tile_minimum);
} else {
if ((ddb_allocation / plane_blocks_per_line) >= 1)
selected_result = min(method1, method2);
else
result_bytes = method1;
selected_result = method1;
}
 
res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1;
res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line);
res_blocks = selected_result + 1;
res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
 
if (res_blocks > ddb_allocation || res_lines > 31)
if (level >= 1 && level <= 7) {
if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
res_lines += 4;
else
res_blocks++;
}
 
if (res_blocks >= ddb_allocation || res_lines > 31)
return false;
 
*out_blocks = res_blocks;
3361,7 → 3223,6
int num_planes,
struct skl_wm_level *result)
{
uint16_t latency = dev_priv->wm.skl_latency[level];
uint16_t ddb_blocks;
int i;
 
3368,27 → 3229,32
for (i = 0; i < num_planes; i++) {
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
 
result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i],
result->plane_en[i] = skl_compute_plane_wm(dev_priv,
p, &p->plane[i],
ddb_blocks,
latency,
level,
&result->plane_res_b[i],
&result->plane_res_l[i]);
}
 
ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks,
latency, &result->cursor_res_b,
&result->cursor_res_l);
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][PLANE_CURSOR]);
result->plane_en[PLANE_CURSOR] = skl_compute_plane_wm(dev_priv, p,
&p->plane[PLANE_CURSOR],
ddb_blocks, level,
&result->plane_res_b[PLANE_CURSOR],
&result->plane_res_l[PLANE_CURSOR]);
}
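
/*
 * Sketch of the y-tile minimum in skl_compute_plane_wm above, with
 * assumed numbers: a 90-degree-rotated, 2-Bpp, Y-tiled plane must
 * cover at least 8 scanlines, so with 10 blocks per line the result
 * cannot drop below 80 blocks even if method2 computes less.
 */
#include <stdio.h>

int main(void)
{
	unsigned int plane_blocks_per_line = 10;
	unsigned int min_scanlines = 8;		/* 2 Bpp, rotated 90/270 */
	unsigned int method2 = 45;		/* invented result */
	unsigned int y_tile_minimum = plane_blocks_per_line * min_scanlines;

	printf("selected = %u\n",
	       method2 > y_tile_minimum ? method2 : y_tile_minimum);
	return 0;
}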
 
static uint32_t
skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
{
if (!intel_crtc_active(crtc))
if (!to_intel_crtc(crtc)->active)
return 0;
 
if (WARN_ON(p->pixel_rate == 0))
return 0;
 
return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
 
}
 
static void skl_compute_transition_wm(struct drm_crtc *crtc,
3404,7 → 3270,7
/* Until we know more, just disable transition WMs */
for (i = 0; i < intel_num_planes(intel_crtc); i++)
trans_wm->plane_en[i] = false;
trans_wm->cursor_en = false;
trans_wm->plane_en[PLANE_CURSOR] = false;
}
 
static void skl_compute_pipe_wm(struct drm_crtc *crtc,
3453,13 → 3319,13
 
temp = 0;
 
temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
temp |= p_wm->wm[level].cursor_res_b;
temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
 
if (p_wm->wm[level].cursor_en)
if (p_wm->wm[level].plane_en[PLANE_CURSOR])
temp |= PLANE_WM_EN;
 
r->cursor[pipe][level] = temp;
r->plane[pipe][PLANE_CURSOR][level] = temp;
 
}
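
/*
 * Sketch of the watermark register packing above, with assumed field
 * positions (a lines field at bit 14 and an enable flag in bit 31,
 * mirroring the i915_reg.h convention, not taken from this hunk):
 * 3 lines and 40 blocks pack into one 32-bit value.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PLANE_WM_LINES_SHIFT 14	/* assumed */
#define EX_PLANE_WM_EN (1u << 31)	/* assumed */

int main(void)
{
	uint32_t temp = (3u << EX_PLANE_WM_LINES_SHIFT) | 40u | EX_PLANE_WM_EN;

	printf("PLANE_WM = 0x%08x\n", (unsigned int)temp);	/* 0x8000c028 */
	return 0;
}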
 
3475,12 → 3341,12
}
 
temp = 0;
temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
temp |= p_wm->trans_wm.cursor_res_b;
if (p_wm->trans_wm.cursor_en)
temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
temp |= PLANE_WM_EN;
 
r->cursor_trans[pipe] = temp;
r->plane_trans[pipe][PLANE_CURSOR] = temp;
 
r->wm_linetime[pipe] = p_wm->linetime;
}
3514,20 → 3380,25
I915_WRITE(PLANE_WM(pipe, i, level),
new->plane[pipe][i][level]);
I915_WRITE(CUR_WM(pipe, level),
new->cursor[pipe][level]);
new->plane[pipe][PLANE_CURSOR][level]);
}
for (i = 0; i < intel_num_planes(crtc); i++)
I915_WRITE(PLANE_WM_TRANS(pipe, i),
new->plane_trans[pipe][i]);
I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);
I915_WRITE(CUR_WM_TRANS(pipe),
new->plane_trans[pipe][PLANE_CURSOR]);
 
for (i = 0; i < intel_num_planes(crtc); i++)
for (i = 0; i < intel_num_planes(crtc); i++) {
skl_ddb_entry_write(dev_priv,
PLANE_BUF_CFG(pipe, i),
&new->ddb.plane[pipe][i]);
skl_ddb_entry_write(dev_priv,
PLANE_NV12_BUF_CFG(pipe, i),
&new->ddb.y_plane[pipe][i]);
}
 
skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
&new->ddb.cursor[pipe]);
&new->ddb.plane[pipe][PLANE_CURSOR]);
}
}
 
3558,12 → 3429,11
static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
{
struct drm_device *dev = dev_priv->dev;
int plane;
 
DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
 
for_each_plane(pipe, plane) {
for_each_plane(dev_priv, pipe, plane) {
I915_WRITE(PLANE_SURF(pipe, plane),
I915_READ(PLANE_SURF(pipe, plane)));
}
3590,7 → 3460,7
{
struct drm_device *dev = dev_priv->dev;
struct skl_ddb_allocation *cur_ddb, *new_ddb;
bool reallocated[I915_MAX_PIPES] = {false, false, false};
bool reallocated[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
enum pipe pipe;
 
3640,10 → 3510,9
skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
skl_wm_flush_pipe(dev_priv, pipe, 2);
intel_wait_for_vblank(dev, pipe);
}
 
reallocated[pipe] = true;
}
}
 
/*
* Third pass: flush the pipes that got more space allocated.
3684,6 → 3553,7
return false;
 
intel_crtc->wm.skl_active = *pipe_wm;
 
return true;
}
 
3736,6 → 3606,26
}
}
 
static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
{
watermarks->wm_linetime[pipe] = 0;
memset(watermarks->plane[pipe], 0,
sizeof(uint32_t) * 8 * I915_MAX_PLANES);
memset(watermarks->plane_trans[pipe],
0, sizeof(uint32_t) * I915_MAX_PLANES);
watermarks->plane_trans[pipe][PLANE_CURSOR] = 0;
 
/* Clear ddb entries for pipe */
memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
memset(&watermarks->ddb.plane[pipe], 0,
sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
memset(&watermarks->ddb.y_plane[pipe], 0,
sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0,
sizeof(struct skl_ddb_entry));
 
}
 
static void skl_update_wm(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3746,8 → 3636,12
struct skl_pipe_wm pipe_wm = {};
struct intel_wm_config config = {};
 
memset(results, 0, sizeof(*results));
 
/* Clear all dirty flags */
memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
 
skl_clear_wm(results, intel_crtc->pipe);
 
skl_compute_wm_global_parameters(dev, &config);
 
if (!skl_update_pipe_wm(crtc, &params, &config,
3771,13 → 3665,30
int pixel_size, bool enabled, bool scaled)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane->state->fb;
 
intel_plane->wm.enabled = enabled;
intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.vert_pixels = sprite_height;
intel_plane->wm.bytes_per_pixel = pixel_size;
intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
 
/* For planar: Bpp is for UV plane, y_Bpp is for Y plane */
intel_plane->wm.bytes_per_pixel =
(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size;
intel_plane->wm.y_bytes_per_pixel =
(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0;
 
/*
* Framebuffer can be NULL on plane disable, but it does not
* matter for watermarks if we assume no tiling in that case.
*/
if (fb)
intel_plane->wm.tiling = fb->modifier[0];
intel_plane->wm.rotation = plane->state->rotation;
 
skl_update_wm(crtc);
}
 
3784,10 → 3695,10
static void ilk_update_wm(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct ilk_wm_maximums max;
struct ilk_pipe_wm_parameters params = {};
struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
struct intel_pipe_wm pipe_wm = {};
3794,9 → 3705,9
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct intel_wm_config config = {};
 
ilk_compute_wm_parameters(crtc, &params);
WARN_ON(cstate->base.active != intel_crtc->active);
 
intel_compute_pipe_wm(crtc, &params, &pipe_wm);
intel_compute_pipe_wm(cstate, &pipe_wm);
 
if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
return;
3836,12 → 3747,6
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
 
intel_plane->wm.enabled = enabled;
intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.vert_pixels = sprite_width;
intel_plane->wm.bytes_per_pixel = pixel_size;
 
/*
* IVB workaround: must disable low power watermarks for at least
* one frame before enabling scaling. LP watermarks can be re-enabled
3873,10 → 3778,10
(val >> PLANE_WM_LINES_SHIFT) &
PLANE_WM_LINES_MASK;
} else {
active->wm[level].cursor_en = is_enabled;
active->wm[level].cursor_res_b =
active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
active->wm[level].plane_res_b[PLANE_CURSOR] =
val & PLANE_WM_BLOCKS_MASK;
active->wm[level].cursor_res_l =
active->wm[level].plane_res_l[PLANE_CURSOR] =
(val >> PLANE_WM_LINES_SHIFT) &
PLANE_WM_LINES_MASK;
}
3889,10 → 3794,10
(val >> PLANE_WM_LINES_SHIFT) &
PLANE_WM_LINES_MASK;
} else {
active->trans_wm.cursor_en = is_enabled;
active->trans_wm.cursor_res_b =
active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
active->trans_wm.plane_res_b[PLANE_CURSOR] =
val & PLANE_WM_BLOCKS_MASK;
active->trans_wm.cursor_res_l =
active->trans_wm.plane_res_l[PLANE_CURSOR] =
(val >> PLANE_WM_LINES_SHIFT) &
PLANE_WM_LINES_MASK;
}
3918,14 → 3823,14
for (i = 0; i < intel_num_planes(intel_crtc); i++)
hw->plane[pipe][i][level] =
I915_READ(PLANE_WM(pipe, i, level));
hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
}
 
for (i = 0; i < intel_num_planes(intel_crtc); i++)
hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
 
if (!intel_crtc_active(crtc))
if (!intel_crtc->active)
return;
 
hw->dirty[pipe] = true;
3938,7 → 3843,7
skl_pipe_wm_active_state(temp, active, false,
false, i, level);
}
temp = hw->cursor[pipe][level];
temp = hw->plane[pipe][PLANE_CURSOR][level];
skl_pipe_wm_active_state(temp, active, false, true, i, level);
}
 
3947,7 → 3852,7
skl_pipe_wm_active_state(temp, active, true, false, i, 0);
}
 
temp = hw->cursor_trans[pipe];
temp = hw->plane_trans[pipe][PLANE_CURSOR];
skl_pipe_wm_active_state(temp, active, true, true, i, 0);
}
 
3980,7 → 3885,7
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
 
active->pipe_enabled = intel_crtc_active(crtc);
active->pipe_enabled = intel_crtc->active;
 
if (active->pipe_enabled) {
u32 tmp = hw->wm_pipe[pipe];
4009,6 → 3914,159
}
}
 
#define _FW_WM(value, plane) \
(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
 
static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
struct vlv_wm_values *wm)
{
enum pipe pipe;
uint32_t tmp;
 
for_each_pipe(dev_priv, pipe) {
tmp = I915_READ(VLV_DDL(pipe));
 
wm->ddl[pipe].primary =
(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
wm->ddl[pipe].cursor =
(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
wm->ddl[pipe].sprite[0] =
(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
wm->ddl[pipe].sprite[1] =
(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
}
 
tmp = I915_READ(DSPFW1);
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
 
tmp = I915_READ(DSPFW2);
wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
 
tmp = I915_READ(DSPFW3);
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
 
if (IS_CHERRYVIEW(dev_priv)) {
tmp = I915_READ(DSPFW7_CHV);
wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
 
tmp = I915_READ(DSPFW8_CHV);
wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
 
tmp = I915_READ(DSPFW9_CHV);
wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
 
tmp = I915_READ(DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
} else {
tmp = I915_READ(DSPFW7);
wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
 
tmp = I915_READ(DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
}
}
 
#undef _FW_WM
#undef _FW_WM_VLV
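
/*
 * Illustration of the token pasting in _FW_WM above, with an assumed
 * field layout (a 6-bit cursor B field at bit 24; the real masks
 * live in i915_reg.h): _FW_WM(tmp, CURSORB) expands to
 * ((tmp) & DSPFW_CURSORB_MASK) >> DSPFW_CURSORB_SHIFT.
 */
#include <stdint.h>
#include <stdio.h>

#define DSPFW_CURSORB_SHIFT 24			/* assumed */
#define DSPFW_CURSORB_MASK (0x3fu << 24)	/* assumed */
#define _FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)

int main(void)
{
	uint32_t tmp = 0x2a000000;

	printf("cursor B wm = %u\n", (unsigned int)_FW_WM(tmp, CURSORB)); /* 42 */
	return 0;
}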
 
void vlv_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct vlv_wm_values *wm = &dev_priv->wm.vlv;
struct intel_plane *plane;
enum pipe pipe;
u32 val;
 
vlv_read_wm_values(dev_priv, wm);
 
for_each_intel_plane(dev, plane) {
switch (plane->base.type) {
int sprite;
case DRM_PLANE_TYPE_CURSOR:
plane->wm.fifo_size = 63;
break;
case DRM_PLANE_TYPE_PRIMARY:
plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
break;
case DRM_PLANE_TYPE_OVERLAY:
sprite = plane->plane;
plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
break;
}
}
 
wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
wm->level = VLV_WM_LEVEL_PM2;
 
if (IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&dev_priv->rps.hw_lock);
 
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
if (val & DSP_MAXFIFO_PM5_ENABLE)
wm->level = VLV_WM_LEVEL_PM5;
 
/*
* If DDR DVFS is disabled in the BIOS, Punit
* will never ack the request. So if that happens
* assume we don't have to enable/disable DDR DVFS
* dynamically. To test that just set the REQ_ACK
* bit to poke the Punit, but don't change the
* HIGH/LOW bits so that we don't actually change
* the current state.
*/
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
val |= FORCE_DDR_FREQ_REQ_ACK;
vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
 
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
} else {
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
if ((val & FORCE_DDR_HIGH_FREQ) == 0)
wm->level = VLV_WM_LEVEL_DDR_DVFS;
}
 
mutex_unlock(&dev_priv->rps.hw_lock);
}
 
for_each_pipe(dev_priv, pipe)
DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
 
DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
 
void ilk_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
4094,41 → 4152,6
pixel_size, enabled, scaled);
}
 
static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
struct drm_i915_gem_object *ctx;
int ret;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
ctx = i915_gem_alloc_object(dev, 4096);
if (!ctx) {
DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
return NULL;
}
 
ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
}
 
ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
if (ret) {
DRM_ERROR("failed to set-domain on power context: %d\n", ret);
goto err_unpin;
}
 
return ctx;
 
err_unpin:
i915_gem_object_ggtt_unpin(ctx);
err_unref:
drm_gem_object_unreference(&ctx->base);
return NULL;
}
 
/**
* Lock protecting IPS related data structures
*/
4190,7 → 4213,7
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
 
vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
 
dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
4221,10 → 4244,10
 
ironlake_set_drps(dev, fstart);
 
dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
I915_READ(0x112e0);
dev_priv->ips.last_count1 = I915_READ(DMIEC) +
I915_READ(DDREC) + I915_READ(CSIEC);
dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
dev_priv->ips.last_count2 = I915_READ(0x112f4);
dev_priv->ips.last_count2 = I915_READ(GFXEC);
dev_priv->ips.last_time2 = ktime_get_raw_ns();
 
spin_unlock_irq(&mchdev_lock);
4261,7 → 4284,7
* ourselves, instead of doing a rmw cycle (which might result in us clearing
* all limits and the gpu stuck at whatever frequency it is at atm).
*/
static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
u32 limits;
 
4271,9 → 4294,15
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
if (IS_GEN9(dev_priv->dev)) {
limits = (dev_priv->rps.max_freq_softlimit) << 23;
if (val <= dev_priv->rps.min_freq_softlimit)
limits |= (dev_priv->rps.min_freq_softlimit) << 14;
} else {
limits = dev_priv->rps.max_freq_softlimit << 24;
if (val <= dev_priv->rps.min_freq_softlimit)
limits |= dev_priv->rps.min_freq_softlimit << 16;
}
 
return limits;
}
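
/*
 * Sketch of the limits packing above with invented softlimits: gen9
 * places the max at bit 23 and, when running at the floor, the min
 * at bit 14; earlier gens use bits 24 and 16.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t max = 22, min = 7, val = 7;
	uint32_t gen9 = (uint32_t)max << 23;
	uint32_t gen6 = (uint32_t)max << 24;

	if (val <= min) {
		gen9 |= (uint32_t)min << 14;
		gen6 |= (uint32_t)min << 16;
	}
	printf("gen9 = 0x%08x, gen6 = 0x%08x\n",
	       (unsigned int)gen9, (unsigned int)gen6);
	return 0;
}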
4281,6 → 4310,8
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
int new_power;
u32 threshold_up = 0, threshold_down = 0; /* in % */
u32 ei_up = 0, ei_down = 0;
 
new_power = dev_priv->rps.power;
switch (dev_priv->rps.power) {
4302,9 → 4333,9
break;
}
/* Max/min bins are special */
if (val == dev_priv->rps.min_freq_softlimit)
if (val <= dev_priv->rps.min_freq_softlimit)
new_power = LOW_POWER;
if (val == dev_priv->rps.max_freq_softlimit)
if (val >= dev_priv->rps.max_freq_softlimit)
new_power = HIGH_POWER;
if (new_power == dev_priv->rps.power)
return;
4313,49 → 4344,45
switch (new_power) {
case LOW_POWER:
/* Upclock if more than 95% busy over 16ms */
I915_WRITE(GEN6_RP_UP_EI, 12500);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
ei_up = 16000;
threshold_up = 95;
 
/* Downclock if less than 85% busy over 32ms */
I915_WRITE(GEN6_RP_DOWN_EI, 25000);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
 
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
ei_down = 32000;
threshold_down = 85;
break;
 
case BETWEEN:
/* Upclock if more than 90% busy over 13ms */
I915_WRITE(GEN6_RP_UP_EI, 10250);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
ei_up = 13000;
threshold_up = 90;
 
/* Downclock if less than 75% busy over 32ms */
I915_WRITE(GEN6_RP_DOWN_EI, 25000);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
 
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
ei_down = 32000;
threshold_down = 75;
break;
 
case HIGH_POWER:
/* Upclock if more than 85% busy over 10ms */
I915_WRITE(GEN6_RP_UP_EI, 8000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
ei_up = 10000;
threshold_up = 85;
 
/* Downclock if less than 60% busy over 32ms */
I915_WRITE(GEN6_RP_DOWN_EI, 25000);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
ei_down = 32000;
threshold_down = 60;
break;
}
 
I915_WRITE(GEN6_RP_UP_EI,
GT_INTERVAL_FROM_US(dev_priv, ei_up));
I915_WRITE(GEN6_RP_UP_THRESHOLD,
GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
 
I915_WRITE(GEN6_RP_DOWN_EI,
GT_INTERVAL_FROM_US(dev_priv, ei_down));
I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
 
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
4363,10 → 4390,10
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
break;
}
 
dev_priv->rps.power = new_power;
dev_priv->rps.up_threshold = threshold_up;
dev_priv->rps.down_threshold = threshold_down;
dev_priv->rps.last_adj = 0;
}
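
/*
 * Worked example of the threshold math above (LOW_POWER numbers from
 * the switch): the up evaluation interval is 16000us and the up
 * threshold 95%, so the threshold register gets the equivalent of
 * 15200us of busy time. GT_INTERVAL_FROM_US additionally scales
 * microseconds by the GT timestamp frequency, which this sketch
 * leaves out.
 */
#include <stdio.h>

int main(void)
{
	unsigned int ei_up = 16000;	/* us, LOW_POWER */
	unsigned int threshold_up = 95;	/* % */

	printf("up threshold = %u us busy out of %u us\n",
	       ei_up * threshold_up / 100, ei_up);
	return 0;
}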
 
4375,35 → 4402,29
u32 mask = 0;
 
if (val > dev_priv->rps.min_freq_softlimit)
mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
if (val < dev_priv->rps.max_freq_softlimit)
mask |= GEN6_PM_RP_UP_THRESHOLD;
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
 
mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
mask &= dev_priv->pm_rps_events;
 
/* IVB and SNB hard hangs on looping batchbuffer
* if GEN6_PM_UP_EI_EXPIRED is masked.
*/
if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
mask |= GEN6_PM_RP_UP_EI_EXPIRED;
 
if (IS_GEN8(dev_priv->dev))
mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
 
return ~mask;
return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}
 
/* gen6_set_rps is called to update the frequency request, but should also be
* called when the range (min_delay and max_delay) is modified so that we can
* update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
void gen6_set_rps(struct drm_device *dev, u8 val)
static void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0))
return;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_freq_softlimit);
WARN_ON(val < dev_priv->rps.min_freq_softlimit);
WARN_ON(val > dev_priv->rps.max_freq);
WARN_ON(val < dev_priv->rps.min_freq);
 
/* min/max delay may still have been modified so be sure to
* write the limits value.
4411,8 → 4432,11
if (val != dev_priv->rps.cur_freq) {
gen6_set_rps_thresholds(dev_priv, val);
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
if (IS_GEN9(dev))
I915_WRITE(GEN6_RPNSWREQ,
GEN9_FREQUENCY(val));
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(val));
else
I915_WRITE(GEN6_RPNSWREQ,
4424,79 → 4448,73
/* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
*/
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
POSTING_READ(GEN6_RPNSWREQ);
 
dev_priv->rps.cur_freq = val;
trace_intel_gpu_freq_change(val * 50);
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
 
/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
*
* * If Gfx is Idle, then
* 1. Mask Turbo interrupts
* 2. Bring up Gfx clock
* 3. Change the freq to Rpn and wait till P-Unit updates freq
* 4. Clear the Force GFX CLK ON bit so that Gfx can clock down
* 5. Unmask Turbo interrupts
*/
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
static void valleyview_set_rps(struct drm_device *dev, u8 val)
{
struct drm_device *dev = dev_priv->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Latest VLV doesn't need to force the gfx clock */
if (dev->pdev->revision >= 0xd) {
valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
return;
}
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_freq);
WARN_ON(val < dev_priv->rps.min_freq);
 
/*
* When we are idle, drop to the min voltage state.
*/
if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
"Odd GPU freq value\n"))
val &= ~1;
 
if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
return;
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
/* Mask turbo interrupt so that they will not come in between */
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
if (val != dev_priv->rps.cur_freq) {
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
if (!IS_CHERRYVIEW(dev_priv))
gen6_set_rps_thresholds(dev_priv, val);
}
 
vlv_force_gfx_clock(dev_priv, true);
dev_priv->rps.cur_freq = val;
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
 
dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
*
* * If Gfx is Idle, then
* 1. Forcewake Media well.
* 2. Request idle freq.
* 3. Release Forcewake of Media well.
*/
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
u32 val = dev_priv->rps.idle_freq;
 
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
dev_priv->rps.min_freq_softlimit);
if (dev_priv->rps.cur_freq <= val)
return;
 
if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
& GENFREQSTATUS) == 0, 100))
DRM_ERROR("timed out waiting for Punit\n");
 
vlv_force_gfx_clock(dev_priv, false);
 
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
/* Wake up the media well, as that takes a lot less
* power than the Render well. */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
valleyview_set_rps(dev_priv->dev, val);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
}
 
void gen6_rps_idle(struct drm_i915_private *dev_priv)
void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
if (IS_CHERRYVIEW(dev))
valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
else if (IS_VALLEYVIEW(dev))
vlv_set_rps_idle(dev_priv);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
dev_priv->rps.last_adj = 0;
if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
gen6_rps_reset_ei(dev_priv);
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
}
mutex_unlock(&dev_priv->rps.hw_lock);
}
 
void gen6_rps_boost(struct drm_i915_private *dev_priv)
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
4503,33 → 4521,62
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
vlv_set_rps_idle(dev_priv);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
dev_priv->rps.last_adj = 0;
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
}
mutex_unlock(&dev_priv->rps.hw_lock);
 
spin_lock(&dev_priv->rps.client_lock);
while (!list_empty(&dev_priv->rps.clients))
list_del_init(dev_priv->rps.clients.next);
spin_unlock(&dev_priv->rps.client_lock);
}
 
void valleyview_set_rps(struct drm_device *dev, u8 val)
void gen6_rps_boost(struct drm_i915_private *dev_priv,
struct intel_rps_client *rps,
unsigned long submitted)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* This is intentionally racy! We peek at the state here, then
* validate inside the RPS worker.
*/
if (!(dev_priv->mm.busy &&
dev_priv->rps.enabled &&
dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
return;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_freq_softlimit);
WARN_ON(val < dev_priv->rps.min_freq_softlimit);
/* Force a RPS boost (and don't count it against the client) if
* the GPU is severely congested.
*/
if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
rps = NULL;
 
if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
"Odd GPU freq value\n"))
val &= ~1;
spin_lock(&dev_priv->rps.client_lock);
if (rps == NULL || list_empty(&rps->link)) {
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->rps.interrupts_enabled) {
dev_priv->rps.client_boost = true;
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
spin_unlock_irq(&dev_priv->irq_lock);
 
if (val != dev_priv->rps.cur_freq)
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
if (rps != NULL) {
list_add(&rps->link, &dev_priv->rps.clients);
rps->boosts++;
} else
dev_priv->rps.boosts++;
}
spin_unlock(&dev_priv->rps.client_lock);
}
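
/*
 * Minimal sketch of the client-boost bookkeeping above: a client
 * already on the list has been counted, so repeated boosts from the
 * same client are deduplicated until gen6_rps_idle() empties the
 * list. The types are simplified stand-ins, not the driver's.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_rps_client {
	bool on_list;		/* stand-in for !list_empty(&rps->link) */
	unsigned int boosts;
};

static void boost(struct fake_rps_client *rps, unsigned int *kicks)
{
	if (rps == NULL || !rps->on_list) {
		(*kicks)++;			/* worker queued once */
		if (rps) {
			rps->on_list = true;
			rps->boosts++;
		}
	}
}

int main(void)
{
	struct fake_rps_client c = { false, 0 };
	unsigned int kicks = 0;

	boost(&c, &kicks);
	boost(&c, &kicks);	/* deduplicated: no second kick */
	printf("kicks = %u, client boosts = %u\n", kicks, c.boosts);
	return 0;
}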
 
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
dev_priv->rps.cur_freq = val;
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
void intel_set_rps(struct drm_device *dev, u8 val)
{
if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val);
else
gen6_set_rps(dev, val);
}
 
static void gen9_disable_rps(struct drm_device *dev)
4537,6 → 4584,7
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN9_PG_ENABLE, 0);
}
 
static void gen6_disable_rps(struct drm_device *dev)
4560,11 → 4608,11
 
/* we're doing forcewake before disabling RC6,
* this is what the BIOS expects when going into suspend */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
I915_WRITE(GEN6_RC_CONTROL, 0);
 
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
 
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
4588,14 → 4636,10
 
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
/* No RC6 before Ironlake */
if (INTEL_INFO(dev)->gen < 5)
/* No RC6 before Ironlake and code is gone for ilk. */
if (INTEL_INFO(dev)->gen < 6)
return 0;
 
/* RC6 is only on Ironlake mobile not on desktop */
if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
return 0;
 
/* Respect the kernel parameter if it is set */
if (enable_rc6 >= 0) {
int mask;
4613,10 → 4657,6
return enable_rc6 & mask;
}
 
/* Disable RC6 on Ironlake */
if (INTEL_INFO(dev)->gen == 5)
return 0;
 
if (IS_IVYBRIDGE(dev))
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 
4635,26 → 4675,49
u32 ddcc_status = 0;
int ret;
 
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
/* All of these values are in units of 50MHz */
dev_priv->rps.cur_freq = 0;
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
if (IS_BROXTON(dev)) {
rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
} else {
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
}
 
/* hw_max = RP0 until we check for overclocking */
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
 
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
ret = sandybridge_pcode_read(dev_priv,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status);
if (0 == ret)
dev_priv->rps.efficient_freq =
(ddcc_status >> 8) & 0xff;
clamp_t(u8,
((ddcc_status >> 8) & 0xff),
dev_priv->rps.min_freq,
dev_priv->rps.max_freq);
}
 
if (IS_SKYLAKE(dev)) {
/* Store the frequency values in 16.66 MHz units, which is
the natural hardware unit for SKL */
dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
}
 
dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
 
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4662,8 → 4725,8
if (dev_priv->rps.min_freq_softlimit == 0) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
dev_priv->rps.min_freq_softlimit =
/* max(RPe, 450 MHz) */
max(dev_priv->rps.efficient_freq, (u8) 9);
max_t(int, dev_priv->rps.efficient_freq,
intel_freq_opcode(dev_priv, 450));
else
dev_priv->rps.min_freq_softlimit =
dev_priv->rps.min_freq;
4670,9 → 4733,43
}
}
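
/*
 * Worked example of the SKL unit change above, assuming
 * GEN9_FREQ_SCALER is 3 (50MHz / 16.66MHz): an RP0 fuse value of 22
 * in 50MHz units (1100MHz) becomes 66 once rescaled.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rp0 = 22;		/* 50MHz units, invented */
	unsigned int scaler = 3;	/* assumed GEN9_FREQ_SCALER */

	printf("rp0 = %u (16.66MHz units) = ~%u MHz\n",
	       rp0 * scaler, rp0 * 50);
	return 0;
}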
 
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
gen6_init_rps_frequencies(dev);
 
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) {
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return;
}
 
/* Program defaults and thresholds for RPS */
I915_WRITE(GEN6_RC_VIDEO_FREQ,
GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
 
/* 1 second timeout */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
GT_INTERVAL_FROM_US(dev_priv, 1000000));
 
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
 
/* Leaning on the below call to gen6_set_rps to program/setup the
* Up/Down EI & threshold registers, as well as the RP_CONTROL,
* RP_INTERRUPT_LIMITS & RPNSWREQ registers */
dev_priv->rps.power = HIGH_POWER; /* force a reset */
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
 
static void gen9_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
uint32_t rc6_mask = 0;
int unused;
4682,31 → 4779,64
 
/* 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have. */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
 
/* 2b: Program RC6 thresholds.*/
 
/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
if (IS_SKYLAKE(dev))
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
else
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_ring(ring, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
if (HAS_GUC_UCODE(dev))
I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
 
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
 
/* 2c: Program Coarse Power Gating Policies. */
I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
 
/* 3a: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
"on" : "off");
/* WaRsUseTimeoutMode */
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0)) {
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
rc6_mask);
} else {
I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN6_RC_CTL_EI_MODE(1) |
rc6_mask);
}
 
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
/*
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
* WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
*/
if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
I915_WRITE(GEN9_PG_ENABLE, 0);
else
I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
 
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
}
 
static void gen8_enable_rps(struct drm_device *dev)
4721,7 → 4851,7
 
/* 1c & 1d: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have. */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
4786,9 → 4916,9
/* 6: Ring frequency + overclocking (our driver does this later) */
 
dev_priv->rps.power = HIGH_POWER; /* force a reset */
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
 
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
 
static void gen6_enable_rps(struct drm_device *dev)
4816,7 → 4946,7
I915_WRITE(GTFIFODBG, gtfifodbg);
}
 
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
/* Initialize rps frequencies */
gen6_init_rps_frequencies(dev);
4880,7 → 5010,7
}
 
dev_priv->rps.power = HIGH_POWER; /* force a reset */
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
 
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
4896,7 → 5026,7
DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
}
 
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
 
static void __gen6_update_ring_freq(struct drm_device *dev)
4905,6 → 5035,7
int min_freq = 15;
unsigned int gpu_freq;
unsigned int max_ia_freq, min_ring_freq;
unsigned int max_gpu_freq, min_gpu_freq;
int scaling_factor = 180;
struct cpufreq_policy *policy;
 
4924,17 → 5055,31
/* convert DDR frequency from units of 266.6MHz to bandwidth */
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
if (IS_SKYLAKE(dev)) {
/* Convert GT frequency to 50 MHz units */
min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
} else {
min_gpu_freq = dev_priv->rps.min_freq;
max_gpu_freq = dev_priv->rps.max_freq;
}
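On SKL the RPS registers count in 16.667 MHz increments while the ring-frequency table below works in 50 MHz units, so dividing by GEN9_FREQ_SCALER converts between the two. A hedged sketch of the arithmetic, assuming GEN9_FREQ_SCALER is 3 (the 50 MHz / 16.667 MHz ratio; the real value lives in the register headers):

/* Illustrative only: assumes GEN9_FREQ_SCALER == 3. */
#define GEN9_FREQ_SCALER 3

static unsigned int gen9_to_50mhz_units(unsigned int rps_units)
{
	/* e.g. 36 RPS units (36 * 16.667 MHz = 600 MHz) map to
	 * 12 units of 50 MHz (12 * 50 MHz = 600 MHz). */
	return rps_units / GEN9_FREQ_SCALER;
}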
 
/*
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
gpu_freq--) {
int diff = dev_priv->rps.max_freq - gpu_freq;
for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
int diff = max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
 
if (INTEL_INFO(dev)->gen >= 8) {
if (IS_SKYLAKE(dev)) {
/*
* ring_freq = 2 * GT. ring_freq is in 100MHz units
* No floor required for ring frequency on SKL.
*/
ring_freq = gpu_freq;
} else if (INTEL_INFO(dev)->gen >= 8) {
/* max(2 * GT, DDR). NB: GT is 50MHz units */
ring_freq = max(min_ring_freq, gpu_freq);
} else if (IS_HASWELL(dev)) {
4968,7 → 5113,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
if (!HAS_CORE_RING_FREQ(dev))
return;
 
mutex_lock(&dev_priv->rps.hw_lock);
4978,11 → 5123,30
 
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
u32 val, rp0;
 
val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
 
switch (INTEL_INFO(dev)->eu_total) {
case 8:
/* (2 * 4) config */
rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
break;
case 12:
/* (2 * 6) config */
rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
break;
case 16:
/* (2 * 8) config */
default:
/* Setting (2 * 8) Min RP0 for any other combination */
rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
break;
}
 
rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
 
return rp0;
}
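The rewritten RP0 lookup follows a common fuse-decode pattern: read the fuse register once, select a per-SKU bit field by EU count, then mask down to the frequency width. A generic standalone sketch of the same pattern (the shift and mask values are placeholders, not the real FB_GFX_FMAX_AT_VMAX_FUSE layout):

#include <stdint.h>

#define FUSE_FIELD_2SS4EU_SHIFT 0   /* placeholder shifts -- the real */
#define FUSE_FIELD_2SS6EU_SHIFT 8   /* values live in the i915 */
#define FUSE_FIELD_2SS8EU_SHIFT 16  /* register headers */
#define FUSE_FREQ_MASK 0xff         /* placeholder frequency width */

static uint32_t decode_rp0(uint32_t fuse, unsigned int eu_total)
{
	unsigned int shift;

	switch (eu_total) {
	case 8:  shift = FUSE_FIELD_2SS4EU_SHIFT; break; /* (2 * 4) config */
	case 12: shift = FUSE_FIELD_2SS6EU_SHIFT; break; /* (2 * 6) config */
	default: shift = FUSE_FIELD_2SS8EU_SHIFT; break; /* (2 * 8) config */
	}

	return (fuse >> shift) & FUSE_FREQ_MASK;
}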
 
5000,21 → 5164,12
{
u32 val, rp1;
 
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
 
return rp1;
}
 
static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
{
u32 val, rpn;
 
val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
return rpn;
}
 
static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp1;
5182,24 → 5337,26
dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
dev_priv->rps.max_freq);
 
dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
 
dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
dev_priv->rps.rp1_freq);
 
dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
 
dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
 
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5219,32 → 5376,17
 
mutex_lock(&dev_priv->rps.hw_lock);
 
mutex_lock(&dev_priv->dpio_lock);
mutex_lock(&dev_priv->sb_lock);
val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
 
switch ((val >> 2) & 0x7) {
case 0:
case 1:
dev_priv->rps.cz_freq = 200;
dev_priv->mem_freq = 1600;
break;
case 2:
dev_priv->rps.cz_freq = 267;
dev_priv->mem_freq = 1600;
break;
case 3:
dev_priv->rps.cz_freq = 333;
dev_priv->mem_freq = 2000;
break;
case 4:
dev_priv->rps.cz_freq = 320;
default:
dev_priv->mem_freq = 1600;
break;
case 5:
dev_priv->rps.cz_freq = 400;
dev_priv->mem_freq = 1600;
break;
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
 
5251,22 → 5393,23
dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
dev_priv->rps.max_freq);
 
dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
 
dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
dev_priv->rps.rp1_freq);
 
dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
/* PUnit validated range is only [RPe, RP0] */
dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
 
WARN_ONCE((dev_priv->rps.max_freq |
5275,6 → 5418,8
dev_priv->rps.min_freq) & 1,
"Odd GPU freq values\n");
 
dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
 
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5310,8 → 5455,11
 
/* 1a & 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
/* Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
 
/* 2a: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5321,7 → 5469,8
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
 
I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
/* Timeout (TO) threshold set to 500 us (0x186 * 1.28 us) */
I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
 
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
5335,11 → 5484,12
/* 3: Enable RC6 */
if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
(pcbr >> VLV_PCBR_ADDR_SHIFT))
rc6_mode = GEN6_RC_CTL_EI_MODE(1);
rc6_mode = GEN7_RC_CTL_TO_MODE;
 
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
/* 4: Program defaults and thresholds for RPS */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
5347,38 → 5497,40
 
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
/* WaDisablePwrmtrEvent:chv (pre-production hw) */
I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
 
/* 5: Enable RPS */
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
 
/* Setting Fixed Bias */
val = VLV_OVERRIDE_EN |
VLV_SOC_TDP_EN |
CHV_BIAS_CPU_50_SOC_50;
vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
 
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 
/* RPS code assumes GPLL is used */
WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
 
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
dev_priv->rps.cur_freq);
 
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
 
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
 
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
 
static void valleyview_enable_rps(struct drm_device *dev)
5399,8 → 5551,12
}
 
/* If VLV, Forcewake all wells, else re-direct to regular path */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
/* Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
 
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
5407,7 → 5563,6
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
 
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
5440,146 → 5595,34
 
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
/* Setting Fixed Bias */
val = VLV_OVERRIDE_EN |
VLV_SOC_TDP_EN |
VLV_BIAS_CPU_125_SOC_875;
vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
 
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 
/* RPS code assumes GPLL is used */
WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
 
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
dev_priv->rps.cur_freq);
 
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
 
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
 
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
 
void ironlake_teardown_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->ips.renderctx) {
i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
dev_priv->ips.renderctx = NULL;
}
 
if (dev_priv->ips.pwrctx) {
i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
dev_priv->ips.pwrctx = NULL;
}
}
 
static void ironlake_disable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (I915_READ(PWRCTXA)) {
/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
50);
 
I915_WRITE(PWRCTXA, 0);
POSTING_READ(PWRCTXA);
 
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
POSTING_READ(RSTDBYCTL);
}
}
 
static int ironlake_setup_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->ips.renderctx == NULL)
dev_priv->ips.renderctx = intel_alloc_context_page(dev);
if (!dev_priv->ips.renderctx)
return -ENOMEM;
 
if (dev_priv->ips.pwrctx == NULL)
dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
if (!dev_priv->ips.pwrctx) {
ironlake_teardown_rc6(dev);
return -ENOMEM;
}
 
return 0;
}
 
static void ironlake_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
bool was_interruptible;
int ret;
 
/* rc6 disabled by default due to repeated reports of hanging during
* boot and resume.
*/
if (!intel_enable_rc6(dev))
return;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
ret = ironlake_setup_rc6(dev);
if (ret)
return;
 
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
 
/*
* GPU can automatically power down the render unit if given a page
* to save state.
*/
ret = intel_ring_begin(ring, 6);
if (ret) {
ironlake_teardown_rc6(dev);
dev_priv->mm.interruptible = was_interruptible;
return;
}
 
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
MI_RESTORE_INHIBIT);
intel_ring_emit(ring, MI_SUSPEND_FLUSH);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_FLUSH);
intel_ring_advance(ring);
 
/*
* Wait for the command parser to advance past MI_SET_CONTEXT. The HW
* does an implicit flush; combined with the MI_FLUSH above, it should
* be safe to assume that renderctx is valid.
*/
ret = intel_ring_idle(ring);
dev_priv->mm.interruptible = was_interruptible;
if (ret) {
DRM_ERROR("failed to enable ironlake power savings\n");
ironlake_teardown_rc6(dev);
return;
}
 
I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 
intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
}
 
static unsigned long intel_pxfreq(u32 vidfreq)
{
unsigned long freq;
5695,146 → 5738,27
return ((m * x) / 127) - b;
}
 
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
static int _pxvid_to_vd(u8 pxvid)
{
if (pxvid == 0)
return 0;
 
if (pxvid >= 8 && pxvid < 31)
pxvid = 31;
 
return (pxvid + 2) * 125;
}
 
static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
struct drm_device *dev = dev_priv->dev;
static const struct v_table {
u16 vd; /* in .1 mil */
u16 vm; /* in .1 mil */
} v_table[] = {
{ 0, 0, },
{ 375, 0, },
{ 500, 0, },
{ 625, 0, },
{ 750, 0, },
{ 875, 0, },
{ 1000, 0, },
{ 1125, 0, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4250, 3125, },
{ 4375, 3250, },
{ 4500, 3375, },
{ 4625, 3500, },
{ 4750, 3625, },
{ 4875, 3750, },
{ 5000, 3875, },
{ 5125, 4000, },
{ 5250, 4125, },
{ 5375, 4250, },
{ 5500, 4375, },
{ 5625, 4500, },
{ 5750, 4625, },
{ 5875, 4750, },
{ 6000, 4875, },
{ 6125, 5000, },
{ 6250, 5125, },
{ 6375, 5250, },
{ 6500, 5375, },
{ 6625, 5500, },
{ 6750, 5625, },
{ 6875, 5750, },
{ 7000, 5875, },
{ 7125, 6000, },
{ 7250, 6125, },
{ 7375, 6250, },
{ 7500, 6375, },
{ 7625, 6500, },
{ 7750, 6625, },
{ 7875, 6750, },
{ 8000, 6875, },
{ 8125, 7000, },
{ 8250, 7125, },
{ 8375, 7250, },
{ 8500, 7375, },
{ 8625, 7500, },
{ 8750, 7625, },
{ 8875, 7750, },
{ 9000, 7875, },
{ 9125, 8000, },
{ 9250, 8125, },
{ 9375, 8250, },
{ 9500, 8375, },
{ 9625, 8500, },
{ 9750, 8625, },
{ 9875, 8750, },
{ 10000, 8875, },
{ 10125, 9000, },
{ 10250, 9125, },
{ 10375, 9250, },
{ 10500, 9375, },
{ 10625, 9500, },
{ 10750, 9625, },
{ 10875, 9750, },
{ 11000, 9875, },
{ 11125, 10000, },
{ 11250, 10125, },
{ 11375, 10250, },
{ 11500, 10375, },
{ 11625, 10500, },
{ 11750, 10625, },
{ 11875, 10750, },
{ 12000, 10875, },
{ 12125, 11000, },
{ 12250, 11125, },
{ 12375, 11250, },
{ 12500, 11375, },
{ 12625, 11500, },
{ 12750, 11625, },
{ 12875, 11750, },
{ 13000, 11875, },
{ 13125, 12000, },
{ 13250, 12125, },
{ 13375, 12250, },
{ 13500, 12375, },
{ 13625, 12500, },
{ 13750, 12625, },
{ 13875, 12750, },
{ 14000, 12875, },
{ 14125, 13000, },
{ 14250, 13125, },
{ 14375, 13250, },
{ 14500, 13375, },
{ 14625, 13500, },
{ 14750, 13625, },
{ 14875, 13750, },
{ 15000, 13875, },
{ 15125, 14000, },
{ 15250, 14125, },
{ 15375, 14250, },
{ 15500, 14375, },
{ 15625, 14500, },
{ 15750, 14625, },
{ 15875, 14750, },
{ 16000, 14875, },
{ 16125, 15000, },
};
const int vd = _pxvid_to_vd(pxvid);
const int vm = vd - 1125;
 
if (INTEL_INFO(dev)->is_mobile)
return v_table[pxvid].vm;
else
return v_table[pxvid].vd;
return vm > 0 ? vm : 0;
 
return vd;
}
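The closed-form replacement can be checked against the table it deletes: pxvid 0x20 (32) gives vd = (32 + 2) * 125 = 4250 and vm = 4250 - 1125 = 3125, exactly the { 4250, 3125 } row above, while the clamp of pxvid in [8, 30] to 31 reproduces the long run of { 4125, 3000 } rows. A standalone check (the helper mirrors _pxvid_to_vd above):

#include <assert.h>

static int pxvid_to_vd(unsigned char pxvid)
{
	if (pxvid == 0)
		return 0;
	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;
	return (pxvid + 2) * 125;
}

int main(void)
{
	int vd = pxvid_to_vd(0x20);

	assert(vd == 4250 && vd - 1125 == 3125); /* old { 4250, 3125 } row */
	assert(pxvid_to_vd(8) == 4125);          /* clamped range collapses */
	assert(pxvid_to_vd(30) == 4125);         /* to the { 4125, 3000 } rows */
	return 0;
}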
 
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
5891,7 → 5815,7
 
assert_spin_locked(&mchdev_lock);
 
pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(dev_priv, pxvid);
 
6134,13 → 6058,13
I915_WRITE(CSIEW2, 0x04000004);
 
for (i = 0; i < 5; i++)
I915_WRITE(PEW + (i * 4), 0);
I915_WRITE(PEW(i), 0);
for (i = 0; i < 3; i++)
I915_WRITE(DEW + (i * 4), 0);
I915_WRITE(DEW(i), 0);
 
/* Program P-state weights to account for frequency power adjustment */
for (i = 0; i < 16; i++) {
u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
u32 pxvidfreq = I915_READ(PXVFREQ(i));
unsigned long freq = intel_pxfreq(pxvidfreq);
unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
6161,7 → 6085,7
for (i = 0; i < 4; i++) {
u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
I915_WRITE(PXW + (i * 4), val);
I915_WRITE(PXW(i), val);
}
 
/* Adjust magic regs to magic values (more experimental results) */
6177,7 → 6101,7
I915_WRITE(EG7, 0);
 
for (i = 0; i < 8; i++)
I915_WRITE(PXWL + (i * 4), 0);
I915_WRITE(PXWL(i), 0);
 
/* Enable PMON + select events */
I915_WRITE(ECR, 0x80000019);
6211,11 → 6135,6
 
// flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
/*
* TODO: disable RPS interrupts on GEN9+ too once RPS support
* is added for it.
*/
if (INTEL_INFO(dev)->gen < 9)
gen6_disable_rps_interrupts(dev);
}
 
6246,7 → 6165,6
 
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6) {
intel_suspend_gt_powersave(dev);
 
6274,11 → 6192,6
 
mutex_lock(&dev_priv->rps.hw_lock);
 
/*
* TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
* added for it.
*/
if (INTEL_INFO(dev)->gen < 9)
gen6_reset_rps_interrupts(dev);
 
if (IS_CHERRYVIEW(dev)) {
6286,7 → 6199,10
} else if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
} else if (INTEL_INFO(dev)->gen >= 9) {
gen9_enable_rc6(dev);
gen9_enable_rps(dev);
if (IS_SKYLAKE(dev))
__gen6_update_ring_freq(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
__gen6_update_ring_freq(dev);
6294,9 → 6210,15
gen6_enable_rps(dev);
__gen6_update_ring_freq(dev);
}
 
WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
 
WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
 
dev_priv->rps.enabled = true;
 
if (INTEL_INFO(dev)->gen < 9)
gen6_enable_rps_interrupts(dev);
 
mutex_unlock(&dev_priv->rps.hw_lock);
6308,10 → 6230,13
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Powersaving is controlled by the host when inside a VM */
if (intel_vgpu_active(dev))
return;
 
if (IS_IRONLAKE_M(dev)) {
mutex_lock(&dev->struct_mutex);
ironlake_enable_drps(dev);
ironlake_enable_rc6(dev);
intel_init_emon(dev);
mutex_unlock(&dev->struct_mutex);
} else if (INTEL_INFO(dev)->gen >= 6) {
6359,13 → 6284,15
static void g4x_disable_trickle_feed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
enum pipe pipe;
 
for_each_pipe(dev_priv, pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_primary_plane(dev_priv, pipe);
 
I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
POSTING_READ(DSPSURF(pipe));
}
}
 
6628,14 → 6555,14
* TODO: this bit should only be enabled when really needed, then
* disabled when not needed anymore in order to save power.
*/
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
if (HAS_PCH_LPT_LP(dev))
I915_WRITE(SOUTH_DSPCLK_GATE_D,
I915_READ(SOUTH_DSPCLK_GATE_D) |
PCH_LP_PARTITION_LEVEL_DISABLE);
 
/* WADPOClockGatingDisable:hsw */
I915_WRITE(_TRANSA_CHICKEN1,
I915_READ(_TRANSA_CHICKEN1) |
I915_WRITE(TRANS_CHICKEN1(PIPE_A),
I915_READ(TRANS_CHICKEN1(PIPE_A)) |
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
 
6643,7 → 6570,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
if (HAS_PCH_LPT_LP(dev)) {
uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
 
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
6655,10 → 6582,9
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
uint32_t misccpctl;
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
ilk_init_lp_watermarks(dev);
 
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
6687,6 → 6613,22
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
/*
* WaProgramL3SqcReg1Default:bdw
* WaTempDisableDOPClkGating:bdw
*/
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
/*
* WaGttCachingOffByDefault:bdw
* GTT cache may not work with big pages, so if those are
* ever enabled, the GTT cache may need to be disabled.
*/
I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
 
lpt_init_clock_gating(dev);
}
 
6732,6 → 6674,10
I915_WRITE(GEN7_GT_MODE,
_MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
 
/* WaSampleCChickenBitEnable:hsw */
I915_WRITE(HALF_SLICE_CHICKEN3,
_MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
 
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
6840,11 → 6786,22
gen6_check_mch_setup(dev);
}
 
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
/*
* Disable trickle feed and enable PND (pending) deadline calculation
*/
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
I915_WRITE(CBR1_VLV, 0);
}
 
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
vlv_init_display_clock_gating(dev_priv);
 
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
6892,8 → 6849,6
I915_WRITE(GEN7_UCGCTL4,
I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
 
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
 
/*
* BSpec says this must be set, even though
* WaDisable4x2SubspanOptimization isn't listed for VLV.
6902,6 → 6857,17
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
*
* Note that PS/WM thread counts depend on the WIZ hashing
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN7_GT_MODE,
_MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
 
/*
* WaIncreaseL3CreditsForVLVB0:vlv
* This is the hardware default actually.
*/
6919,10 → 6885,8
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
vlv_init_display_clock_gating(dev_priv);
 
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
 
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
I915_WRITE(GEN7_FF_THREAD_MODE,
6940,6 → 6904,12
/* WaDisableSDEUnitClockGating:chv */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
/*
* GTT cache may not work with big pages, so if those are
* ever enabled, the GTT cache may need to be disabled.
*/
I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}
 
static void g4x_init_clock_gating(struct drm_device *dev)
7056,6 → 7026,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->display.init_clock_gating)
dev_priv->display.init_clock_gating(dev);
}
 
7065,43 → 7036,12
lpt_suspend_hw(dev);
}
 
static void intel_init_fbc(struct drm_i915_private *dev_priv)
{
if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.enabled = false;
return;
}
 
if (INTEL_INFO(dev_priv)->gen >= 7) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
dev_priv->display.enable_fbc = gen7_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
} else if (INTEL_INFO(dev_priv)->gen >= 5) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
dev_priv->display.enable_fbc = ironlake_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
} else if (IS_GM45(dev_priv)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
dev_priv->display.disable_fbc = g4x_disable_fbc;
} else {
dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
dev_priv->display.enable_fbc = i8xx_enable_fbc;
dev_priv->display.disable_fbc = i8xx_disable_fbc;
 
/* This value was pulled out of someone's hat */
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}
 
dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
}
 
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
intel_init_fbc(dev_priv);
intel_fbc_init(dev_priv);
 
/* For cxsr */
if (IS_PINEVIEW(dev))
7113,7 → 7053,9
if (INTEL_INFO(dev)->gen >= 9) {
skl_setup_wm_latency(dev);
 
dev_priv->display.init_clock_gating = gen9_init_clock_gating;
if (IS_BROXTON(dev))
dev_priv->display.init_clock_gating =
bxt_init_clock_gating;
dev_priv->display.update_wm = skl_update_wm;
dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
} else if (HAS_PCH_SPLIT(dev)) {
7141,13 → 7083,15
else if (INTEL_INFO(dev)->gen == 8)
dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
} else if (IS_CHERRYVIEW(dev)) {
dev_priv->display.update_wm = cherryview_update_wm;
dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
vlv_setup_wm_latency(dev);
 
dev_priv->display.update_wm = vlv_update_wm;
dev_priv->display.init_clock_gating =
cherryview_init_clock_gating;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
vlv_setup_wm_latency(dev);
 
dev_priv->display.update_wm = vlv_update_wm;
dev_priv->display.init_clock_gating =
valleyview_init_clock_gating;
} else if (IS_PINEVIEW(dev)) {
7264,7 → 7208,7
 
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
 
div = vlv_gpu_freq_div(czclk_freq);
if (div < 0)
7275,7 → 7219,7
 
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
 
mul = vlv_gpu_freq_div(czclk_freq);
if (mul < 0)
7286,7 → 7230,7
 
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int div, czclk_freq = dev_priv->rps.cz_freq;
int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
 
div = vlv_gpu_freq_div(czclk_freq) / 2;
if (div < 0)
7297,7 → 7241,7
 
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
int mul, czclk_freq = dev_priv->rps.cz_freq;
int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
 
mul = vlv_gpu_freq_div(czclk_freq) / 2;
if (mul < 0)
7307,28 → 7251,70
return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
}
 
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int ret = -1;
if (IS_GEN9(dev_priv->dev))
return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
GEN9_FREQ_SCALER);
else if (IS_CHERRYVIEW(dev_priv->dev))
return chv_gpu_freq(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
return byt_gpu_freq(dev_priv, val);
else
return val * GT_FREQUENCY_MULTIPLIER;
}
 
if (IS_CHERRYVIEW(dev_priv->dev))
ret = chv_gpu_freq(dev_priv, val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
if (IS_GEN9(dev_priv->dev))
return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
GT_FREQUENCY_MULTIPLIER);
else if (IS_CHERRYVIEW(dev_priv->dev))
return chv_freq_opcode(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
ret = byt_gpu_freq(dev_priv, val);
return byt_freq_opcode(dev_priv, val);
else
return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
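The two helpers are inverses: intel_gpu_freq() turns an opcode into MHz and intel_freq_opcode() turns MHz back into an opcode. A worked round trip, assuming GT_FREQUENCY_MULTIPLIER is 50 and GEN9_FREQ_SCALER is 3 (both are assumptions about this era's headers):

#include <assert.h>

#define GT_FREQUENCY_MULTIPLIER 50 /* assumed: 50 MHz per opcode unit */
#define GEN9_FREQ_SCALER 3         /* assumed: GEN9 counts 16.667 MHz units */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	/* GEN9: opcode 18 -> 300 MHz -> opcode 18 again. */
	int mhz = DIV_ROUND_CLOSEST(18 * GT_FREQUENCY_MULTIPLIER,
				    GEN9_FREQ_SCALER);
	int opcode = DIV_ROUND_CLOSEST(mhz * GEN9_FREQ_SCALER,
				       GT_FREQUENCY_MULTIPLIER);

	assert(mhz == 300 && opcode == 18);
	return 0;
}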
 
return ret;
struct request_boost {
struct work_struct work;
struct drm_i915_gem_request *req;
};
 
static void __intel_rps_boost_work(struct work_struct *work)
{
struct request_boost *boost = container_of(work, struct request_boost, work);
struct drm_i915_gem_request *req = boost->req;
 
if (!i915_gem_request_completed(req, true))
gen6_rps_boost(to_i915(req->ring->dev), NULL,
req->emitted_jiffies);
 
i915_gem_request_unreference__unlocked(req);
kfree(boost);
}
 
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
void intel_queue_rps_boost_for_request(struct drm_device *dev,
struct drm_i915_gem_request *req)
{
int ret = -1;
struct request_boost *boost;
 
if (IS_CHERRYVIEW(dev_priv->dev))
ret = chv_freq_opcode(dev_priv, val);
else if (IS_VALLEYVIEW(dev_priv->dev))
ret = byt_freq_opcode(dev_priv, val);
if (req == NULL || INTEL_INFO(dev)->gen < 6)
return;
 
return ret;
if (i915_gem_request_completed(req, true))
return;
 
boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
if (boost == NULL)
return;
 
i915_gem_request_reference(req);
boost->req = req;
 
INIT_WORK(&boost->work, __intel_rps_boost_work);
queue_work(to_i915(dev)->wq, &boost->work);
}
 
void intel_pm_setup(struct drm_device *dev)
7336,9 → 7322,13
struct drm_i915_private *dev_priv = dev->dev_private;
 
mutex_init(&dev_priv->rps.hw_lock);
spin_lock_init(&dev_priv->rps.client_lock);
 
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
INIT_LIST_HEAD(&dev_priv->rps.clients);
INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);
 
dev_priv->pm.suspended = false;
}
/drivers/video/drm/i915/intel_psr.c
61,25 → 61,26
return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
 
bool intel_psr_is_enabled(struct drm_device *dev)
static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val;
 
if (!HAS_PSR(dev))
return false;
 
return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
val = I915_READ(VLV_PSRSTAT(pipe)) &
VLV_EDP_PSR_CURR_STATE_MASK;
return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
(val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}
 
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
struct edp_vsc_psr *vsc_psr)
const struct edp_vsc_psr *vsc_psr)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
uint32_t *data = (uint32_t *) vsc_psr;
unsigned int i;
 
89,21 → 90,52
I915_WRITE(ctl_reg, 0);
POSTING_READ(ctl_reg);
 
for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
if (i < sizeof(struct edp_vsc_psr))
I915_WRITE(data_reg + i, *data++);
else
I915_WRITE(data_reg + i, 0);
for (i = 0; i < sizeof(*vsc_psr); i += 4) {
I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
i >> 2), *data);
data++;
}
for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
i >> 2), 0);
 
I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
POSTING_READ(ctl_reg);
}
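The rewritten loop addresses the per-dword VSC data registers by index (i >> 2) rather than by byte offset from a base register, and zero-pads the rest of the DIP buffer. The same shape in a standalone sketch (the buffer size and names are illustrative, not the hardware values):

#include <stdint.h>
#include <string.h>

#define DIP_DWORDS 9 /* illustrative DIP buffer size */

/* Pack a small struct into dword slots and zero-pad the tail,
 * mirroring the intel_psr_write_vsc() loop structure above. */
static void write_sdp_dwords(uint32_t regs[DIP_DWORDS],
			     const void *sdp, size_t len)
{
	uint32_t data[DIP_DWORDS] = { 0 };
	unsigned int i;

	memcpy(data, sdp, len); /* caller guarantees len <= sizeof(data) */
	for (i = 0; i < DIP_DWORDS; i++)
		regs[i] = data[i]; /* stand-in for I915_WRITE(..._DATA(t, i)) */
}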
 
static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
uint32_t val;
 
/* VLV auto-generates the VSC packet as per the eDP 1.3 spec, Table 3.10 */
val = I915_READ(VLV_VSCSDP(pipe));
val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
I915_WRITE(VLV_VSCSDP(pipe), val);
}
 
static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
{
struct edp_vsc_psr psr_vsc;
 
/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
psr_vsc.sdp_header.HB1 = 0x7;
psr_vsc.sdp_header.HB2 = 0x3;
psr_vsc.sdp_header.HB3 = 0xb;
intel_psr_write_vsc(intel_dp, &psr_vsc);
}
 
static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
{
struct edp_vsc_psr psr_vsc;
 
/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
113,14 → 145,20
intel_psr_write_vsc(intel_dp, &psr_vsc);
}
 
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}
 
static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
uint32_t aux_data_reg, aux_ctl_reg;
int precharge = 0x3;
bool only_standby = false;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
134,23 → 172,40
 
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;
 
/* Enable PSR in sink */
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
else
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
 
/* Enable AUX frame sync at sink */
if (dev_priv->psr.aux_frame_sync)
drm_dp_dpcd_writeb(&intel_dp->aux,
DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
DP_AUX_FRAME_SYNC_ENABLE);
 
aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ?
DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev);
aux_ctl_reg = (INTEL_INFO(dev)->gen >= 9) ?
DPA_AUX_CH_CTL : EDP_PSR_AUX_CTL(dev);
 
/* Setup AUX registers */
for (i = 0; i < sizeof(aux_msg); i += 4)
I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
I915_WRITE(aux_data_reg + i,
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
 
I915_WRITE(EDP_PSR_AUX_CTL(dev),
if (INTEL_INFO(dev)->gen >= 9) {
uint32_t val;
 
val = I915_READ(aux_ctl_reg);
val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
/* Use hardcoded data values for PSR, frame sync and GTC */
val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
I915_WRITE(aux_ctl_reg, val);
} else {
I915_WRITE(aux_ctl_reg,
DP_AUX_CH_CTL_TIME_OUT_400us |
(sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
157,28 → 212,69
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
 
static void intel_psr_enable_source(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE);
}
 
static void vlv_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
 
/* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
I915_WRITE(VLV_PSRCTL(pipe),
VLV_EDP_PSR_MODE_SW_TIMER |
VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
VLV_EDP_PSR_ENABLE);
}
 
static void vlv_psr_activate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
 
/* Do the transition from PSR_state 1 to PSR_state 2, that is,
* PSR transition to active - static frame transmission. The
* hardware is then responsible for the transition to PSR_state 3,
* that is, PSR active - no Remote Frame Buffer (RFB) update.
*/
I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
VLV_EDP_PSR_ACTIVE_ENTRY);
}
 
static void hsw_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
uint32_t max_sleep_time = 0x1f;
uint32_t idle_frames = 1;
/* It was recently identified that, depending on the panel, the idle
* frame count calculated by the HW can be off by 1, so use the VBT
* value + 1.
* There are also panels that demand at least 4 idle frames while the
* VBT field is left unset. To cover both cases, use at least 5 idle
* frames whenever the VBT doesn't provide a value, to stay on the
* safe side.
*/
uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
dev_priv->vbt.psr.idle_frames + 1 : 5;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
bool only_standby = false;
 
if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;
 
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
val |= EDP_PSR_LINK_STANDBY;
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
/* It doesn't mean we shouldn't send TPS patterns, so let's
send the minimal TP1 possible and skip TP2. */
val |= EDP_PSR_TP1_TIME_100us;
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
} else
val |= EDP_PSR_LINK_DISABLE;
/* Sink should be able to train with the 5 or 6 idle patterns */
idle_frames += 4;
}
 
I915_WRITE(EDP_PSR_CTL(dev), val |
(IS_BROADWELL(dev) ? 0 : link_entry_time) |
185,6 → 281,10
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
 
if (dev_priv->psr.psr2_support)
I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
}
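The idle-frame policy above condenses to: the VBT value plus one when the VBT provides one, otherwise 5, plus 4 more when the sink sets DP_PSR_NO_TRAIN_ON_EXIT. A small sketch of just that policy (the helper is illustrative, not a driver function):

/* Idle-frame count policy from hsw_psr_enable_source() above. */
static unsigned int psr_idle_frames(unsigned int vbt_idle_frames,
				    int sink_no_train_on_exit)
{
	/* VBT + 1 absorbs the off-by-one; 5 when the VBT is silent. */
	unsigned int idle_frames = vbt_idle_frames ? vbt_idle_frames + 1 : 5;

	/* The sink should be able to train with 5 or 6 idle patterns. */
	if (sink_no_train_on_exit)
		idle_frames += 4;

	return idle_frames;
}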
 
static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
211,27 → 311,30
return false;
}
 
/* Below limitations aren't valid for Broadwell */
if (IS_BROADWELL(dev))
goto out;
 
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
if (IS_HASWELL(dev) &&
I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
return false;
}
 
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (IS_HASWELL(dev) &&
intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
}
 
out:
if (!IS_VALLEYVIEW(dev) && ((dev_priv->vbt.psr.full_link) ||
(dig_port->port != PORT_A))) {
DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
return false;
}
 
dev_priv->psr.source_ok = true;
return true;
}
 
static void intel_psr_do_enable(struct intel_dp *intel_dp)
static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
242,7 → 345,14
lockdep_assert_held(&dev_priv->psr.lock);
 
/* Enable/Re-enable PSR on the host */
intel_psr_enable_source(intel_dp);
if (HAS_DDI(dev))
/* On HSW+, once PSR is enabled on the source it activates
* as soon as the configured idle_frame count is matched, so
* we only actually enable it here, at activation time.
*/
hsw_psr_enable_source(intel_dp);
else
vlv_psr_activate(intel_dp);
 
dev_priv->psr.active = true;
}
258,6 → 368,7
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
 
if (!HAS_PSR(dev)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
280,38 → 391,80
 
dev_priv->psr.busy_frontbuffer_bits = 0;
 
intel_psr_setup_vsc(intel_dp);
if (HAS_DDI(dev)) {
hsw_psr_setup_vsc(intel_dp);
 
if (dev_priv->psr.psr2_support) {
/* PSR2 is restricted to panel resolutions up to 3200x2000 */
if (crtc->config->pipe_src_w > 3200 ||
crtc->config->pipe_src_h > 2000)
dev_priv->psr.psr2_support = false;
else
skl_psr_setup_su_vsc(intel_dp);
}
 
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
EDP_PSR_DEBUG_MASK_HPD);
 
/* Enable PSR on the panel */
intel_psr_enable_sink(intel_dp);
hsw_psr_enable_sink(intel_dp);
 
if (INTEL_INFO(dev)->gen >= 9)
intel_psr_activate(intel_dp);
} else {
vlv_psr_setup_vsc(intel_dp);
 
/* Enable PSR on the panel */
vlv_psr_enable_sink(intel_dp);
 
/* On HSW+, enable_source also means entering the PSR
* entry/active state as soon as the idle_frame count is
* achieved, which here would be too soon. On VLV, however,
* enable_source just enables PSR and leaves it in the
* inactive state. So we can do this prior to the active
* transition, i.e. here.
*/
vlv_psr_enable_source(intel_dp);
}
 
dev_priv->psr.enabled = intel_dp;
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
 
/**
* intel_psr_disable - Disable PSR
* @intel_dp: Intel DP
*
* This function needs to be called before disabling pipe.
*/
void intel_psr_disable(struct intel_dp *intel_dp)
static void vlv_psr_disable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(intel_dig_port->base.base.crtc);
uint32_t val;
 
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
if (dev_priv->psr.active) {
/* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
VLV_EDP_PSR_IN_TRANS) == 0, 1))
WARN(1, "PSR transition took longer than expected\n");
 
val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
val &= ~VLV_EDP_PSR_ENABLE;
val &= ~VLV_EDP_PSR_MODE_MASK;
I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);
 
dev_priv->psr.active = false;
} else {
WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
}
}
 
static void hsw_psr_disable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL(dev),
I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
325,7 → 478,31
} else {
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
}
}
 
/**
* intel_psr_disable - Disable PSR
* @intel_dp: Intel DP
*
* This function needs to be called before disabling pipe.
*/
void intel_psr_disable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
if (HAS_DDI(dev))
hsw_psr_disable(intel_dp);
else
vlv_psr_disable(intel_dp);
 
dev_priv->psr.enabled = NULL;
mutex_unlock(&dev_priv->psr.lock);
 
337,6 → 514,8
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), psr.work.work);
struct intel_dp *intel_dp = dev_priv->psr.enabled;
struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
 
/* We have to make sure PSR is ready for re-enable
* otherwise it stays disabled until the next full enable/disable cycle.
343,12 → 522,19
* PSR might take some time to get fully disabled
* and be ready for re-enable.
*/
if (HAS_DDI(dev_priv->dev)) {
if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
return;
}
 
} else {
if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
return;
}
}
mutex_lock(&dev_priv->psr.lock);
intel_dp = dev_priv->psr.enabled;
 
363,7 → 549,7
if (dev_priv->psr.busy_frontbuffer_bits)
goto unlock;
 
intel_psr_do_enable(intel_dp);
intel_psr_activate(intel_dp);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
371,19 → 557,93
static void intel_psr_exit(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = dev_priv->psr.enabled;
struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
u32 val;
 
if (dev_priv->psr.active) {
u32 val = I915_READ(EDP_PSR_CTL(dev));
if (!dev_priv->psr.active)
return;
 
if (HAS_DDI(dev)) {
val = I915_READ(EDP_PSR_CTL(dev));
 
WARN_ON(!(val & EDP_PSR_ENABLE));
 
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
} else {
val = I915_READ(VLV_PSRCTL(pipe));
 
/* Here we transition directly from PSR_state 3 to PSR_state 5,
* since PSR_state 4 (active with single frame update) can be
* skipped. From PSR_state 5 (PSR exit) the hardware is
* responsible for transitioning back to PSR_state 1 (PSR
* inactive) - the same state as after vlv_psr_enable_source.
*/
val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
I915_WRITE(VLV_PSRCTL(pipe), val);
 
/* Send AUX wake up - the spec says that after transitioning to
* PSR active we have to send an AUX wake up by writing 01h to
* DPCD 600h of the sink device.
* XXX: This might slow down the transition, but without it the
* HW doesn't complete the transition to PSR_state 1 and we
* never get the screen updated.
*/
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
DP_SET_POWER_D0);
}
 
dev_priv->psr.active = false;
}
 
/**
* intel_psr_single_frame_update - Single Frame Update
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* Some platforms support a single frame update feature that is used to
* send and update only one frame on the Remote Frame Buffer.
* So far it is only implemented for Valleyview and Cherryview because
* the hardware requires this to be done before a page flip.
*/
void intel_psr_single_frame_update(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
u32 val;
 
/*
* Single frame update is already supported on BDW+ but it requires
* many W/A and it isn't really needed.
*/
if (!IS_VALLEYVIEW(dev))
return;
 
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
 
if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
val = I915_READ(VLV_PSRCTL(pipe));
 
/*
* We need to set this bit before writing registers for a flip.
* The bit self-clears once the hardware reaches the PSR active state.
*/
I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
}
mutex_unlock(&dev_priv->psr.lock);
}
 
/**
* intel_psr_invalidate - Invalidate PSR
* @dev: DRM device
412,11 → 672,12
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
 
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
 
if (frontbuffer_bits)
intel_psr_exit(dev);
 
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
 
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
mutex_unlock(&dev_priv->psr.lock);
}
 
424,6 → 685,7
* intel_psr_flush - Flush PSR
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the flush
*
* Since the hardware frontbuffer tracking has gaps we need to integrate
* with the software frontbuffer tracking. This function gets called every
433,11 → 695,12
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
*/
void intel_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
int delay_ms = HAS_DDI(dev) ? 100 : 500;
 
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
447,21 → 710,33
 
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
 
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
if (HAS_DDI(dev)) {
/*
* On Haswell sprite plane updates don't result in a psr invalidating
* signal in the hardware. Which means we need to manually fake this in
* software for all flushes, not just when we've seen a preceding
* invalidation through frontbuffer rendering.
* By definition every flush should mean invalidate + flush;
* however, on core platforms let's minimize the
* disable/re-enable so we can avoid the invalidate when the
* flip originated the flush.
*/
if (IS_HASWELL(dev) &&
(frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
if (frontbuffer_bits && origin != ORIGIN_FLIP)
intel_psr_exit(dev);
} else {
/*
* On Valleyview and Cherryview we don't use hardware tracking,
* so plane updates and cursor moves don't result in a PSR
* invalidation, which means we need to fake it manually in
* software for all flushes.
*/
if (frontbuffer_bits)
intel_psr_exit(dev);
}
 
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(100));
msecs_to_jiffies(delay_ms));
mutex_unlock(&dev_priv->psr.lock);
}
 
/drivers/video/drm/i915/intel_renderstate_gen6.c
1,3 → 1,28
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
 
#include "intel_renderstate.h"
 
static const u32 gen6_null_state_relocs[] = {
/drivers/video/drm/i915/intel_renderstate_gen7.c
1,3 → 1,28
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
 
#include "intel_renderstate.h"
 
static const u32 gen7_null_state_relocs[] = {
/drivers/video/drm/i915/intel_renderstate_gen8.c
1,3 → 1,28
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
 
#include "intel_renderstate.h"
 
static const u32 gen8_null_state_relocs[] = {
/drivers/video/drm/i915/intel_renderstate_gen9.c
1,3 → 1,28
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
 
#include "intel_renderstate.h"
 
static const u32 gen9_null_state_relocs[] = {
/drivers/video/drm/i915/intel_ringbuffer.c
52,18 → 52,29
 
int __intel_ring_space(int head, int tail, int size)
{
int space = head - (tail + I915_RING_FREE_SPACE);
if (space < 0)
int space = head - tail;
if (space <= 0)
space += size;
return space;
return space - I915_RING_FREE_SPACE;
}
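The reworked helper wraps the head-minus-tail distance around the ring before subtracting the reserved bytes, so callers see usable space only. A worked example, assuming I915_RING_FREE_SPACE is 64 (the value this era of the driver reserved):

#include <assert.h>

#define I915_RING_FREE_SPACE 64 /* assumed reserved-byte count */

static int ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - I915_RING_FREE_SPACE;
}

int main(void)
{
	/* head behind tail: the distance wraps around the 4 KiB ring,
	 * 512 - 3584 + 4096 = 1024, minus the 64 reserved bytes. */
	assert(ring_space(512, 3584, 4096) == 960);
	/* an empty ring (head == tail) reports the full size minus
	 * the reserve. */
	assert(ring_space(0, 0, 4096) == 4032);
	return 0;
}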
 
int intel_ring_space(struct intel_ringbuffer *ringbuf)
void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
{
return __intel_ring_space(ringbuf->head & HEAD_ADDR,
if (ringbuf->last_retired_head != -1) {
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
}
 
ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
ringbuf->tail, ringbuf->size);
}
 
int intel_ring_space(struct intel_ringbuffer *ringbuf)
{
intel_ring_update_space(ringbuf);
return ringbuf->space;
}
 
bool intel_ring_stopped(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
70,7 → 81,7
return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
}
 
void __intel_ring_advance(struct intel_engine_cs *ring)
static void __intel_ring_advance(struct intel_engine_cs *ring)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
ringbuf->tail &= ringbuf->size - 1;
80,10 → 91,11
}
 
static int
gen2_render_ring_flush(struct intel_engine_cs *ring,
gen2_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains)
{
struct intel_engine_cs *ring = req->ring;
u32 cmd;
int ret;
 
94,7 → 106,7
if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
cmd |= MI_READ_FLUSH;
 
ret = intel_ring_begin(ring, 2);
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
 
106,10 → 118,11
}
 
static int
gen4_render_ring_flush(struct intel_engine_cs *ring,
gen4_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains)
{
struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev;
u32 cmd;
int ret;
152,7 → 165,7
(IS_G4X(dev) || IS_GEN5(dev)))
cmd |= MI_INVALIDATE_ISP;
 
ret = intel_ring_begin(ring, 2);
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
 
201,13 → 214,13
* really our business. That leaves only stall at scoreboard.
*/
static int
intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
 
 
ret = intel_ring_begin(ring, 6);
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
 
220,7 → 233,7
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
 
ret = intel_ring_begin(ring, 6);
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
 
236,15 → 249,16
}
 
static int
gen6_render_ring_flush(struct intel_engine_cs *ring,
gen6_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
struct intel_engine_cs *ring = req->ring;
u32 flags = 0;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
 
/* Force SNB workarounds for PIPE_CONTROL flushes */
ret = intel_emit_post_sync_nonzero_flush(ring);
ret = intel_emit_post_sync_nonzero_flush(req);
if (ret)
return ret;
 
274,7 → 288,7
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
 
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
 
288,11 → 302,12
}
 
static int
gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
int ret;
 
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
 
306,33 → 321,11
return 0;
}
 
static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
{
int ret;
 
if (!ring->fbc_dirty)
return 0;
 
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
/* WaFbcNukeOn3DBlt:ivb/hsw */
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, MSG_FBC_REND_STATE);
intel_ring_emit(ring, value);
intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit(ring, MSG_FBC_REND_STATE);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
intel_ring_advance(ring);
 
ring->fbc_dirty = false;
return 0;
}
 
static int
gen7_render_ring_flush(struct intel_engine_cs *ring,
gen7_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
struct intel_engine_cs *ring = req->ring;
u32 flags = 0;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
354,6 → 347,7
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
if (invalidate_domains) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
374,10 → 368,10
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */
gen7_render_ring_cs_stall_wa(ring);
gen7_render_ring_cs_stall_wa(req);
}
 
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
 
387,19 → 381,17
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
 
if (!invalidate_domains && flush_domains)
return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
 
return 0;
}
 
static int
gen8_emit_pipe_control(struct intel_engine_cs *ring,
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
u32 flags, u32 scratch_addr)
{
struct intel_engine_cs *ring = req->ring;
int ret;
 
ret = intel_ring_begin(ring, 6);
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
 
415,11 → 407,11
}
 
static int
gen8_render_ring_flush(struct intel_engine_cs *ring,
gen8_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
 
flags |= PIPE_CONTROL_CS_STALL;
427,6 → 419,7
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
if (invalidate_domains) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
439,7 → 432,7
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 
/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
ret = gen8_emit_pipe_control(ring,
ret = gen8_emit_pipe_control(req,
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD,
0);
447,14 → 440,7
return ret;
}
 
ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
if (ret)
return ret;
 
if (!invalidate_domains && flush_domains)
return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
 
return 0;
return gen8_emit_pipe_control(req, flags, scratch_addr);
}
 
static void ring_write_tail(struct intel_engine_cs *ring,
491,6 → 477,68
I915_WRITE(HWS_PGA, addr);
}
 
static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 mmio = 0;
 
/* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
if (IS_GEN7(dev)) {
switch (ring->id) {
case RCS:
mmio = RENDER_HWS_PGA_GEN7;
break;
case BCS:
mmio = BLT_HWS_PGA_GEN7;
break;
/*
* VCS2 actually doesn't exist on Gen7. The case is only
* here to silence the gcc switch check warning.
*/
case VCS2:
case VCS:
mmio = BSD_HWS_PGA_GEN7;
break;
case VECS:
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
} else if (IS_GEN6(ring->dev)) {
mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
mmio = RING_HWS_PGA(ring->mmio_base);
}
 
I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
POSTING_READ(mmio);
 
/*
* Flush the TLB for this page
*
* FIXME: These two bits have disappeared on gen8, so a question
* arises: do we still need this and if so how should we go about
* invalidating the TLB?
*/
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
u32 reg = RING_INSTPM(ring->mmio_base);
 
/* ring should be idle before issuing a sync flush */
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
I915_WRITE(reg,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
INSTPM_SYNC_FLUSH));
if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
1000))
DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
ring->name);
}
}
 
static bool stop_ring(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
528,7 → 576,7
struct drm_i915_gem_object *obj = ringbuf->obj;
int ret = 0;
 
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
if (!stop_ring(ring)) {
/* G45 ring initialization often fails to reset head to zero */
592,15 → 640,15
goto out;
}
 
ringbuf->last_retired_head = -1;
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ringbuf->space = intel_ring_space(ringbuf);
ringbuf->last_retired_head = -1;
intel_ring_update_space(ringbuf);
 
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
out:
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
return ret;
}
627,8 → 675,7
{
int ret;
 
if (ring->scratch.obj)
return 0;
WARN_ON(ring->scratch.obj);
 
ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
if (ring->scratch.obj == NULL) {
664,23 → 711,23
return ret;
}
 
static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
struct intel_context *ctx)
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
 
if (WARN_ON(w->count == 0))
if (w->count == 0)
return 0;
 
ring->gpu_caches_dirty = true;
ret = intel_ring_flush_all_caches(ring);
ret = intel_ring_flush_all_caches(req);
if (ret)
return ret;
 
ret = intel_ring_begin(ring, (w->count * 2 + 2));
ret = intel_ring_begin(req, (w->count * 2 + 2));
if (ret)
return ret;
 
694,7 → 741,7
intel_ring_advance(ring);
 
ring->gpu_caches_dirty = true;
ret = intel_ring_flush_all_caches(ring);
ret = intel_ring_flush_all_caches(req);
if (ret)
return ret;
 
703,6 → 750,21
return 0;
}
 
static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
int ret;
 
ret = intel_ring_workarounds_emit(req);
if (ret != 0)
return ret;
 
ret = i915_gem_render_state_init(req);
if (ret)
DRM_ERROR("init render state: %d\n", ret);
 
return ret;
}
 
static int wa_add(struct drm_i915_private *dev_priv,
const u32 addr, const u32 mask, const u32 val)
{
720,11 → 782,11
return 0;
}
 
#define WA_REG(addr, mask, val) { \
#define WA_REG(addr, mask, val) do { \
const int r = wa_add(dev_priv, (addr), (mask), (val)); \
if (r) \
return r; \
}
} while (0)
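 
The WA_REG change above swaps bare braces for do { ... } while (0) so the macro expands to a single statement. A simplified illustration (not the driver macro) of the if/else pitfall the bare-brace form creates:
 
#include <stdio.h>
 
#define LOG_BAD(msg)  { printf("%s\n", msg); }
#define LOG_GOOD(msg) do { printf("%s\n", msg); } while (0)
 
int main(void)
{
	int enabled = 1;
 
	if (enabled)
		LOG_GOOD("workaround applied"); /* expands to one statement */
	else
		LOG_GOOD("skipped");
 
	/* Swapping in LOG_BAD above fails to compile: the ';' after the
	 * brace block terminates the if before the else is seen. */
	return 0;
}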
 
#define WA_SET_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
740,37 → 802,43
 
#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
 
static int bdw_init_workarounds(struct intel_engine_cs *ring)
static int gen8_init_workarounds(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* WaDisablePartialInstShootdown:bdw */
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
STALL_DOP_GATING_DISABLE);
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
 
/* WaDisableDopClockGating:bdw */
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
DOP_CLOCK_GATING_DISABLE);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
 
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
/* WaDisablePartialInstShootdown:bdw,chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
/* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
/* WaForceEnableNonCoherent:bdw,chv */
/* WaHdcDisableFetchWhenMasked:bdw,chv */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT |
(IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
HDC_DONOT_FETCH_MEM_WHEN_MASKED |
HDC_FORCE_NON_COHERENT);
 
/* Wa4x4STCOptimizationDisable:bdw */
WA_SET_BIT_MASKED(CACHE_MODE_1,
GEN8_4x4_STC_OPTIMIZATION_DISABLE);
/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
* "The Hierarchical Z RAW Stall Optimization allows non-overlapping
* polygons in the same 8x4 pixel/sample area to be processed without
* stalling waiting for the earlier ones to write to Hierarchical Z
* buffer."
*
* This optimization is off by default for BDW and CHV; turn it on.
*/
WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
 
/* Wa4x4STCOptimizationDisable:bdw,chv */
WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
 
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
786,27 → 854,279
return 0;
}
 
static int bdw_init_workarounds(struct intel_engine_cs *ring)
{
int ret;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
ret = gen8_init_workarounds(ring);
if (ret)
return ret;
 
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
 
/* WaDisableDopClockGating:bdw */
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
DOP_CLOCK_GATING_DISABLE);
 
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
 
WA_SET_BIT_MASKED(HDC_CHICKEN0,
/* WaForceContextSaveRestoreNonCoherent:bdw */
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
(IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 
return 0;
}
 
static int chv_init_workarounds(struct intel_engine_cs *ring)
{
int ret;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* WaDisablePartialInstShootdown:chv */
ret = gen8_init_workarounds(ring);
if (ret)
return ret;
 
/* WaDisableThreadStallDopClockGating:chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
 
/* Improve HiZ throughput on CHV. */
WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
 
return 0;
}
 
static int gen9_init_workarounds(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
/* WaEnableLbsSlaRetryTimerDecrement:skl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
 
/* WaDisableKillLogic:bxt,skl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
ECOCHK_DIS_TLB);
 
/* WaDisablePartialInstShootdown:skl,bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
STALL_DOP_GATING_DISABLE);
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
/* Syncing dependencies between camera and graphics:skl,bxt */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 ||
INTEL_REVID(dev) == SKL_REVID_B0)) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);
}
 
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
/*
* The WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be
* set, but we do that in the per-context batch buffer because there
* is an issue with this register not getting restored on context
* restore.
*/
/* WaForceEnableNonCoherent:chv */
/* WaHdcDisableFetchWhenMasked:chv */
}
 
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
IS_BROXTON(dev)) {
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_YV12_BUGFIX);
}
 
/* Wa4x4STCOptimizationDisable:skl,bxt */
/* WaDisablePartialResolveInVc:skl,bxt */
WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
 
/* WaCcsTlbPrefetchDisable:skl,bxt */
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
 
/* WaDisableMaskBasedCammingInRCC:skl,bxt */
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_C0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0))
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
 
/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_F0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) >= BXT_REVID_B0))
tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
 
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
if (IS_SKYLAKE(dev) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_B0)) {
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
}
 
/* WaDisableSTUnitPowerOptimization:skl,bxt */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
 
return 0;
}
 
static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
 
for (i = 0; i < 3; i++) {
u8 ss;
 
/*
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
if (hweight8(dev_priv->info.subslice_7eu[i]) != 1)
continue;
 
/*
* subslice_7eu[i] != 0 (because of the check above) and
* ss_max == 4 (maximum number of subslices possible per slice)
*
* -> 0 <= ss <= 3;
*/
ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
vals[i] = 3 - ss;
}
 
if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
return 0;
 
/* Tune IZ hashing. See intel_device_info_runtime_init() */
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
GEN9_IZ_HASHING_MASK(2) |
GEN9_IZ_HASHING_MASK(1) |
GEN9_IZ_HASHING_MASK(0),
GEN9_IZ_HASHING(2, vals[2]) |
GEN9_IZ_HASHING(1, vals[1]) |
GEN9_IZ_HASHING(0, vals[0]));
 
return 0;
}
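 
skl_tune_iz_hashing() maps each slice that has exactly one 7-EU subslice to vals[i] = 3 - ss, where ss is the index of that subslice. A standalone sketch of the same computation, using GCC builtins in place of the kernel's hweight8()/ffs() and made-up subslice_7eu masks:
 
#include <stdio.h>
 
int main(void)
{
	unsigned char subslice_7eu[3] = { 0x4, 0x3, 0x1 }; /* hypothetical */
	int vals[3] = { 0, 0, 0 };
	int i;
 
	for (i = 0; i < 3; i++) {
		int ss;
 
		/* only slices with exactly one 7-EU subslice participate */
		if (__builtin_popcount(subslice_7eu[i]) != 1)
			continue;
 
		ss = __builtin_ffs(subslice_7eu[i]) - 1; /* 0..3 */
		vals[i] = 3 - ss;
	}
 
	/* 0x4 -> ss=2 -> val 1; 0x3 has two bits set -> skipped;
	 * 0x1 -> ss=0 -> val 3 */
	printf("%d %d %d\n", vals[0], vals[1], vals[2]); /* 1 0 3 */
	return 0;
}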
 
static int skl_init_workarounds(struct intel_engine_cs *ring)
{
int ret;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
ret = gen9_init_workarounds(ring);
if (ret)
return ret;
 
if (INTEL_REVID(dev) <= SKL_REVID_D0) {
/* WaDisableHDCInvalidation:skl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
BDW_DISABLE_HDC_INVALIDATION);
 
/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
I915_WRITE(FF_SLICE_CS_CHICKEN2,
_MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
}
 
/* GEN8_L3SQCREG4 has a dependency on the WA batch, so any new changes
* involving this register should also be added to the WA batch as
* required.
*/
if (INTEL_REVID(dev) <= SKL_REVID_E0)
/* WaDisableLSQCROPERFforOCL:skl */
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_RO_PERF_DIS);
 
/* WaEnableGapsTsvCreditFix:skl */
if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
}
 
/* WaDisablePowerCompilerClockGating:skl */
if (INTEL_REVID(dev) == SKL_REVID_B0)
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
if (INTEL_REVID(dev) <= SKL_REVID_D0) {
/*
* Use Force Non-Coherent whenever executing a 3D context. This
* is a workaround for a possible hang in the unlikely event
* a TLB invalidation occurs during a PSD flush.
*/
/* WaForceEnableNonCoherent:skl */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT |
HDC_DONOT_FETCH_MEM_WHEN_MASKED);
HDC_FORCE_NON_COHERENT);
}
 
if (INTEL_REVID(dev) == SKL_REVID_C0 ||
INTEL_REVID(dev) == SKL_REVID_D0)
/* WaBarrierPerformanceFixDisable:skl */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE |
HDC_BARRIER_PERFORMANCE_DISABLE);
 
/* WaDisableSbeCacheDispatchPortSharing:skl */
if (INTEL_REVID(dev) <= SKL_REVID_F0) {
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
 
return skl_tune_iz_hashing(ring);
}
 
static int bxt_init_workarounds(struct intel_engine_cs *ring)
{
int ret;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
ret = gen9_init_workarounds(ring);
if (ret)
return ret;
 
/* WaStoreMultiplePTEenable:bxt */
/* This is a requirement according to the hardware specification */
if (INTEL_REVID(dev) == BXT_REVID_A0)
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
 
/* WaSetClckGatingDisableMedia:bxt */
if (INTEL_REVID(dev) == BXT_REVID_A0) {
I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
}
 
/* WaDisableThreadStallDopClockGating:bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
STALL_DOP_GATING_DISABLE);
 
/* WaDisableSbeCacheDispatchPortSharing:bxt */
if (INTEL_REVID(dev) <= BXT_REVID_B0) {
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
 
return 0;
}
 
825,6 → 1145,12
if (IS_CHERRYVIEW(dev))
return chv_init_workarounds(ring);
 
if (IS_SKYLAKE(dev))
return skl_init_workarounds(ring);
 
if (IS_BROXTON(dev))
return bxt_init_workarounds(ring);
 
return 0;
}
 
844,9 → 1170,9
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
/* Required for the hardware to program scanline values for waiting */
861,12 → 1187,6
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
if (INTEL_INFO(dev)->gen >= 5) {
ret = intel_init_pipe_control(ring);
if (ret)
return ret;
}
 
if (IS_GEN6(dev)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
877,7 → 1197,7
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
 
if (INTEL_INFO(dev)->gen >= 6)
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
if (HAS_L3_DPF(dev))
900,10 → 1220,11
intel_fini_pipe_control(ring);
}
 
static int gen8_rcs_signal(struct intel_engine_cs *signaller,
static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 8
struct intel_engine_cs *signaller = signaller_req->ring;
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *waiter;
913,15 → 1234,17
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS
 
ret = intel_ring_begin(signaller, num_dwords);
ret = intel_ring_begin(signaller_req, num_dwords);
if (ret)
return ret;
 
for_each_ring(waiter, dev_priv, i) {
u32 seqno;
u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
 
seqno = i915_gem_request_get_seqno(signaller_req);
intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_QW_WRITE |
928,7 → 1251,7
PIPE_CONTROL_FLUSH_ENABLE);
intel_ring_emit(signaller, lower_32_bits(gtt_offset));
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, 0);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->id));
938,10 → 1261,11
return 0;
}
 
static int gen8_xcs_signal(struct intel_engine_cs *signaller,
static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
struct intel_engine_cs *signaller = signaller_req->ring;
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *waiter;
951,21 → 1275,23
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS
 
ret = intel_ring_begin(signaller, num_dwords);
ret = intel_ring_begin(signaller_req, num_dwords);
if (ret)
return ret;
 
for_each_ring(waiter, dev_priv, i) {
u32 seqno;
u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
 
seqno = i915_gem_request_get_seqno(signaller_req);
intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
MI_FLUSH_DW_OP_STOREDW);
intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
MI_FLUSH_DW_USE_GTT);
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->id));
intel_ring_emit(signaller, 0);
974,9 → 1300,10
return 0;
}
 
static int gen6_signal(struct intel_engine_cs *signaller,
static int gen6_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
struct intel_engine_cs *signaller = signaller_req->ring;
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *useless;
987,7 → 1314,7
num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS
 
ret = intel_ring_begin(signaller, num_dwords);
ret = intel_ring_begin(signaller_req, num_dwords);
if (ret)
return ret;
 
994,9 → 1321,10
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = signaller->semaphore.mbox.signal[i];
if (mbox_reg != GEN6_NOSYNC) {
u32 seqno = i915_gem_request_get_seqno(signaller_req);
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(signaller, mbox_reg);
intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
intel_ring_emit(signaller, seqno);
}
}
 
1010,21 → 1338,21
/**
* gen6_add_request - Update the semaphore mailbox registers
*
* @ring - ring that is adding a request
* @seqno - return seqno stuck into the ring
* @request - request to write to the ring
*
* Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore.
*/
static int
gen6_add_request(struct intel_engine_cs *ring)
gen6_add_request(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
int ret;
 
if (ring->semaphore.signal)
ret = ring->semaphore.signal(ring, 4);
ret = ring->semaphore.signal(req, 4);
else
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(req, 4);
 
if (ret)
return ret;
1031,7 → 1359,7
 
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, i915_gem_request_get_seqno(req));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
 
1054,14 → 1382,15
*/
 
static int
gen8_ring_sync(struct intel_engine_cs *waiter,
gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
struct intel_engine_cs *signaller,
u32 seqno)
{
struct intel_engine_cs *waiter = waiter_req->ring;
struct drm_i915_private *dev_priv = waiter->dev->dev_private;
int ret;
 
ret = intel_ring_begin(waiter, 4);
ret = intel_ring_begin(waiter_req, 4);
if (ret)
return ret;
 
1079,10 → 1408,11
}
 
static int
gen6_ring_sync(struct intel_engine_cs *waiter,
gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
struct intel_engine_cs *signaller,
u32 seqno)
{
struct intel_engine_cs *waiter = waiter_req->ring;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
1097,7 → 1427,7
 
WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
ret = intel_ring_begin(waiter, 4);
ret = intel_ring_begin(waiter_req, 4);
if (ret)
return ret;
 
1128,8 → 1458,9
} while (0)
 
static int
pc_render_add_request(struct intel_engine_cs *ring)
pc_render_add_request(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
 
1141,7 → 1472,7
* incoherence by flushing the 6 PIPE_NOTIFY buffers out to
* memory before requesting an interrupt.
*/
ret = intel_ring_begin(ring, 32);
ret = intel_ring_begin(req, 32);
if (ret)
return ret;
 
1149,7 → 1480,7
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, i915_gem_request_get_seqno(req));
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
1168,7 → 1499,7
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, i915_gem_request_get_seqno(req));
intel_ring_emit(ring, 0);
__intel_ring_advance(ring);
 
1318,76 → 1649,15
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
 
void intel_ring_setup_status_page(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 mmio = 0;
 
/* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
if (IS_GEN7(dev)) {
switch (ring->id) {
case RCS:
mmio = RENDER_HWS_PGA_GEN7;
break;
case BCS:
mmio = BLT_HWS_PGA_GEN7;
break;
/*
* VCS2 actually doesn't exist on Gen7. The case is only
* here to silence the gcc switch check warning.
*/
case VCS2:
case VCS:
mmio = BSD_HWS_PGA_GEN7;
break;
case VECS:
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
} else if (IS_GEN6(ring->dev)) {
mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
mmio = RING_HWS_PGA(ring->mmio_base);
}
 
I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
POSTING_READ(mmio);
 
/*
* Flush the TLB for this page
*
* FIXME: These two bits have disappeared on gen8, so a question
* arises: do we still need this and if so how should we go about
* invalidating the TLB?
*/
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
u32 reg = RING_INSTPM(ring->mmio_base);
 
/* ring should be idle before issuing a sync flush */
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
I915_WRITE(reg,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
INSTPM_SYNC_FLUSH));
if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
1000))
DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
ring->name);
}
}
 
static int
bsd_ring_flush(struct intel_engine_cs *ring,
bsd_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains)
{
struct intel_engine_cs *ring = req->ring;
int ret;
 
ret = intel_ring_begin(ring, 2);
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
 
1398,17 → 1668,18
}
 
static int
i9xx_add_request(struct intel_engine_cs *ring)
i9xx_add_request(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
int ret;
 
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
 
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, ring->outstanding_lazy_seqno);
intel_ring_emit(ring, i915_gem_request_get_seqno(req));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
 
1540,13 → 1811,14
}
 
static int
i965_dispatch_execbuffer(struct intel_engine_cs *ring,
i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 length,
unsigned flags)
unsigned dispatch_flags)
{
struct intel_engine_cs *ring = req->ring;
int ret;
 
ret = intel_ring_begin(ring, 2);
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
 
1553,7 → 1825,8
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE_I965));
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
 
1565,14 → 1838,15
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_dispatch_execbuffer(struct intel_engine_cs *ring,
i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned flags)
unsigned dispatch_flags)
{
struct intel_engine_cs *ring = req->ring;
u32 cs_offset = ring->scratch.gtt_offset;
int ret;
 
ret = intel_ring_begin(ring, 6);
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
 
1585,11 → 1859,11
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
 
if ((flags & I915_DISPATCH_PINNED) == 0) {
if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
 
ret = intel_ring_begin(ring, 6 + 2);
ret = intel_ring_begin(req, 6 + 2);
if (ret)
return ret;
 
1612,12 → 1886,13
offset = cs_offset;
}
 
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
 
intel_ring_emit(ring, MI_BATCH_BUFFER);
intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE));
intel_ring_emit(ring, offset + len - 8);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
1626,18 → 1901,20
}
 
static int
i915_dispatch_execbuffer(struct intel_engine_cs *ring,
i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned flags)
unsigned dispatch_flags)
{
struct intel_engine_cs *ring = req->ring;
int ret;
 
ret = intel_ring_begin(ring, 2);
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
 
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE));
intel_ring_advance(ring);
 
return 0;
1759,13 → 2036,13
return 0;
}
 
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
drm_gem_object_unreference(&ringbuf->obj->base);
ringbuf->obj = NULL;
}
 
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
struct drm_i915_gem_object *obj;
1786,29 → 2063,70
return 0;
}
 
struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
{
struct intel_ringbuffer *ring;
int ret;
 
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (ring == NULL)
return ERR_PTR(-ENOMEM);
 
ring->ring = engine;
 
ring->size = size;
/* Workaround an erratum on the i830 which causes a hang if
* the TAIL pointer points to within the last 2 cachelines
* of the buffer.
*/
ring->effective_size = size;
if (IS_I830(engine->dev) || IS_845G(engine->dev))
ring->effective_size -= 2 * CACHELINE_BYTES;
 
ring->last_retired_head = -1;
intel_ring_update_space(ring);
 
ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
if (ret) {
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
engine->name, ret);
kfree(ring);
return ERR_PTR(ret);
}
 
return ring;
}
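 
intel_engine_create_ringbuffer() shaves two cachelines off effective_size on i830/845G so the TAIL pointer never lands in the last two cachelines of the buffer. A back-of-the-envelope check of that adjustment, assuming the usual 4 KiB page and the CACHELINE_BYTES value from intel_ringbuffer.h:
 
#include <stdio.h>
 
#define PAGE_SIZE       4096
#define CACHELINE_BYTES 64
 
int main(void)
{
	int size = 32 * PAGE_SIZE;	/* as requested in intel_init_ring_buffer() */
	int effective_size = size;
 
	/* on i830/845G the TAIL must stay out of the last 2 cachelines */
	effective_size -= 2 * CACHELINE_BYTES;
 
	printf("size=%d effective=%d\n", size, effective_size);
	/* size=131072 effective=130944 */
	return 0;
}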
 
void
intel_ringbuffer_free(struct intel_ringbuffer *ring)
{
intel_destroy_ringbuffer_obj(ring);
kfree(ring);
}
 
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_engine_cs *ring)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
struct intel_ringbuffer *ringbuf;
int ret;
 
if (ringbuf == NULL) {
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
if (!ringbuf)
return -ENOMEM;
ring->buffer = ringbuf;
}
WARN_ON(ring->buffer);
 
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->execlist_queue);
ringbuf->size = 32 * PAGE_SIZE;
ringbuf->ring = ring;
i915_gem_batch_pool_init(dev, &ring->batch_pool);
memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
init_waitqueue_head(&ring->irq_queue);
 
ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
if (IS_ERR(ringbuf))
return PTR_ERR(ringbuf);
ring->buffer = ringbuf;
 
if (I915_NEED_GFX_HWS(dev)) {
ret = init_status_page(ring);
if (ret)
1820,14 → 2138,6
goto error;
}
 
if (ringbuf->obj == NULL) {
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
ring->name, ret);
goto error;
}
 
ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
1835,28 → 2145,15
intel_destroy_ringbuffer_obj(ringbuf);
goto error;
}
}
 
/* Workaround an erratum on the i830 which causes a hang if
* the TAIL pointer points to within the last 2 cachelines
* of the buffer.
*/
ringbuf->effective_size = ringbuf->size;
if (IS_I830(dev) || IS_845G(dev))
ringbuf->effective_size -= 2 * CACHELINE_BYTES;
 
ret = i915_cmd_parser_init_ring(ring);
if (ret)
goto error;
 
ret = ring->init(ring);
if (ret)
goto error;
 
return 0;
 
error:
kfree(ringbuf);
intel_ringbuffer_free(ringbuf);
ring->buffer = NULL;
return ret;
}
1864,131 → 2161,64
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv;
struct intel_ringbuffer *ringbuf;
 
if (!intel_ring_initialized(ring))
return;
 
dev_priv = to_i915(ring->dev);
ringbuf = ring->buffer;
 
intel_stop_ring_buffer(ring);
WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
intel_unpin_ringbuffer_obj(ringbuf);
intel_destroy_ringbuffer_obj(ringbuf);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
intel_unpin_ringbuffer_obj(ring->buffer);
intel_ringbuffer_free(ring->buffer);
ring->buffer = NULL;
 
if (ring->cleanup)
ring->cleanup(ring);
 
// cleanup_status_page(ring);
cleanup_status_page(ring);
 
i915_cmd_parser_fini_ring(ring);
 
kfree(ringbuf);
ring->buffer = NULL;
i915_gem_batch_pool_fini(&ring->batch_pool);
}
 
static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_request *request;
u32 seqno = 0;
unsigned space;
int ret;
 
if (ringbuf->last_retired_head != -1) {
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
 
ringbuf->space = intel_ring_space(ringbuf);
if (ringbuf->space >= n)
if (intel_ring_space(ringbuf) >= n)
return 0;
}
 
/* The whole point of reserving space is to not wait! */
WARN_ON(ringbuf->reserved_in_use);
 
list_for_each_entry(request, &ring->request_list, list) {
if (__intel_ring_space(request->tail, ringbuf->tail,
ringbuf->size) >= n) {
seqno = request->seqno;
space = __intel_ring_space(request->postfix, ringbuf->tail,
ringbuf->size);
if (space >= n)
break;
}
}
 
if (seqno == 0)
if (WARN_ON(&request->list == &ring->request_list))
return -ENOSPC;
 
ret = i915_wait_seqno(ring, seqno);
ret = i915_wait_request(request);
if (ret)
return ret;
 
i915_gem_retire_requests_ring(ring);
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
 
ringbuf->space = intel_ring_space(ringbuf);
ringbuf->space = space;
return 0;
}
 
static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ringbuffer *ringbuf = ring->buffer;
unsigned long end;
int ret;
 
ret = intel_ring_wait_request(ring, n);
if (ret != -ENOSPC)
return ret;
 
/* force the tail write in case we have been skipping them */
__intel_ring_advance(ring);
 
/* With GEM the hangcheck timer should kick us out of the loop,
* leaving it early runs the risk of corrupting GEM state (due
* to running on almost untested codepaths). But on resume
* timers don't work yet, so prevent a complete hang in that
* case by choosing an insanely large timeout. */
end = jiffies + 60 * HZ;
 
trace_i915_ring_wait_begin(ring);
do {
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->space = intel_ring_space(ringbuf);
if (ringbuf->space >= n) {
ret = 0;
break;
}
 
msleep(1);
 
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
break;
 
if (time_after(jiffies, end)) {
ret = -EBUSY;
break;
}
} while (1);
trace_i915_ring_wait_end(ring);
return ret;
}
 
static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
{
uint32_t __iomem *virt;
struct intel_ringbuffer *ringbuf = ring->buffer;
int rem = ringbuf->size - ringbuf->tail;
 
if (ringbuf->space < rem) {
int ret = ring_wait_for_space(ring, rem);
if (ret)
return ret;
}
 
virt = ringbuf->virtual_start + ringbuf->tail;
rem /= 4;
while (rem--)
1995,80 → 2225,154
iowrite32(MI_NOOP, virt++);
 
ringbuf->tail = 0;
ringbuf->space = intel_ring_space(ringbuf);
 
return 0;
intel_ring_update_space(ringbuf);
}
 
int intel_ring_idle(struct intel_engine_cs *ring)
{
u32 seqno;
int ret;
struct drm_i915_gem_request *req;
 
/* We need to add any requests required to flush the objects and ring */
if (ring->outstanding_lazy_seqno) {
ret = i915_add_request(ring, NULL);
if (ret)
return ret;
}
 
/* Wait upon the last request to be completed */
if (list_empty(&ring->request_list))
return 0;
 
seqno = list_entry(ring->request_list.prev,
req = list_entry(ring->request_list.prev,
struct drm_i915_gem_request,
list)->seqno;
list);
 
return i915_wait_seqno(ring, seqno);
/* Make sure we do not trigger any retires */
return __i915_wait_request(req,
atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
to_i915(ring->dev)->mm.interruptible,
NULL, NULL);
}
 
static int
intel_ring_alloc_seqno(struct intel_engine_cs *ring)
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
if (ring->outstanding_lazy_seqno)
request->ringbuf = request->ring->buffer;
return 0;
}
 
if (ring->preallocated_lazy_request == NULL) {
struct drm_i915_gem_request *request;
int intel_ring_reserve_space(struct drm_i915_gem_request *request)
{
/*
* The first call merely notes the reserve request and is common for
* all back ends. The subsequent localised _begin() call actually
* ensures that the reservation is available. Without the begin, if
* the request creator immediately submitted the request without
* adding any commands to it, then there might not actually be
* sufficient room for the submission commands.
*/
intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
 
request = kmalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;
return intel_ring_begin(request, 0);
}
 
ring->preallocated_lazy_request = request;
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
{
WARN_ON(ringbuf->reserved_size);
WARN_ON(ringbuf->reserved_in_use);
 
ringbuf->reserved_size = size;
}
 
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
{
WARN_ON(ringbuf->reserved_in_use);
 
ringbuf->reserved_size = 0;
ringbuf->reserved_in_use = false;
}
 
static int __intel_ring_prepare(struct intel_engine_cs *ring,
int bytes)
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
{
WARN_ON(ringbuf->reserved_in_use);
 
ringbuf->reserved_in_use = true;
ringbuf->reserved_tail = ringbuf->tail;
}
 
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
{
WARN_ON(!ringbuf->reserved_in_use);
if (ringbuf->tail > ringbuf->reserved_tail) {
WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
"request reserved size too small: %d vs %d!\n",
ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
} else {
/*
* The ring was wrapped while the reserved space was in use.
* That means that some unknown amount of the ring tail was
* no-op filled and skipped. Thus simply adding the ring size
* to the tail and doing the above space check will not work.
* Rather than attempt to track how much of the tail was
* skipped, it is much simpler to accept that the sanity
* check is occasionally skipped as well.
*/
}
 
ringbuf->reserved_size = 0;
ringbuf->reserved_in_use = false;
}
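 
The reserved-space helpers above form a small reserve/use/end state machine around ringbuf->reserved_*. A hedged sketch of the intended call order; submit_with_reservation() is a hypothetical caller and error handling is elided:
 
static int submit_with_reservation(struct drm_i915_gem_request *req)
{
	struct intel_ringbuffer *ringbuf = req->ringbuf;
	int ret;
 
	/* 1. Note the reservation and make sure it can be honoured. */
	ret = intel_ring_reserve_space(req);
	if (ret)
		return ret;
 
	/* ... the caller emits its own commands here via
	 * intel_ring_begin()/intel_ring_emit() ... */
 
	/* 2. Switch to consuming the reserved words for the request
	 * postamble (breadcrumb interrupt etc.). */
	intel_ring_reserved_space_use(ringbuf);
 
	/* ... emit the add_request commands ... */
 
	/* 3. Verify the postamble stayed within the reservation. */
	intel_ring_reserved_space_end(ringbuf);
	return 0;
}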
 
static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
int ret;
int remain_usable = ringbuf->effective_size - ringbuf->tail;
int remain_actual = ringbuf->size - ringbuf->tail;
int ret, total_bytes, wait_bytes = 0;
bool need_wrap = false;
 
if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
return ret;
if (ringbuf->reserved_in_use)
total_bytes = bytes;
else
total_bytes = bytes + ringbuf->reserved_size;
 
if (unlikely(bytes > remain_usable)) {
/*
* Not enough space for the basic request. So need to flush
* out the remainder and then wait for base + reserved.
*/
wait_bytes = remain_actual + total_bytes;
need_wrap = true;
} else {
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
* falls off the end. So we only need to wait for the
* reserved size after flushing out the remainder.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
}
}
 
if (unlikely(ringbuf->space < bytes)) {
ret = ring_wait_for_space(ring, bytes);
if (wait_bytes) {
ret = ring_wait_for_space(ring, wait_bytes);
if (unlikely(ret))
return ret;
 
if (need_wrap)
__wrap_ring_buffer(ringbuf);
}
 
return 0;
}
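 
__intel_ring_prepare() distinguishes three cases: the basic request does not fit before effective_size, the request fits but the reserve falls off the end, or no wrap is needed at all. A runnable sketch of that branch logic with made-up numbers:
 
#include <stdio.h>
#include <stdbool.h>
 
int main(void)
{
	int size = 4096, effective_size = 4032, tail = 4000;
	int space = 500, reserved_size = 160;
	int bytes = 64;				/* basic request */
	int remain_usable = effective_size - tail;	/* 32 */
	int remain_actual = size - tail;		/* 96 */
	int total_bytes = bytes + reserved_size;	/* 224 */
	int wait_bytes = 0;
	bool need_wrap = false;
 
	if (bytes > remain_usable) {
		/* basic request doesn't fit: flush remainder, wait for all */
		wait_bytes = remain_actual + total_bytes;
		need_wrap = true;
	} else if (total_bytes > remain_usable) {
		/* request fits, but the reserve falls off the end */
		wait_bytes = remain_actual + reserved_size;
		need_wrap = true;
	} else if (total_bytes > space) {
		wait_bytes = total_bytes;	/* just wait, no wrap */
	}
 
	printf("wait=%d wrap=%d\n", wait_bytes, need_wrap);
	/* bytes=64 > remain_usable=32 -> wait = 96 + 224 = 320, wrap */
	return 0;
}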
 
int intel_ring_begin(struct intel_engine_cs *ring,
int intel_ring_begin(struct drm_i915_gem_request *req,
int num_dwords)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *ring;
struct drm_i915_private *dev_priv;
int ret;
 
WARN_ON(req == NULL);
ring = req->ring;
dev_priv = ring->dev->dev_private;
 
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
2078,18 → 2382,14
if (ret)
return ret;
 
/* Preallocate the olr before touching the ring */
ret = intel_ring_alloc_seqno(ring);
if (ret)
return ret;
 
ring->buffer->space -= num_dwords * sizeof(uint32_t);
return 0;
}
 
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct intel_engine_cs *ring)
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
int ret;
 
2097,7 → 2397,7
return 0;
 
num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
ret = intel_ring_begin(ring, num_dwords);
ret = intel_ring_begin(req, num_dwords);
if (ret)
return ret;
 
2114,8 → 2414,6
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
BUG_ON(ring->outstanding_lazy_seqno);
 
if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
2160,13 → 2458,14
_MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
 
static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
struct intel_engine_cs *ring = req->ring;
uint32_t cmd;
int ret;
 
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
 
2173,6 → 2472,14
cmd = MI_FLUSH_DW;
if (INTEL_INFO(ring->dev)->gen >= 8)
cmd += 1;
 
/* We always require a command barrier so that subsequent
* commands, such as breadcrumb interrupts, are strictly ordered
* wrt the contents of the write cache being flushed to memory
* (and thus being coherent from the CPU).
*/
cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 
/*
* Bspec vol 1c.5 - video engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
2180,8 → 2487,8
* Post-Sync Operation field is a value of 1h or 3h."
*/
if (invalidate & I915_GEM_GPU_DOMAINS)
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
if (INTEL_INFO(ring->dev)->gen >= 8) {
2196,19 → 2503,23
}
 
static int
gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned flags)
unsigned dispatch_flags)
{
bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
struct intel_engine_cs *ring = req->ring;
bool ppgtt = USES_PPGTT(ring->dev) &&
!(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
 
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
 
/* FIXME(BDW): Address space and security selectors. */
intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
(dispatch_flags & I915_DISPATCH_RS ?
MI_BATCH_RESOURCE_STREAMER : 0));
intel_ring_emit(ring, lower_32_bits(offset));
intel_ring_emit(ring, upper_32_bits(offset));
intel_ring_emit(ring, MI_NOOP);
2218,20 → 2529,23
}
 
static int
hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned flags)
unsigned dispatch_flags)
{
struct intel_engine_cs *ring = req->ring;
int ret;
 
ret = intel_ring_begin(ring, 2);
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
 
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
(flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
(dispatch_flags & I915_DISPATCH_RS ?
MI_BATCH_RESOURCE_STREAMER : 0));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
2240,19 → 2554,21
}
 
static int
gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned flags)
unsigned dispatch_flags)
{
struct intel_engine_cs *ring = req->ring;
int ret;
 
ret = intel_ring_begin(ring, 2);
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
 
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE_I965));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
2262,21 → 2578,29
 
/* Blitter support (SandyBridge+) */
 
static int gen6_ring_flush(struct intel_engine_cs *ring,
static int gen6_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
 
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
 
cmd = MI_FLUSH_DW;
if (INTEL_INFO(ring->dev)->gen >= 8)
if (INTEL_INFO(dev)->gen >= 8)
cmd += 1;
 
/* We always require a command barrier so that subsequent
* commands, such as breadcrumb interrupts, are strictly ordered
* wrt the contents of the write cache being flushed to memory
* (and thus being coherent from the CPU).
*/
cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 
/*
* Bspec vol 1c.3 - blitter engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
2284,11 → 2608,10
* Post-Sync Operation field is a value of 1h or 3h."
*/
if (invalidate & I915_GEM_DOMAIN_RENDER)
cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
MI_FLUSH_DW_OP_STOREDW;
cmd |= MI_INVALIDATE_TLB;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
if (INTEL_INFO(ring->dev)->gen >= 8) {
if (INTEL_INFO(dev)->gen >= 8) {
intel_ring_emit(ring, 0); /* upper addr */
intel_ring_emit(ring, 0); /* value */
} else {
2297,13 → 2620,6
}
intel_ring_advance(ring);
 
if (!invalidate && flush) {
if (IS_GEN7(dev))
return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
else if (IS_BROADWELL(dev))
dev_priv->fbc.need_sw_cache_clean = true;
}
 
return 0;
}
 
2336,7 → 2652,7
}
}
 
ring->init_context = intel_ring_workarounds_emit;
ring->init_context = intel_rcs_ctx_init;
ring->add_request = gen6_add_request;
ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq;
2351,6 → 2667,7
GEN8_RING_SEMAPHORE_INIT;
}
} else if (INTEL_INFO(dev)->gen >= 6) {
ring->init_context = intel_rcs_ctx_init;
ring->add_request = gen6_add_request;
ring->flush = gen7_render_ring_flush;
if (INTEL_INFO(dev)->gen == 6)
2421,7 → 2738,7
ring->dispatch_execbuffer = i830_dispatch_execbuffer;
else
ring->dispatch_execbuffer = i915_dispatch_execbuffer;
ring->init = init_render_ring;
ring->init_hw = init_render_ring;
ring->cleanup = render_ring_cleanup;
 
/* Workaround batchbuffer to combat CS tlb bug. */
2443,9 → 2760,19
ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
}
 
return intel_init_ring_buffer(dev, ring);
ret = intel_init_ring_buffer(dev, ring);
if (ret)
return ret;
 
if (INTEL_INFO(dev)->gen >= 5) {
ret = intel_init_pipe_control(ring);
if (ret)
return ret;
}
 
return 0;
}
 
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
2514,14 → 2841,13
}
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
}
ring->init = init_ring_common;
ring->init_hw = init_ring_common;
 
return intel_init_ring_buffer(dev, ring);
}
 
/**
* Initialize the second BSD ring for Broadwell GT3.
* It is noted that this only exists on Broadwell GT3.
* Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
*/
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
2528,11 → 2854,6
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
 
if ((INTEL_INFO(dev)->gen != 8)) {
DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
return -EINVAL;
}
 
ring->name = "bsd2 ring";
ring->id = VCS2;
 
2553,7 → 2874,7
ring->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT;
}
ring->init = init_ring_common;
ring->init_hw = init_ring_common;
 
return intel_init_ring_buffer(dev, ring);
}
2610,7 → 2931,7
ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
ring->init = init_ring_common;
ring->init_hw = init_ring_common;
 
return intel_init_ring_buffer(dev, ring);
}
2661,24 → 2982,25
ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
ring->init = init_ring_common;
ring->init_hw = init_ring_common;
 
return intel_init_ring_buffer(dev, ring);
}
 
int
intel_ring_flush_all_caches(struct intel_engine_cs *ring)
intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
int ret;
 
if (!ring->gpu_caches_dirty)
return 0;
 
ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
 
trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
 
ring->gpu_caches_dirty = false;
return 0;
2685,8 → 3007,9
}
 
int
intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
uint32_t flush_domains;
int ret;
 
2694,11 → 3017,11
if (ring->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
 
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
 
trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 
ring->gpu_caches_dirty = false;
return 0;
/drivers/video/drm/i915/intel_ringbuffer.h
2,6 → 2,7
#define _INTEL_RINGBUFFER_H_
 
#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
 
#define I915_CMD_HASH_ORDER 9
 
11,6 → 12,7
* workarounds!
*/
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
 
/*
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
99,18 → 101,14
 
struct intel_engine_cs *ring;
 
/*
* FIXME: This backpointer is an artifact of the history of how the
* execlist patches came into being. It will get removed once the basic
* code has landed.
*/
struct intel_context *FIXME_lrc_ctx;
 
u32 head;
u32 tail;
int space;
int size;
int effective_size;
int reserved_size;
int reserved_tail;
bool reserved_in_use;
 
/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU
123,6 → 121,28
u32 last_retired_head;
};
 
struct intel_context;
struct drm_i915_reg_descriptor;
 
/*
* we use a single page to load ctx workarounds so all of these
* values are referred in terms of dwords
*
* struct i915_wa_ctx_bb:
* offset: specifies the batch starting position; also helpful in case
* we want to have multiple batches at different offsets based on
* some criteria. It is not a requirement at the moment but provides
* an option for future use.
* size: size of the batch in DWORDS
*/
struct i915_ctx_workarounds {
struct i915_wa_ctx_bb {
u32 offset;
u32 size;
} indirect_ctx, per_ctx;
struct drm_i915_gem_object *obj;
};
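 
The offsets and sizes in i915_ctx_workarounds are stored in dwords, per the comment above. A quick sketch of the dword-to-byte conversion, reusing the CACHELINE_DWORDS definition added earlier in this header; the offset and size values are made up:
 
#include <stdio.h>
#include <stdint.h>
 
#define CACHELINE_BYTES  64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
 
int main(void)
{
	uint32_t offset_dw = 2 * CACHELINE_DWORDS;	/* 32 dwords in */
	uint32_t size_dw = 24;				/* batch length in dwords */
 
	printf("offset=%u bytes, size=%u bytes\n",
	       (unsigned)(offset_dw * sizeof(uint32_t)),
	       (unsigned)(size_dw * sizeof(uint32_t)));
	/* offset=128 bytes, size=96 bytes */
	return 0;
}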
 
struct intel_engine_cs {
const char *name;
enum intel_ring_id {
138,25 → 158,32
struct drm_device *dev;
struct intel_ringbuffer *buffer;
 
/*
* A pool of objects to use as shadow copies of client batch buffers
* when the command parser is enabled. Prevents the client from
* modifying the batch contents after software parsing.
*/
struct i915_gem_batch_pool batch_pool;
 
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
 
unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
struct drm_i915_gem_request *trace_irq_req;
bool __must_check (*irq_get)(struct intel_engine_cs *ring);
void (*irq_put)(struct intel_engine_cs *ring);
 
int (*init)(struct intel_engine_cs *ring);
int (*init_hw)(struct intel_engine_cs *ring);
 
int (*init_context)(struct intel_engine_cs *ring,
struct intel_context *ctx);
int (*init_context)(struct drm_i915_gem_request *req);
 
void (*write_tail)(struct intel_engine_cs *ring,
u32 value);
int __must_check (*flush)(struct intel_engine_cs *ring,
int __must_check (*flush)(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains);
int (*add_request)(struct intel_engine_cs *ring);
int (*add_request)(struct drm_i915_gem_request *req);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
167,11 → 194,12
bool lazy_coherency);
void (*set_seqno)(struct intel_engine_cs *ring,
u32 seqno);
int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
u64 offset, u32 length,
unsigned flags);
unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
#define I915_DISPATCH_RS 0x4
void (*cleanup)(struct intel_engine_cs *ring);
 
/* GEN8 signal/wait table - never trust comments!
225,10 → 253,10
};
 
/* AKA wait() */
int (*sync_to)(struct intel_engine_cs *ring,
struct intel_engine_cs *to,
int (*sync_to)(struct drm_i915_gem_request *to_req,
struct intel_engine_cs *from,
u32 seqno);
int (*signal)(struct intel_engine_cs *signaller,
int (*signal)(struct drm_i915_gem_request *signaller_req,
/* num_dwords needed by caller */
unsigned int num_dwords);
} semaphore;
239,12 → 267,12
struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct intel_ringbuffer *ringbuf);
int (*emit_flush)(struct intel_ringbuffer *ringbuf,
int (*emit_request)(struct drm_i915_gem_request *request);
int (*emit_flush)(struct drm_i915_gem_request *request,
u32 invalidate_domains,
u32 flush_domains);
int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
u64 offset, unsigned flags);
int (*emit_bb_start)(struct drm_i915_gem_request *req,
u64 offset, unsigned dispatch_flags);
 
/**
* List of objects currently involved in rendering from the
251,7 → 279,7
* ringbuffer.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
* flushed, not necessarily primitives. last_read_req
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
265,12 → 293,13
struct list_head request_list;
 
/**
* Do we have some not yet emitted requests outstanding?
* Seqno of request most recently submitted to request_list.
* Used exclusively by hang checker to avoid grabbing lock while
* inspecting request list.
*/
struct drm_i915_gem_request *preallocated_lazy_request;
u32 outstanding_lazy_seqno;
u32 last_submitted_seqno;
 
bool gpu_caches_dirty;
bool fbc_dirty;
 
wait_queue_head_t irq_queue;
 
296,7 → 325,7
/*
* Table of registers allowed in commands that read/write registers.
*/
const u32 *reg_table;
const struct drm_i915_reg_descriptor *reg_table;
int reg_count;
 
/*
303,7 → 332,7
* Table of registers allowed in commands that read/write registers, but
* only from the DRM master.
*/
const u32 *master_reg_table;
const struct drm_i915_reg_descriptor *master_reg_table;
int master_reg_count;
 
/*
348,6 → 377,13
return idx;
}
 
static inline void
intel_flush_status_page(struct intel_engine_cs *ring, int reg)
{
drm_clflush_virt_range(&ring->status_page.page_addr[reg],
sizeof(uint32_t));
}
 
static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
int reg)
376,25 → 412,28
* 0x06: ring 2 head pointer (915-class)
* 0x10-0x1b: Context status DWords (GM45)
* 0x1f: Last written status offset. (GM45)
* 0x20-0x2f: Reserved (Gen6+)
*
* The area from dword 0x20 to 0x3ff is available for driver usage.
* The area from dword 0x30 to 0x3ff is available for driver usage.
*/
#define I915_GEM_HWS_INDEX 0x20
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
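A minimal sketch of how the driver-reserved dwords are consumed, assuming only the intel_read_status_page() accessor declared above:

/* Illustrative only: fetch the seqno the GPU stored via
 * MI_STORE_DWORD_INDEX into the driver-usable area. */
static inline u32 example_hws_seqno(struct intel_engine_cs *ring)
{
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}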
 
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf);
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);
 
void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
 
int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
u32 data)
{
408,14 → 447,14
ringbuf->tail &= ringbuf->size - 1;
}
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);
 
int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
 
void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);
427,7 → 466,6
int intel_init_vebox_ring_buffer(struct drm_device *dev);
 
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);
 
int init_workarounds_ring(struct intel_engine_cs *ring);
 
436,16 → 474,29
return ringbuf->tail;
}
 
static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
{
BUG_ON(ring->outstanding_lazy_seqno == 0);
return ring->outstanding_lazy_seqno;
}
/*
* Arbitrary size for largest possible 'add request' sequence. The code paths
* are complex and variable. Empirical measurement shows that the worst case
* is ILK at 136 words. Reserving too much is better than reserving too little
* as that allows for corner cases that might have been missed. So the figure
* has been rounded up to 160 words.
*/
#define MIN_SPACE_FOR_ADD_REQUEST 160
 
static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
ring->trace_irq_seqno = seqno;
}
/*
* Reserve space in the ring to guarantee that the i915_add_request() call
* will always have sufficient room to do its stuff. The request creation
* code calls this automatically.
*/
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
/* Cancel the reservation, e.g. because the request is being discarded. */
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
/* Use the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
/* Finish with the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
 
/* Legacy ringbuffer specific portion of reservation code: */
int intel_ring_reserve_space(struct drm_i915_gem_request *request);
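A hedged sketch of the intended call order for the reservation API above; error paths are trimmed, and the req->ringbuf back-pointer is assumed:

/* Illustrative only -- not the driver's actual request path. */
static int example_request_lifecycle(struct drm_i915_gem_request *req)
{
        /* At request creation: guarantee room for the final add-request. */
        int ret = intel_ring_reserve_space(req);
        if (ret)
                return ret;

        /* ... emit the request's own commands ... */
        /* (on abort instead: intel_ring_reserved_space_cancel(req->ringbuf)) */

        /* i915_add_request() brackets its own emission with use/end: */
        intel_ring_reserved_space_use(req->ringbuf);
        /* ... flush + seqno write, at most MIN_SPACE_FOR_ADD_REQUEST words ... */
        intel_ring_reserved_space_end(req->ringbuf);

        return 0;
}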
 
#endif /* _INTEL_RINGBUFFER_H_ */
/drivers/video/drm/i915/intel_runtime_pm.c
26,12 → 26,11
*
*/
 
//#include <linux/pm_runtime.h>
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
 
#include "i915_drv.h"
#include "intel_drv.h"
#include <drm/i915_powerwell.h>
#include <linux/vgaarb.h>
 
/**
* DOC: runtime pm
50,7 → 49,8
* present for a given platform.
*/
 
static struct i915_power_domains *hsw_pwr;
#define GEN9_ENABLE_DC5(dev) 0
#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)
 
#define for_each_power_well(i, power_well, domain_mask, power_domains) \
for (i = 0; \
65,6 → 65,25
i--) \
if ((power_well)->domains & (domain_mask))
 
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
int power_well_id);
 
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
DRM_DEBUG_KMS("enabling %s\n", power_well->name);
power_well->ops->enable(dev_priv, power_well);
power_well->hw_enabled = true;
}
 
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
DRM_DEBUG_KMS("disabling %s\n", power_well->name);
power_well->hw_enabled = false;
power_well->ops->disable(dev_priv, power_well);
}
 
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
118,7 → 137,7
}
 
/**
* intel_display_power_is_enabled - unlocked check for a power domain
* intel_display_power_is_enabled - check for a power domain
* @dev_priv: i915 device instance
* @domain: power domain to check
*
197,10 → 216,42
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
 
if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
gen8_irq_power_well_post_enable(dev_priv);
if (IS_BROADWELL(dev))
gen8_irq_power_well_post_enable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}
 
static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
struct drm_device *dev = dev_priv->dev;
 
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
* we'll get unclaimed register interrupts. This stops after we write
* anything to the VGA MSR register. The vgacon module uses this
* register all the time, so if we unbind our driver and, as a
* consequence, bind vgacon, we'll get stuck in an infinite loop at
* console_unlock(). So here we touch the VGA MSR register, making
* sure vgacon can keep working normally without triggering interrupts
* and error messages.
*/
if (power_well->data == SKL_DISP_PW_2) {
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
 
gen8_irq_power_well_post_enable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
}
 
if (power_well->data == SKL_DISP_PW_1) {
if (!dev_priv->power_domains.initializing)
intel_prepare_ddi(dev);
gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
}
}
 
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
233,6 → 284,427
}
}
 
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_TRANSCODER_A) | \
BIT(POWER_DOMAIN_PIPE_B) | \
BIT(POWER_DOMAIN_TRANSCODER_B) | \
BIT(POWER_DOMAIN_PIPE_C) | \
BIT(POWER_DOMAIN_TRANSCODER_C) | \
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS ( \
SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
SKL_DISPLAY_DDI_A_E_POWER_DOMAINS | \
SKL_DISPLAY_DDI_B_POWER_DOMAINS | \
SKL_DISPLAY_DDI_C_POWER_DOMAINS | \
SKL_DISPLAY_DDI_D_POWER_DOMAINS | \
SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) | \
BIT(POWER_DOMAIN_INIT))
 
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_TRANSCODER_A) | \
BIT(POWER_DOMAIN_PIPE_B) | \
BIT(POWER_DOMAIN_TRANSCODER_B) | \
BIT(POWER_DOMAIN_PIPE_C) | \
BIT(POWER_DOMAIN_TRANSCODER_C) | \
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_GMBUS) | \
BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) | \
BIT(POWER_DOMAIN_INIT))
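These masks feed the for_each_power_well() iterators; a minimal sketch of the containment test they imply (the domain enum from i915_drv.h is assumed):

/* Illustrative only: a well powers a domain when the domain's BIT()
 * is present in the well's mask. */
static bool example_well_covers(const struct i915_power_well *power_well,
                                enum intel_display_power_domain domain)
{
        return (power_well->domains & BIT(domain)) != 0;
}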
 
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
"DC9 already programmed to be enabled.\n");
WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
"DC5 still not disabled to enable DC9.\n");
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
 
/*
* TODO: check for the following to verify the conditions to enter DC9
* state are satisfied:
* 1] Check relevant display engine registers to verify if mode set
* disable sequence was followed.
* 2] Check if display uninitialize sequence is initialized.
*/
}
 
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
"DC9 already programmed to be disabled.\n");
WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
"DC5 still not disabled.\n");
 
/*
* TODO: check for the following to verify DC9 state was indeed
* entered before programming to disable it:
* 1] Check relevant display engine registers to verify if mode
* set disable sequence was followed.
* 2] Check if display uninitialize sequence is initialized.
*/
}
 
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
uint32_t val;
 
assert_can_enable_dc9(dev_priv);
 
DRM_DEBUG_KMS("Enabling DC9\n");
 
val = I915_READ(DC_STATE_EN);
val |= DC_STATE_EN_DC9;
I915_WRITE(DC_STATE_EN, val);
POSTING_READ(DC_STATE_EN);
}
 
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
uint32_t val;
 
assert_can_disable_dc9(dev_priv);
 
DRM_DEBUG_KMS("Disabling DC9\n");
 
val = I915_READ(DC_STATE_EN);
val &= ~DC_STATE_EN_DC9;
I915_WRITE(DC_STATE_EN, val);
POSTING_READ(DC_STATE_EN);
}
 
static void gen9_set_dc_state_debugmask_memory_up(
struct drm_i915_private *dev_priv)
{
uint32_t val;
 
/* The below bit doesn't need to be cleared ever afterwards */
val = I915_READ(DC_STATE_DEBUG);
if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
I915_WRITE(DC_STATE_DEBUG, val);
POSTING_READ(DC_STATE_DEBUG);
}
}
 
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
SKL_DISP_PW_2);
 
WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
 
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
"DC5 already programmed to be enabled.\n");
WARN_ONCE(dev_priv->pm.suspended,
"DC5 cannot be enabled, if platform is runtime-suspended.\n");
 
assert_csr_loaded(dev_priv);
}
 
static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
{
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
SKL_DISP_PW_2);
/*
* During initialization, the firmware may not be loaded yet.
* We still want to make sure that the DC enabling flag is cleared.
*/
if (dev_priv->power_domains.initializing)
return;
 
WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
WARN_ONCE(dev_priv->pm.suspended,
"Disabling of DC5 while platform is runtime-suspended should never happen.\n");
}
 
static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
uint32_t val;
 
assert_can_enable_dc5(dev_priv);
 
DRM_DEBUG_KMS("Enabling DC5\n");
 
gen9_set_dc_state_debugmask_memory_up(dev_priv);
 
val = I915_READ(DC_STATE_EN);
val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
val |= DC_STATE_EN_UPTO_DC5;
I915_WRITE(DC_STATE_EN, val);
POSTING_READ(DC_STATE_EN);
}
 
static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
{
uint32_t val;
 
assert_can_disable_dc5(dev_priv);
 
DRM_DEBUG_KMS("Disabling DC5\n");
 
val = I915_READ(DC_STATE_EN);
val &= ~DC_STATE_EN_UPTO_DC5;
I915_WRITE(DC_STATE_EN, val);
POSTING_READ(DC_STATE_EN);
}
 
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Backlight is not disabled.\n");
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
"DC6 already programmed to be enabled.\n");
 
assert_csr_loaded(dev_priv);
}
 
static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
{
/*
* During initialization, the firmware may not be loaded yet.
* We still want to make sure that the DC enabling flag is cleared.
*/
if (dev_priv->power_domains.initializing)
return;
 
assert_csr_loaded(dev_priv);
WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
"DC6 already programmed to be disabled.\n");
}
 
static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
uint32_t val;
 
assert_can_enable_dc6(dev_priv);
 
DRM_DEBUG_KMS("Enabling DC6\n");
 
gen9_set_dc_state_debugmask_memory_up(dev_priv);
 
val = I915_READ(DC_STATE_EN);
val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
val |= DC_STATE_EN_UPTO_DC6;
I915_WRITE(DC_STATE_EN, val);
POSTING_READ(DC_STATE_EN);
}
 
static void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
uint32_t val;
 
assert_can_disable_dc6(dev_priv);
 
DRM_DEBUG_KMS("Disabling DC6\n");
 
val = I915_READ(DC_STATE_EN);
val &= ~DC_STATE_EN_UPTO_DC6;
I915_WRITE(DC_STATE_EN, val);
POSTING_READ(DC_STATE_EN);
}
 
static void skl_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
struct drm_device *dev = dev_priv->dev;
uint32_t tmp, fuse_status;
uint32_t req_mask, state_mask;
bool is_enabled, enable_requested, check_fuse_status = false;
 
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
fuse_status = I915_READ(SKL_FUSE_STATUS);
 
switch (power_well->data) {
case SKL_DISP_PW_1:
if (wait_for((I915_READ(SKL_FUSE_STATUS) &
SKL_FUSE_PG0_DIST_STATUS), 1)) {
DRM_ERROR("PG0 not enabled\n");
return;
}
break;
case SKL_DISP_PW_2:
if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
DRM_ERROR("PG1 in disabled state\n");
return;
}
break;
case SKL_DISP_PW_DDI_A_E:
case SKL_DISP_PW_DDI_B:
case SKL_DISP_PW_DDI_C:
case SKL_DISP_PW_DDI_D:
case SKL_DISP_PW_MISC_IO:
break;
default:
WARN(1, "Unknown power well %lu\n", power_well->data);
return;
}
 
req_mask = SKL_POWER_WELL_REQ(power_well->data);
enable_requested = tmp & req_mask;
state_mask = SKL_POWER_WELL_STATE(power_well->data);
is_enabled = tmp & state_mask;
 
if (enable) {
if (!enable_requested) {
WARN((tmp & state_mask) &&
!I915_READ(HSW_PWR_WELL_BIOS),
"Invalid for power well status to be enabled, unless done by the BIOS, \
when request is to disable!\n");
if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
power_well->data == SKL_DISP_PW_2) {
if (SKL_ENABLE_DC6(dev)) {
skl_disable_dc6(dev_priv);
/*
* DDI buffer programming is unnecessary during driver-load/resume,
* as it's already done during modeset initialization.
* It's also invalid here, as the encoder list is still uninitialized.
*/
if (!dev_priv->power_domains.initializing)
intel_prepare_ddi(dev);
} else {
gen9_disable_dc5(dev_priv);
}
}
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
}
 
if (!is_enabled) {
DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
state_mask), 1))
DRM_ERROR("%s enable timeout\n",
power_well->name);
check_fuse_status = true;
}
} else {
if (enable_requested) {
if (IS_SKYLAKE(dev) &&
(power_well->data == SKL_DISP_PW_1) &&
(intel_csr_load_status_get(dev_priv) == FW_LOADED))
DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n");
else {
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
}
 
if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
power_well->data == SKL_DISP_PW_2) {
enum csr_state state;
/* TODO: wait for a completion event or
* similar here instead of busy
* waiting using wait_for function.
*/
wait_for((state = intel_csr_load_status_get(dev_priv)) !=
FW_UNINITIALIZED, 1000);
if (state != FW_LOADED)
DRM_DEBUG("CSR firmware not ready (%d)\n",
state);
else
if (SKL_ENABLE_DC6(dev))
skl_enable_dc6(dev_priv);
else
gen9_enable_dc5(dev_priv);
}
}
}
 
if (check_fuse_status) {
if (power_well->data == SKL_DISP_PW_1) {
if (wait_for((I915_READ(SKL_FUSE_STATUS) &
SKL_FUSE_PG1_DIST_STATUS), 1))
DRM_ERROR("PG1 distributing status timeout\n");
} else if (power_well->data == SKL_DISP_PW_2) {
if (wait_for((I915_READ(SKL_FUSE_STATUS) &
SKL_FUSE_PG2_DIST_STATUS), 1))
DRM_ERROR("PG2 distributing status timeout\n");
}
}
 
if (enable && !is_enabled)
skl_power_well_post_enable(dev_priv, power_well);
}
 
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
258,6 → 730,36
hsw_set_power_well(dev_priv, power_well, false);
}
 
static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
SKL_POWER_WELL_STATE(power_well->data);
 
return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}
 
static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
skl_set_power_well(dev_priv, power_well, power_well->count > 0);
 
/* Clear any request made by BIOS as driver is taking over */
I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
 
static void skl_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
skl_set_power_well(dev_priv, power_well, true);
}
 
static void skl_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
skl_set_power_well(dev_priv, power_well, false);
}
 
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
295,7 → 797,7
vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
 
if (wait_for(COND, 100))
DRM_ERROR("timout setting power well state %08x (%08x)\n",
DRM_ERROR("timeout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
 
359,13 → 861,28
return enabled;
}
 
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
enum pipe pipe;
 
vlv_set_power_well(dev_priv, power_well, true);
/*
* Enable the CRI clock source so we can get at the
* display and the reference clock for VGA
* hotplug / manual detection. Supposedly DSI also
* needs the ref clock up and running.
*
* CHV DPLL B/C have some issues if VGA mode is enabled.
*/
for_each_pipe(dev_priv->dev, pipe) {
u32 val = I915_READ(DPLL(pipe));
 
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
I915_WRITE(DPLL(pipe), val);
}
 
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
377,23 → 894,38
if (dev_priv->power_domains.initializing)
return;
 
intel_hpd_init(dev_priv);
// intel_hpd_init(dev_priv);
 
i915_redisable_vga_power_on(dev_priv->dev);
}
 
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
spin_lock_irq(&dev_priv->irq_lock);
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
 
vlv_power_sequencer_reset(dev_priv);
}
 
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
 
vlv_set_power_well(dev_priv, power_well, true);
 
vlv_display_power_well_init(dev_priv);
}
 
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
 
spin_lock_irq(&dev_priv->irq_lock);
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
vlv_display_power_well_deinit(dev_priv);
 
vlv_set_power_well(dev_priv, power_well, false);
 
vlv_power_sequencer_reset(dev_priv);
}
 
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
401,13 → 933,7
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
 
/*
* Enable the CRI clock source so we can get at the
* display and the reference clock for VGA
* hotplug / manual detection.
*/
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
 
vlv_set_power_well(dev_priv, power_well, true);
442,30 → 968,149
vlv_set_power_well(dev_priv, power_well, false);
}
 
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
 
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
int power_well_id)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
int i;
 
for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
if (power_well->data == power_well_id)
return power_well;
}
 
return NULL;
}
 
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
 
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn_bc =
lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
struct i915_power_well *cmn_d =
lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
u32 phy_control = dev_priv->chv_phy_control;
u32 phy_status = 0;
u32 phy_status_mask = 0xffffffff;
u32 tmp;
 
/*
* The BIOS can leave the PHY in some weird state
* where it doesn't fully power down some parts.
* Disable the asserts until the PHY has been fully
* reset (ie. the power well has been disabled at
* least once).
*/
if (!dev_priv->chv_phy_assert[DPIO_PHY0])
phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
 
if (!dev_priv->chv_phy_assert[DPIO_PHY1])
phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
 
if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
phy_status |= PHY_POWERGOOD(DPIO_PHY0);
 
/* this assumes override is only used to enable lanes */
if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
 
if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
 
/* CL1 is on whenever anything is on in either channel */
if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
 
/*
* The DPLLB check accounts for the pipe B + port A usage
* with CL2 powered up but all the lanes in the second channel
* powered down.
*/
if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
(I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
 
if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
 
if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
}
 
if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
phy_status |= PHY_POWERGOOD(DPIO_PHY1);
 
/* this assumes override is only used to enable lanes */
if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
 
if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
 
if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
}
 
phy_status &= phy_status_mask;
 
/*
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
WARN(phy_status != tmp,
"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
tmp, phy_status, dev_priv->chv_phy_control);
}
 
#undef BITS_SET
 
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
enum dpio_phy phy;
enum pipe pipe;
uint32_t tmp;
 
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
 
/*
* Enable the CRI clock source so we can get at the
* display and the reference clock for VGA
* hotplug / manual detection.
*/
if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
pipe = PIPE_A;
phy = DPIO_PHY0;
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
DPLL_REFA_CLK_ENABLE_VLV);
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
} else {
pipe = PIPE_C;
phy = DPIO_PHY1;
I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
}
 
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
vlv_set_power_well(dev_priv, power_well, true);
 
473,10 → 1118,40
if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
DRM_ERROR("Display PHY %d is not power up\n", phy);
 
I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
PHY_COM_LANE_RESET_DEASSERT(phy));
mutex_lock(&dev_priv->sb_lock);
 
/* Enable dynamic power down */
tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
 
if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
} else {
/*
* Force the non-existing CL2 off. BXT does this
* too, so maybe it saves some power even though
* CL2 doesn't exist?
*/
tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
tmp |= DPIO_CL2_LDOFUSE_PWRENB;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
}
 
mutex_unlock(&dev_priv->sb_lock);
 
dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
 
DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
phy, dev_priv->chv_phy_control);
 
assert_chv_phy_status(dev_priv);
}
 
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
494,12 → 1169,143
assert_pll_disabled(dev_priv, PIPE_C);
}
 
I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
~PHY_COM_LANE_RESET_DEASSERT(phy));
dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
 
vlv_set_power_well(dev_priv, power_well, false);
 
DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
phy, dev_priv->chv_phy_control);
 
/* PHY is fully reset now, so we can enable the PHY state asserts */
dev_priv->chv_phy_assert[phy] = true;
 
assert_chv_phy_status(dev_priv);
}
 
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
enum dpio_channel ch, bool override, unsigned int mask)
{
enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
u32 reg, val, expected, actual;
 
/*
* The BIOS can leave the PHY in some weird state
* where it doesn't fully power down some parts.
* Disable the asserts until the PHY has been fully
* reset (ie. the power well has been disabled at
* least once).
*/
if (!dev_priv->chv_phy_assert[phy])
return;
 
if (ch == DPIO_CH0)
reg = _CHV_CMN_DW0_CH0;
else
reg = _CHV_CMN_DW6_CH1;
 
mutex_lock(&dev_priv->sb_lock);
val = vlv_dpio_read(dev_priv, pipe, reg);
mutex_unlock(&dev_priv->sb_lock);
 
/*
* This assumes !override is only used when the port is disabled.
* All lanes should power down even without the override when
* the port is disabled.
*/
if (!override || mask == 0xf) {
expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
/*
* If CH1 common lane is not active anymore
* (eg. for pipe B DPLL) the entire channel will
* shut down, which causes the common lane registers
* to read as 0. That means we can't actually check
* the lane power down status bits, but as the entire
* register reads as 0 it's a good indication that the
* channel is indeed entirely powered down.
*/
if (ch == DPIO_CH1 && val == 0)
expected = 0;
} else if (mask != 0x0) {
expected = DPIO_ANYDL_POWERDOWN;
} else {
expected = 0;
}
 
if (ch == DPIO_CH0)
actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
else
actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
 
WARN(actual != expected,
"Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
!!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
!!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
reg, val);
}
 
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
enum dpio_channel ch, bool override)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
bool was_override;
 
mutex_lock(&power_domains->lock);
 
was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
 
if (override == was_override)
goto out;
 
if (override)
dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
else
dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
 
I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
 
DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
phy, ch, dev_priv->chv_phy_control);
 
assert_chv_phy_status(dev_priv);
 
out:
mutex_unlock(&power_domains->lock);
 
return was_override;
}
 
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_power_domains *power_domains = &dev_priv->power_domains;
enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
 
mutex_lock(&power_domains->lock);
 
dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
 
if (override)
dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
else
dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
 
I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
 
DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
phy, ch, mask, dev_priv->chv_phy_control);
 
assert_chv_phy_status(dev_priv);
 
assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
 
mutex_unlock(&power_domains->lock);
}
 
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
553,7 → 1359,7
vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
 
if (wait_for(COND, 100))
DRM_ERROR("timout setting power well state %08x (%08x)\n",
DRM_ERROR("timeout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
 
566,6 → 1372,8
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PIPE_A);
 
chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
 
572,72 → 1380,23
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PIPE_A &&
power_well->data != PIPE_B &&
power_well->data != PIPE_C);
WARN_ON_ONCE(power_well->data != PIPE_A);
 
chv_set_pipe_power_well(dev_priv, power_well, true);
 
if (power_well->data == PIPE_A) {
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
 
/*
* During driver initialization/resume we can avoid restoring the
* part of the HW/SW state that will be inited anyway explicitly.
*/
if (dev_priv->power_domains.initializing)
return;
 
intel_hpd_init(dev_priv);
 
i915_redisable_vga_power_on(dev_priv->dev);
vlv_display_power_well_init(dev_priv);
}
}
 
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PIPE_A &&
power_well->data != PIPE_B &&
power_well->data != PIPE_C);
WARN_ON_ONCE(power_well->data != PIPE_A);
 
if (power_well->data == PIPE_A) {
spin_lock_irq(&dev_priv->irq_lock);
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
vlv_display_power_well_deinit(dev_priv);
 
chv_set_pipe_power_well(dev_priv, power_well, false);
 
if (power_well->data == PIPE_A)
vlv_power_sequencer_reset(dev_priv);
}
 
static void check_power_well_state(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
 
if (power_well->always_on || !i915.disable_power_well) {
if (!enabled)
goto mismatch;
 
return;
}
 
if (enabled != (power_well->count > 0))
goto mismatch;
 
return;
 
mismatch:
WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
power_well->name, power_well->always_on, enabled,
power_well->count, i915.disable_power_well);
}
 
/**
* intel_display_power_get - grab a power domain reference
* @dev_priv: i915 device instance
664,15 → 1423,10
mutex_lock(&power_domains->lock);
 
for_each_power_well(i, power_well, BIT(domain), power_domains) {
if (!power_well->count++) {
DRM_DEBUG_KMS("enabling %s\n", power_well->name);
power_well->ops->enable(dev_priv, power_well);
power_well->hw_enabled = true;
if (!power_well->count++)
intel_power_well_enable(dev_priv, power_well);
}
 
check_power_well_state(dev_priv, power_well);
}
 
power_domains->domain_use_count[domain]++;
 
mutex_unlock(&power_domains->lock);
704,22 → 1458,15
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
WARN_ON(!power_well->count);
 
if (!--power_well->count && i915.disable_power_well) {
DRM_DEBUG_KMS("disabling %s\n", power_well->name);
power_well->hw_enabled = false;
power_well->ops->disable(dev_priv, power_well);
if (!--power_well->count && i915.disable_power_well)
intel_power_well_disable(dev_priv, power_well);
}
 
check_power_well_state(dev_priv, power_well);
}
 
mutex_unlock(&power_domains->lock);
 
intel_runtime_pm_put(dev_priv);
}
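Callers must pair the two symmetrically; a hedged usage sketch (domain chosen arbitrarily):

/* Illustrative only: hold the VGA domain across a hardware access. */
static void example_touch_vga(struct drm_i915_private *dev_priv)
{
        intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);
        /* ... VGA-domain registers may be accessed here ... */
        intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}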
 
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
 
#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
733,6 → 1480,11
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_GMBUS) | \
BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
754,59 → 1506,47
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
 
#define CHV_PIPE_A_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_INIT))
 
#define CHV_PIPE_B_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_B) | \
BIT(POWER_DOMAIN_INIT))
 
#define CHV_PIPE_C_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_C) | \
BIT(POWER_DOMAIN_INIT))
 
#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
 
#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_INIT))
 
#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_always_on_power_well_noop,
.enable = i9xx_always_on_power_well_noop,
844,6 → 1584,13
.is_enabled = hsw_power_well_enabled,
};
 
static const struct i915_power_well_ops skl_power_well_ops = {
.sync_hw = skl_power_well_sync_hw,
.enable = skl_power_well_enable,
.disable = skl_power_well_disable,
.is_enabled = skl_power_well_enabled,
};
 
static struct i915_power_well hsw_power_wells[] = {
{
.name = "always-on",
957,121 → 1704,128
.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
.ops = &i9xx_always_on_power_well_ops,
},
#if 0
{
.name = "display",
.domains = VLV_DISPLAY_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DISP2D,
.ops = &vlv_display_power_well_ops,
},
#endif
{
.name = "pipe-a",
/*
* FIXME: pipe A power well seems to be the new disp2d well.
* At least all registers seem to be housed there. Figure
* out if this a a temporary situation in pre-production
* hardware or a permanent state of affairs.
* Pipe A power well is the new disp2d well. Pipe B and C
* power wells don't actually exist. Pipe A power well is
* required for any pipe to work.
*/
.domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
.domains = VLV_DISPLAY_POWER_DOMAINS,
.data = PIPE_A,
.ops = &chv_pipe_power_well_ops,
},
#if 0
{
.name = "pipe-b",
.domains = CHV_PIPE_B_POWER_DOMAINS,
.data = PIPE_B,
.ops = &chv_pipe_power_well_ops,
},
{
.name = "pipe-c",
.domains = CHV_PIPE_C_POWER_DOMAINS,
.data = PIPE_C,
.ops = &chv_pipe_power_well_ops,
},
#endif
{
.name = "dpio-common-bc",
/*
* XXX: cmnreset for one PHY seems to disturb the other.
* As a workaround keep both powered on at the same
* time for now.
*/
.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &chv_dpio_cmn_power_well_ops,
},
{
.name = "dpio-common-d",
/*
* XXX: cmnreset for one PHY seems to disturb the other.
* As a workaround keep both powered on at the same
* time for now.
*/
.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_D,
.ops = &chv_dpio_cmn_power_well_ops,
},
#if 0
};
 
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
int power_well_id)
{
.name = "dpio-tx-b-01",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
struct i915_power_well *power_well;
bool ret;
 
power_well = lookup_power_well(dev_priv, power_well_id);
ret = power_well->ops->is_enabled(dev_priv, power_well);
 
return ret;
}
 
static struct i915_power_well skl_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
.ops = &i9xx_always_on_power_well_ops,
},
{
.name = "dpio-tx-b-23",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
.name = "power well 1",
.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_1,
},
{
.name = "dpio-tx-c-01",
.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
.name = "MISC IO power well",
.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_MISC_IO,
},
{
.name = "dpio-tx-c-23",
.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
.name = "power well 2",
.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_2,
},
{
.name = "dpio-tx-d-01",
.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
.name = "DDI A/E power well",
.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_DDI_A_E,
},
{
.name = "dpio-tx-d-23",
.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
.name = "DDI B power well",
.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_DDI_B,
},
#endif
{
.name = "DDI C power well",
.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_DDI_C,
},
{
.name = "DDI D power well",
.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_DDI_D,
},
};
 
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
enum punit_power_well power_well_id)
static struct i915_power_well bxt_power_wells[] = {
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
int i;
.name = "always-on",
.always_on = 1,
.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
.ops = &i9xx_always_on_power_well_ops,
},
{
.name = "power well 1",
.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_1,
},
{
.name = "power well 2",
.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_2,
}
};
 
for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
if (power_well->data == power_well_id)
return power_well;
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
{
if (disable_power_well >= 0)
return !!disable_power_well;
 
if (IS_SKYLAKE(dev_priv)) {
DRM_DEBUG_KMS("Disabling display power well support\n");
return 0;
}
 
return NULL;
return 1;
}
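A worked example of the sanitization above (values follow directly from the code, not from measurement):

/*
 * sanitize_disable_power_well_option(skl_dev, -1) -> 0  (auto: keep wells on)
 * sanitize_disable_power_well_option(hsw_dev, -1) -> 1  (auto: allow power-off)
 * sanitize_disable_power_well_option(any_dev,  2) -> 1  (clamped by !!)
 */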
 
#define set_power_wells(power_domains, __power_wells) ({ \
1090,6 → 1844,11
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
i915.disable_power_well);
 
BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
 
mutex_init(&power_domains->lock);
 
/*
1098,10 → 1857,12
*/
if (IS_HASWELL(dev_priv->dev)) {
set_power_wells(power_domains, hsw_power_wells);
hsw_pwr = power_domains;
} else if (IS_BROADWELL(dev_priv->dev)) {
set_power_wells(power_domains, bdw_power_wells);
hsw_pwr = power_domains;
} else if (IS_SKYLAKE(dev_priv->dev)) {
set_power_wells(power_domains, skl_power_wells);
} else if (IS_BROXTON(dev_priv->dev)) {
set_power_wells(power_domains, bxt_power_wells);
} else if (IS_CHERRYVIEW(dev_priv->dev)) {
set_power_wells(power_domains, chv_power_wells);
} else if (IS_VALLEYVIEW(dev_priv->dev)) {
1126,7 → 1887,6
 
/* Make sure we're not suspended first. */
pm_runtime_get_sync(device);
pm_runtime_disable(device);
}
 
/**
1145,8 → 1905,6
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
intel_display_set_init_power(dev_priv, true);
 
hsw_pwr = NULL;
}
 
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
1164,6 → 1922,93
mutex_unlock(&power_domains->lock);
}
 
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn_bc =
lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
struct i915_power_well *cmn_d =
lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
 
/*
* DISPLAY_PHY_CONTROL can get corrupted if read. As a
* workaround never ever read DISPLAY_PHY_CONTROL, and
* instead maintain a shadow copy ourselves. Use the actual
* power well state and lane status to reconstruct the
* expected initial value.
*/
dev_priv->chv_phy_control =
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
 
/*
* If all lanes are disabled we leave the override disabled
* with all power down bits cleared to match the state we
* would use after disabling the port. Otherwise enable the
* override and set the lane powerdown bits according to the
* current lane status.
*/
if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
uint32_t status = I915_READ(DPLL(PIPE_A));
unsigned int mask;
 
mask = status & DPLL_PORTB_READY_MASK;
if (mask == 0xf)
mask = 0x0;
else
dev_priv->chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
 
dev_priv->chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
 
mask = (status & DPLL_PORTC_READY_MASK) >> 4;
if (mask == 0xf)
mask = 0x0;
else
dev_priv->chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
 
dev_priv->chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
 
dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
 
dev_priv->chv_phy_assert[DPIO_PHY0] = false;
} else {
dev_priv->chv_phy_assert[DPIO_PHY0] = true;
}
 
if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
uint32_t status = I915_READ(DPIO_PHY_STATUS);
unsigned int mask;
 
mask = status & DPLL_PORTD_READY_MASK;
 
if (mask == 0xf)
mask = 0x0;
else
dev_priv->chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
 
dev_priv->chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
 
dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
 
dev_priv->chv_phy_assert[DPIO_PHY1] = false;
} else {
dev_priv->chv_phy_assert[DPIO_PHY1] = true;
}
 
I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
 
DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
dev_priv->chv_phy_control);
}
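Because DISPLAY_PHY_CONTROL must never be read back, every later update goes through the shadow word under the power-domains lock; a minimal sketch of that pattern, mirroring the updates in this file:

/* Illustrative only: modify the cached copy, then write it out --
 * never read the register itself. */
static void example_update_phy_control(struct drm_i915_private *dev_priv,
                                       u32 clear, u32 set)
{
        mutex_lock(&dev_priv->power_domains.lock);
        dev_priv->chv_phy_control &= ~clear;
        dev_priv->chv_phy_control |= set;
        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
        mutex_unlock(&dev_priv->power_domains.lock);
}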
 
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn =
1206,8 → 2051,12
 
power_domains->initializing = true;
 
if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
if (IS_CHERRYVIEW(dev)) {
mutex_lock(&power_domains->lock);
chv_phy_control_init(dev_priv);
mutex_unlock(&power_domains->lock);
} else if (IS_VALLEYVIEW(dev)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
mutex_unlock(&power_domains->lock);
}
1219,36 → 2068,6
}
 
/**
* intel_aux_display_runtime_get - grab an auxiliary power domain reference
* @dev_priv: i915 device instance
*
* This function grabs a power domain reference for the auxiliary power domain
* (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
* parents are powered up. Therefore users should only grab a reference to the
* innermost power domain they need.
*
* Any power domain reference obtained by this function must have a symmetric
* call to intel_aux_display_runtime_put() to release the reference again.
*/
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
intel_runtime_pm_get(dev_priv);
}
 
/**
* intel_aux_display_runtime_put - release an auxiliary power domain reference
* @dev_priv: i915 device instance
*
* This function drops the auxiliary power domain reference obtained by
* intel_aux_display_runtime_get() and might power down the corresponding
* hardware block right away if this is the last reference.
*/
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
intel_runtime_pm_put(dev_priv);
}
 
/**
* intel_runtime_pm_get - grab a runtime pm reference
* @dev_priv: i915 device instance
*
1296,7 → 2115,7
return;
 
WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
// pm_runtime_get_noresume(device);
pm_runtime_get_noresume(device);
}
 
/**
1315,8 → 2134,8
if (!HAS_RUNTIME_PM(dev))
return;
 
// pm_runtime_mark_last_busy(device);
// pm_runtime_put_autosuspend(device);
pm_runtime_mark_last_busy(device);
pm_runtime_put_autosuspend(device);
}
 
/**
1337,8 → 2156,6
if (!HAS_RUNTIME_PM(dev))
return;
 
pm_runtime_set_active(device);
 
/*
* RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
* requirement.
1348,59 → 2165,10
return;
}
 
// pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
// pm_runtime_mark_last_busy(device);
// pm_runtime_use_autosuspend(device);
pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
pm_runtime_mark_last_busy(device);
pm_runtime_use_autosuspend(device);
 
// pm_runtime_put_autosuspend(device);
pm_runtime_put_autosuspend(device);
}
 
/* Display audio driver power well request */
int i915_request_power_well(void)
{
struct drm_i915_private *dev_priv;
 
if (!hsw_pwr)
return -ENODEV;
 
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
return 0;
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
 
/* Display audio driver power well release */
int i915_release_power_well(void)
{
struct drm_i915_private *dev_priv;
 
if (!hsw_pwr)
return -ENODEV;
 
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
return 0;
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
 
/*
* Private interface for the audio driver to get CDCLK in kHz.
*
* Caller must request power well using i915_request_power_well() prior to
* making the call.
*/
int i915_get_cdclk_freq(void)
{
struct drm_i915_private *dev_priv;
 
if (!hsw_pwr)
return -ENODEV;
 
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
 
return intel_ddi_get_cdclk_freq(dev_priv);
}
EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
/drivers/video/drm/i915/intel_sdvo.c
30,6 → 30,7
#include <linux/delay.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
52,7 → 53,7
#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
 
 
static const char *tv_format_names[] = {
static const char * const tv_format_names[] = {
"NTSC_M" , "NTSC_J" , "NTSC_443",
"PAL_B" , "PAL_D" , "PAL_G" ,
"PAL_H" , "PAL_I" , "PAL_M" ,
62,7 → 63,7
"SECAM_60"
};
 
#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
#define TV_FORMAT_NUM ARRAY_SIZE(tv_format_names)
 
struct intel_sdvo {
struct intel_encoder base;
106,6 → 107,11
bool color_range_auto;
 
/**
* HDMI user-specified aspect ratio
*/
enum hdmi_picture_aspect aspect_ratio;
 
/**
* This is set if we're going to treat the device as TV-out.
*
* While we have these nice friendly flags for output types that ought
241,7 → 247,15
 
if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
I915_WRITE(intel_sdvo->sdvo_reg, val);
I915_READ(intel_sdvo->sdvo_reg);
POSTING_READ(intel_sdvo->sdvo_reg);
/*
* HW workaround: the first write can get masked, so the
* register must be written twice.
*/
if (HAS_PCH_IBX(dev)) {
I915_WRITE(intel_sdvo->sdvo_reg, val);
POSTING_READ(intel_sdvo->sdvo_reg);
}
return;
}
 
258,9 → 272,9
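/*
 * Write both SDVOB and SDVOC twice: a single write does not always
 * latch reliably on these chipsets (the BIOS does the same).
 */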
for (i = 0; i < 2; i++)
{
I915_WRITE(GEN3_SDVOB, bval);
I915_READ(GEN3_SDVOB);
POSTING_READ(GEN3_SDVOB);
I915_WRITE(GEN3_SDVOC, cval);
I915_READ(GEN3_SDVOC);
POSTING_READ(GEN3_SDVOC);
}
}
 
443,7 → 457,7
DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
}
 
static const char *cmd_status_names[] = {
static const char * const cmd_status_names[] = {
"Power on",
"Success",
"Not supported",
594,11 → 608,11
return false;
}
 
static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
static int intel_sdvo_get_pixel_multiplier(const struct drm_display_mode *adjusted_mode)
{
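/*
 * Pick the multiplier that keeps the effective SDVO bus clock in
 * roughly the 100-200 MHz range the SDVO spec expects.
 */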
if (mode->clock >= 100000)
if (adjusted_mode->crtc_clock >= 100000)
return 1;
else if (mode->clock >= 50000)
else if (adjusted_mode->crtc_clock >= 50000)
return 2;
else
return 4;
1007,7 → 1021,7
}
 
if (intel_sdvo->rgb_quant_range_selectable) {
if (intel_crtc->config.limited_color_range)
if (intel_crtc->config->limited_color_range)
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_LIMITED;
else
1085,7 → 1099,7
return true;
}
 
static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
{
unsigned dotclock = pipe_config->port_clock;
struct dpll *clock = &pipe_config->dpll;
1112,11 → 1126,11
}
 
static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
struct drm_display_mode *mode = &pipe_config->requested_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct drm_display_mode *mode = &pipe_config->base.mode;
 
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
pipe_config->pipe_bpp = 8*3;
1172,6 → 1186,10
if (intel_sdvo->is_tv)
i9xx_adjust_sdvo_tv_clock(pipe_config);
 
/* Propagate the user-selected picture aspect ratio into the adjusted mode */
if (intel_sdvo->is_hdmi)
adjusted_mode->picture_aspect_ratio = intel_sdvo->aspect_ratio;
 
return true;
}
 
1180,9 → 1198,8
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
&crtc->config.adjusted_mode;
struct drm_display_mode *mode = &crtc->config.requested_mode;
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
struct drm_display_mode *mode = &crtc->config->base.mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
1224,7 → 1241,7
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
 
if (crtc->config.has_hdmi_sink) {
if (crtc->config->has_hdmi_sink) {
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
1244,9 → 1261,9
DRM_INFO("Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
 
switch (crtc->config.pixel_multiplier) {
switch (crtc->config->pixel_multiplier) {
default:
WARN(1, "unknown pixel mutlipler specified\n");
WARN(1, "unknown pixel multiplier specified\n");
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
1259,7 → 1276,7
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
if (!HAS_PCH_SPLIT(dev) && crtc->config.limited_color_range)
if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
sdvox |= HDMI_COLOR_RANGE_16_235;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
1289,7 → 1306,7
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
sdvox |= (crtc->config.pixel_multiplier - 1)
sdvox |= (crtc->config->pixel_multiplier - 1)
<< SDVO_PORT_MULTIPLY_SHIFT;
}
 
1338,7 → 1355,7
}
 
static void intel_sdvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1370,7 → 1387,7
flags |= DRM_MODE_FLAG_NVSYNC;
}
 
pipe_config->adjusted_mode.flags |= flags;
pipe_config->base.adjusted_mode.flags |= flags;
 
/*
* pixel multiplier readout is tricky: Only on i915g/gm it is stored in
1392,7 → 1409,7
if (HAS_PCH_SPLIT(dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
 
pipe_config->adjusted_mode.crtc_clock = dotclock;
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
 
/* Cross check the port pixel multiplier with the sdvo encoder state. */
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
1428,6 → 1445,7
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
 
intel_sdvo_set_active_outputs(intel_sdvo, 0);
1436,33 → 1454,32
DRM_MODE_DPMS_OFF);
 
temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) != 0) {
/* HW workaround for IBX, we need to move the port to
* transcoder A before disabling it. */
if (HAS_PCH_IBX(encoder->base.dev)) {
struct drm_crtc *crtc = encoder->base.crtc;
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
 
if (temp & SDVO_PIPE_B_SELECT) {
temp &= ~SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
 
/*
* HW workaround for IBX, we need to move the port
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/
if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
temp &= ~SDVO_PIPE_B_SELECT;
I915_WRITE(intel_sdvo->sdvo_reg, temp);
POSTING_READ(intel_sdvo->sdvo_reg);
temp |= SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
 
/* Again we need to write this twice. */
I915_WRITE(intel_sdvo->sdvo_reg, temp);
POSTING_READ(intel_sdvo->sdvo_reg);
 
/* Transcoder selection bits only update
* effectively on vblank. */
if (crtc)
intel_wait_for_vblank(encoder->base.dev, pipe);
else
msleep(50);
temp &= ~SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
}
}
 
intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
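/*
 * On PCH platforms the SDVO port must be turned off only after the
 * pipe, so ->disable is left empty and the actual disable runs from
 * ->post_disable (see the hook assignments in intel_sdvo_init below).
 */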
static void pch_disable_sdvo(struct intel_encoder *encoder)
{
}
 
static void pch_post_disable_sdvo(struct intel_encoder *encoder)
{
intel_disable_sdvo(encoder);
}
 
static void intel_enable_sdvo(struct intel_encoder *encoder)
1477,14 → 1494,9
bool success;
 
temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0) {
/* HW workaround for IBX, we need to move the port
* to transcoder A before disabling it, so restore it here. */
if (HAS_PCH_IBX(dev))
temp |= SDVO_PIPE_SEL(intel_crtc->pipe);
temp |= SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
 
intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
}
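/* Give the encoder a couple of frames to settle before its state is
 * checked. */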
for (i = 0; i < 2; i++)
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
1504,51 → 1516,6
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
}
 
/* Special dpms function to support cloning between dvo/sdvo/crt. */
static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
{
struct drm_crtc *crtc;
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 
/* dvo supports only 2 dpms states. */
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
 
if (mode == connector->dpms)
return;
 
connector->dpms = mode;
 
/* Only need to change hw state when actually enabled */
crtc = intel_sdvo->base.base.crtc;
if (!crtc) {
intel_sdvo->base.connectors_active = false;
return;
}
 
/* We set active outputs manually below in case pipe dpms doesn't change
* due to cloning. */
if (mode != DRM_MODE_DPMS_ON) {
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
 
intel_sdvo->base.connectors_active = false;
 
intel_crtc_update_dpms(crtc);
} else {
intel_sdvo->base.connectors_active = true;
 
intel_crtc_update_dpms(crtc);
 
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
}
 
intel_modeset_check_state(connector->dev);
}
 
static enum drm_mode_status
intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
1617,6 → 1584,9
struct drm_device *dev = intel_sdvo->base.base.dev;
uint16_t hotplug;
 
if (!I915_HAS_HOTPLUG(dev))
return 0;
 
/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
* on the line. */
if (IS_I945G(dev) || IS_I945GM(dev))
2082,6 → 2052,23
goto done;
}
 
if (property == connector->dev->mode_config.aspect_ratio_property) {
switch (val) {
case DRM_MODE_PICTURE_ASPECT_NONE:
intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
break;
case DRM_MODE_PICTURE_ASPECT_4_3:
intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
break;
case DRM_MODE_PICTURE_ASPECT_16_9:
intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
break;
default:
return -EINVAL;
}
goto done;
}
 
#define CHECK_PROPERTY(name, NAME) \
if (intel_sdvo_connector->name == property) { \
if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
2183,11 → 2170,14
}
 
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.dpms = intel_sdvo_dpms,
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
.destroy = intel_sdvo_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
 
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
2257,7 → 2247,7
*/
static void
intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
struct intel_sdvo *sdvo, u32 reg)
struct intel_sdvo *sdvo)
{
struct sdvo_device_mapping *mapping;
 
2274,7 → 2264,7
 
static void
intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
struct intel_sdvo *sdvo, u32 reg)
struct intel_sdvo *sdvo)
{
struct sdvo_device_mapping *mapping;
u8 pin;
2284,10 → 2274,11
else
mapping = &dev_priv->sdvo_mappings[1];
 
if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
if (mapping->initialized &&
intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin))
pin = mapping->i2c_pin;
else
pin = GMBUS_PORT_DPB;
pin = GMBUS_PIN_DPB;
 
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
 
2409,8 → 2400,26
intel_attach_broadcast_rgb_property(&connector->base.base);
intel_sdvo->color_range_auto = true;
}
intel_attach_aspect_ratio_property(&connector->base.base);
intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
 
static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
{
struct intel_sdvo_connector *sdvo_connector;
 
sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL);
if (!sdvo_connector)
return NULL;
 
if (intel_connector_init(&sdvo_connector->base) < 0) {
kfree(sdvo_connector);
return NULL;
}
 
return sdvo_connector;
}
 
static bool
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
2422,7 → 2431,7
 
DRM_DEBUG_KMS("initialising DVI device %d\n", device);
 
intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
 
2476,7 → 2485,7
 
DRM_DEBUG_KMS("initialising TV type %d\n", type);
 
intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
 
2519,7 → 2528,7
 
DRM_DEBUG_KMS("initialising analog device %d\n", device);
 
intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
 
2555,7 → 2564,7
 
DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
 
intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
intel_sdvo_connector = intel_sdvo_connector_alloc();
if (!intel_sdvo_connector)
return false;
 
2935,7 → 2944,7
intel_sdvo->sdvo_reg = sdvo_reg;
intel_sdvo->is_sdvob = is_sdvob;
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo);
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
goto err_i2c_bus;
 
2956,7 → 2965,12
}
 
intel_encoder->compute_config = intel_sdvo_compute_config;
if (HAS_PCH_SPLIT(dev)) {
intel_encoder->disable = pch_disable_sdvo;
intel_encoder->post_disable = pch_post_disable_sdvo;
} else {
intel_encoder->disable = intel_disable_sdvo;
}
intel_encoder->pre_enable = intel_sdvo_pre_enable;
intel_encoder->enable = intel_enable_sdvo;
intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
2992,7 → 3006,7
*/
intel_sdvo->base.cloneable = 0;
 
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo);
 
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
/drivers/video/drm/i915/intel_sideband.c
49,7 → 49,7
(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
(bar << IOSF_BAR_SHIFT);
 
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
75,28 → 75,28
return 0;
}
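/*
 * All of the vlv_*_read/write helpers below funnel into
 * vlv_sideband_rw(): wait for the interface to go idle, write the
 * address/data registers, arm VLV_IOSF_DOORBELL_REQ with the encoded
 * command, then poll IOSF_SB_BUSY until the transaction completes.
 */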
 
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
{
u32 val = 0;
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
mutex_lock(&dev_priv->sb_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
SB_CRRDDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
 
return val;
}
 
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
mutex_lock(&dev_priv->sb_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
SB_CRWRDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
}
 
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
103,7 → 103,7
{
u32 val = 0;
 
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
SB_CRRDDA_NP, reg, &val);
 
return val;
111,7 → 111,7
 
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
SB_CRWRDA_NP, reg, &val);
}
 
121,10 → 121,10
 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
mutex_lock(&dev_priv->sb_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
SB_CRRDDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
mutex_unlock(&dev_priv->sb_lock);
 
return val;
}
132,7 → 132,7
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
SB_CRRDDA_NP, reg, &val);
return val;
}
139,7 → 139,7
 
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
SB_CRWRDA_NP, reg, &val);
}
 
146,7 → 146,7
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
SB_CRRDDA_NP, reg, &val);
return val;
}
153,7 → 153,7
 
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
SB_CRWRDA_NP, reg, &val);
}
 
160,7 → 160,7
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
SB_CRRDDA_NP, reg, &val);
return val;
}
167,7 → 167,7
 
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
SB_CRWRDA_NP, reg, &val);
}
 
174,7 → 174,7
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
SB_CRRDDA_NP, reg, &val);
return val;
}
181,7 → 181,7
 
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
SB_CRWRDA_NP, reg, &val);
}
 
213,7 → 213,7
enum intel_sbi_destination destination)
{
u32 value = 0;
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
243,7 → 243,7
{
u32 tmp;
 
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
/drivers/video/drm/i915/intel_sprite.c
33,6 → 33,8
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
51,13 → 53,15
}
}
 
static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
{
/* paranoia */
if (!mode->crtc_htotal)
if (!adjusted_mode->crtc_htotal)
return 1;
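
/*
 * crtc_clock is in kHz, i.e. pixels per millisecond, so:
 *   scanlines = usecs * crtc_clock / (1000 * crtc_htotal)
 * e.g. 100 us at a 148500 kHz dotclock with htotal 2200 gives
 * DIV_ROUND_UP(100 * 148500, 1000 * 2200) = 7 scanlines.
 */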
 
return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal);
return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock,
1000 * adjusted_mode->crtc_htotal);
}
 
/**
73,37 → 77,37
* until a subsequent call to intel_pipe_update_end(). That is done to
* avoid random delays.
*/
bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
void intel_pipe_update_start(struct intel_crtc *crtc)
{
ENTER();
struct drm_device *dev = crtc->base.dev;
const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
enum pipe pipe = crtc->pipe;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
DEFINE_WAIT(wait);
#if 0
// wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
// DEFINE_WAIT(wait);
 
vblank_start = mode->crtc_vblank_start;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vblank_start = adjusted_mode->crtc_vblank_start;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
vblank_start = DIV_ROUND_UP(vblank_start, 2);
 
/* FIXME needs to be calibrated sensibly */
min = vblank_start - usecs_to_scanlines(mode, 100);
min = vblank_start - usecs_to_scanlines(adjusted_mode, 100);
max = vblank_start - 1;
 
if (min <= 0 || max <= 0)
return false;
return;
 
// if (WARN_ON(drm_vblank_get(dev, pipe)))
// return false;
 
// local_irq_disable();
crtc->debug.min_vbl = min;
crtc->debug.max_vbl = max;
trace_i915_pipe_update_start(crtc);
 
// trace_i915_pipe_update_start(crtc, min, max);
 
for (;;) {
/*
* prepare_to_wait() has a memory barrier, which guarantees
130,14 → 134,14
}
 
finish_wait(wq, &wait);
#endif
 
// drm_vblank_put(dev, pipe);
crtc->debug.scanline_start = scanline;
crtc->debug.start_vbl_time = ktime_get();
crtc->debug.start_vbl_count =
dev->driver->get_vblank_counter(dev, pipe);
 
*start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
 
// trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count);
 
return true;
trace_i915_pipe_update_vblank_evaded(crtc);
}
 
/**
149,36 → 153,33
* re-enables interrupts and verifies the update was actually completed
* before a vblank.
*/
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
void intel_pipe_update_end(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
enum pipe pipe = crtc->pipe;
int scanline_end = intel_get_crtc_scanline(crtc);
u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
ktime_t end_vbl_time = ktime_get();
 
// trace_i915_pipe_update_end(crtc, end_vbl_count);
trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
 
// local_irq_enable();
 
if (start_vbl_count != end_vbl_count)
DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n",
pipe_name(pipe), start_vbl_count, end_vbl_count);
if (crtc->debug.start_vbl_count &&
crtc->debug.start_vbl_count != end_vbl_count) {
DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
pipe_name(pipe), crtc->debug.start_vbl_count,
end_vbl_count,
ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
crtc->debug.min_vbl, crtc->debug.max_vbl,
crtc->debug.scanline_start, scanline_end);
}
 
static void intel_update_primary_plane(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
int reg = DSPCNTR(crtc->plane);
 
if (crtc->primary_enabled)
I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
else
I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
}
 
static void
skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
186,85 → 187,39
struct drm_device *dev = drm_plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
u32 plane_ctl, stride;
u32 plane_ctl, stride_div, stride;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(drm_plane->state)->ckey;
unsigned long surf_addr;
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
int x_offset, y_offset;
struct intel_crtc_state *crtc_state = to_intel_crtc(crtc)->config;
int scaler_id;
 
plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE;
 
/* Mask out pixel format bits in case we change it */
plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
plane_ctl &= ~PLANE_CTL_TILED_MASK;
plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
plane_ctl &= ~PLANE_CTL_ROTATE_MASK;
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
 
/* Trickle feed has to be enabled */
plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
rotation = drm_plane->state->rotation;
plane_ctl |= skl_plane_ctl_rotation(rotation);
 
switch (fb->pixel_format) {
case DRM_FORMAT_RGB565:
plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
break;
case DRM_FORMAT_XBGR8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
break;
case DRM_FORMAT_XRGB8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
break;
/*
* XXX: For ARGB/ABGR formats we default to expecting scanout buffers
* to be already pre-multiplied. We need to add a knob (or a different
* DRM_FORMAT) for user-space to configure that.
*/
case DRM_FORMAT_ABGR8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
PLANE_CTL_ORDER_RGBX |
PLANE_CTL_ALPHA_SW_PREMULTIPLY;
break;
case DRM_FORMAT_ARGB8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
PLANE_CTL_ALPHA_SW_PREMULTIPLY;
break;
case DRM_FORMAT_YUYV:
plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
break;
case DRM_FORMAT_YVYU:
plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
break;
case DRM_FORMAT_UYVY:
plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
break;
case DRM_FORMAT_VYUY:
plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
break;
default:
BUG();
}
 
switch (obj->tiling_mode) {
case I915_TILING_NONE:
stride = fb->pitches[0] >> 6;
break;
case I915_TILING_X:
plane_ctl |= PLANE_CTL_TILED_X;
stride = fb->pitches[0] >> 9;
break;
default:
BUG();
}
if (intel_plane->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
 
plane_ctl |= PLANE_CTL_ENABLE;
plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
 
intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
 
stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
fb->pixel_format);
 
scaler_id = to_intel_plane_state(drm_plane->state)->scaler_id;
 
/* Sizes are 0 based */
src_w--;
src_h--;
271,90 → 226,79
crtc_w--;
crtc_h--;
 
I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
POSTING_READ(PLANE_SURF(pipe, plane));
}
 
static void
skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
{
struct drm_device *dev = drm_plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
 
I915_WRITE(PLANE_CTL(pipe, plane),
I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
 
/* Activate double buffered register update */
I915_WRITE(PLANE_CTL(pipe, plane), 0);
POSTING_READ(PLANE_CTL(pipe, plane));
 
intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
}
 
static int
skl_update_colorkey(struct drm_plane *drm_plane,
struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = drm_plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane;
u32 plane_ctl;
 
if (key->flags) {
I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
}
 
plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
if (key->flags & I915_SET_COLORKEY_DESTINATION)
plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
else if (key->flags & I915_SET_COLORKEY_SOURCE)
plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
 
POSTING_READ(PLANE_CTL(pipe, plane));
surf_addr = intel_plane_obj_offset(intel_plane, obj, 0);
 
return 0;
if (intel_rotation_90_or_270(rotation)) {
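/*
 * With 90/270 rotation the hw scans the surface column-wise: x and y
 * swap roles and the x offset is measured from the far edge, hence
 * the "stride * tile_height - y - (src_h + 1)" term below.
 */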
/* stride: Surface height in tiles */
tile_height = intel_tile_height(dev, fb->pixel_format,
fb->modifier[0], 0);
stride = DIV_ROUND_UP(fb->height, tile_height);
plane_size = (src_w << 16) | src_h;
x_offset = stride * tile_height - y - (src_h + 1);
y_offset = x;
} else {
stride = fb->pitches[0] / stride_div;
plane_size = (src_h << 16) | src_w;
x_offset = x;
y_offset = y;
}
plane_offset = y_offset << 16 | x_offset;
 
I915_WRITE(PLANE_OFFSET(pipe, plane), plane_offset);
I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
I915_WRITE(PLANE_SIZE(pipe, plane), plane_size);
 
/* program plane scaler */
if (scaler_id >= 0) {
uint32_t ps_ctrl = 0;
 
DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
PS_PLANE_SEL(plane));
ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) |
crtc_state->scaler_state.scalers[scaler_id].mode;
I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
((crtc_w + 1) << 16)|(crtc_h + 1));
 
I915_WRITE(PLANE_POS(pipe, plane), 0);
} else {
I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
}
 
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
I915_WRITE(PLANE_SURF(pipe, plane), surf_addr);
POSTING_READ(PLANE_SURF(pipe, plane));
}
 
static void
skl_get_colorkey(struct drm_plane *drm_plane,
struct drm_intel_sprite_colorkey *key)
skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
{
struct drm_device *dev = drm_plane->dev;
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct intel_plane *intel_plane = to_intel_plane(dplane);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane;
u32 plane_ctl;
const int plane = intel_plane->plane + 1;
 
key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
I915_WRITE(PLANE_CTL(pipe, plane), 0);
 
plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
I915_WRITE(PLANE_SURF(pipe, plane), 0);
POSTING_READ(PLANE_SURF(pipe, plane));
 
switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
case PLANE_CTL_KEY_ENABLE_DESTINATION:
key->flags = I915_SET_COLORKEY_DESTINATION;
break;
case PLANE_CTL_KEY_ENABLE_SOURCE:
key->flags = I915_SET_COLORKEY_SOURCE;
break;
default:
key->flags = I915_SET_COLORKEY_NONE;
intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
}
 
static void
chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
398,7 → 342,7
static void
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
406,23 → 350,17
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
u32 sprctl;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
u32 start_vbl_count;
bool atomic_update;
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(dplane->state)->ckey;
 
sprctl = I915_READ(SPCNTR(pipe, plane));
sprctl = SP_ENABLE;
 
/* Mask out pixel format bits in case we change it */
sprctl &= ~SP_PIXFORMAT_MASK;
sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
sprctl &= ~SP_TILED;
sprctl &= ~SP_ROTATE_180;
 
switch (fb->pixel_format) {
case DRM_FORMAT_YUYV:
sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YUYV;
475,12 → 413,6
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SP_TILED;
 
sprctl |= SP_ENABLE;
 
intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
 
/* Sizes are 0 based */
src_w--;
src_h--;
488,13 → 420,14
crtc_h--;
 
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
sprsurf_offset = intel_gen4_compute_page_offset(dev_priv,
&x, &y,
obj->tiling_mode,
pixel_size,
fb->pitches[0]);
linear_offset -= sprsurf_offset;
 
if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
if (dplane->state->rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SP_ROTATE_180;
 
x += src_w;
502,9 → 435,14
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
}
 
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
if (key->flags) {
I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
}
 
intel_update_primary_plane(intel_crtc);
if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SP_SOURCE_KEY;
 
if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
chv_update_csc(intel_plane, fb->pixel_format);
523,11 → 461,7
I915_WRITE(SPCNTR(pipe, plane), sprctl);
I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
sprsurf_offset);
 
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
POSTING_READ(SPSURF(pipe, plane));
}
 
static void
536,84 → 470,19
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
u32 start_vbl_count;
bool atomic_update;
 
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
I915_WRITE(SPCNTR(pipe, plane), 0);
 
intel_update_primary_plane(intel_crtc);
 
I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
~SP_ENABLE);
/* Activate double buffered register update */
I915_WRITE(SPSURF(pipe, plane), 0);
 
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
 
intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
POSTING_READ(SPSURF(pipe, plane));
}
 
static int
vlv_update_colorkey(struct drm_plane *dplane,
struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
u32 sprctl;
 
if (key->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
 
I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
 
sprctl = I915_READ(SPCNTR(pipe, plane));
sprctl &= ~SP_SOURCE_KEY;
if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SP_SOURCE_KEY;
I915_WRITE(SPCNTR(pipe, plane), sprctl);
 
POSTING_READ(SPKEYMSK(pipe, plane));
 
return 0;
}
 
static void
vlv_get_colorkey(struct drm_plane *dplane,
struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
u32 sprctl;
 
key->min_value = I915_READ(SPKEYMINVAL(pipe, plane));
key->max_value = I915_READ(SPKEYMAXVAL(pipe, plane));
key->channel_mask = I915_READ(SPKEYMSK(pipe, plane));
 
sprctl = I915_READ(SPCNTR(pipe, plane));
if (sprctl & SP_SOURCE_KEY)
key->flags = I915_SET_COLORKEY_SOURCE;
else
key->flags = I915_SET_COLORKEY_NONE;
}
 
static void
ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
621,23 → 490,16
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
enum pipe pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
u32 start_vbl_count;
bool atomic_update;
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(plane->state)->ckey;
 
sprctl = I915_READ(SPRCTL(pipe));
sprctl = SPRITE_ENABLE;
 
/* Mask out pixel format bits in case we change it */
sprctl &= ~SPRITE_PIXFORMAT_MASK;
sprctl &= ~SPRITE_RGB_ORDER_RGBX;
sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
sprctl &= ~SPRITE_TILED;
sprctl &= ~SPRITE_ROTATE_180;
 
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
675,8 → 537,6
else
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
 
sprctl |= SPRITE_ENABLE;
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
 
695,11 → 555,12
 
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
intel_gen4_compute_page_offset(dev_priv,
&x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
 
if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SPRITE_ROTATE_180;
 
/* HSW and BDW does this automagically in hardware */
711,9 → 572,16
}
}
 
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
if (key->flags) {
I915_WRITE(SPRKEYVAL(pipe), key->min_value);
I915_WRITE(SPRKEYMAX(pipe), key->max_value);
I915_WRITE(SPRKEYMSK(pipe), key->channel_mask);
}
 
intel_update_primary_plane(intel_crtc);
if (key->flags & I915_SET_COLORKEY_DESTINATION)
sprctl |= SPRITE_DEST_KEY;
else if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SPRITE_SOURCE_KEY;
 
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
733,11 → 601,7
I915_WRITE(SPRCTL(pipe), sprctl);
I915_WRITE(SPRSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
 
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
POSTING_READ(SPRSURF(pipe));
}
 
static void
746,94 → 610,21
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
u32 start_vbl_count;
bool atomic_update;
 
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
 
intel_update_primary_plane(intel_crtc);
 
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
I915_WRITE(SPRCTL(pipe), 0);
/* Can't leave the scaler enabled... */
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
 
I915_WRITE(SPRSURF(pipe), 0);
 
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
 
/*
* Avoid underruns when disabling the sprite.
* FIXME remove once watermark updates are done properly.
*/
intel_wait_for_vblank(dev, pipe);
 
intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
POSTING_READ(SPRSURF(pipe));
}
 
static int
ivb_update_colorkey(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane;
u32 sprctl;
int ret = 0;
 
intel_plane = to_intel_plane(plane);
 
I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
 
sprctl = I915_READ(SPRCTL(intel_plane->pipe));
sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
if (key->flags & I915_SET_COLORKEY_DESTINATION)
sprctl |= SPRITE_DEST_KEY;
else if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SPRITE_SOURCE_KEY;
I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
 
POSTING_READ(SPRKEYMSK(intel_plane->pipe));
 
return ret;
}
 
static void
ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane;
u32 sprctl;
 
intel_plane = to_intel_plane(plane);
 
key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
key->flags = 0;
 
sprctl = I915_READ(SPRCTL(intel_plane->pipe));
 
if (sprctl & SPRITE_DEST_KEY)
key->flags = I915_SET_COLORKEY_DESTINATION;
else if (sprctl & SPRITE_SOURCE_KEY)
key->flags = I915_SET_COLORKEY_SOURCE;
else
key->flags = I915_SET_COLORKEY_NONE;
}
 
static void
ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
841,23 → 632,16
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
u32 start_vbl_count;
bool atomic_update;
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(plane->state)->ckey;
 
dvscntr = I915_READ(DVSCNTR(pipe));
dvscntr = DVS_ENABLE;
 
/* Mask out pixel format bits in case we change it */
dvscntr &= ~DVS_PIXFORMAT_MASK;
dvscntr &= ~DVS_RGB_ORDER_XBGR;
dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
dvscntr &= ~DVS_TILED;
dvscntr &= ~DVS_ROTATE_180;
 
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
892,7 → 676,6
 
if (IS_GEN6(dev))
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
 
intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
pixel_size, true,
910,11 → 693,12
 
linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
intel_gen4_compute_page_offset(dev_priv,
&x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
 
if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
dvscntr |= DVS_ROTATE_180;
 
x += src_w;
922,9 → 706,16
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
}
 
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
if (key->flags) {
I915_WRITE(DVSKEYVAL(pipe), key->min_value);
I915_WRITE(DVSKEYMAX(pipe), key->max_value);
I915_WRITE(DVSKEYMSK(pipe), key->channel_mask);
}
 
intel_update_primary_plane(intel_crtc);
if (key->flags & I915_SET_COLORKEY_DESTINATION)
dvscntr |= DVS_DEST_KEY;
else if (key->flags & I915_SET_COLORKEY_SOURCE)
dvscntr |= DVS_SOURCE_KEY;
 
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
939,11 → 730,7
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_WRITE(DVSSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
 
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
POSTING_READ(DVSSURF(pipe));
}
 
static void
952,165 → 739,42
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
u32 start_vbl_count;
bool atomic_update;
 
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
 
intel_update_primary_plane(intel_crtc);
 
I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
I915_WRITE(DVSCNTR(pipe), 0);
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
/* Flush double buffered register updates */
 
I915_WRITE(DVSSURF(pipe), 0);
 
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
 
/*
* Avoid underruns when disabling the sprite.
* FIXME remove once watermark updates are done properly.
*/
intel_wait_for_vblank(dev, pipe);
 
intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
POSTING_READ(DVSSURF(pipe));
}
 
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
/*
* BDW signals flip done immediately if the plane
* is disabled, even if the plane enable is already
* armed to occur at the next vblank :(
*/
if (IS_BROADWELL(dev))
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
/*
* FIXME IPS should be fine as long as one plane is
* enabled, but in practice it seems to have problems
* when going from primary only to sprite only and vice
* versa.
*/
hsw_enable_ips(intel_crtc);
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
}
 
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
mutex_lock(&dev->struct_mutex);
if (dev_priv->fbc.plane == intel_crtc->plane)
intel_disable_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
/*
* FIXME IPS should be fine as long as one plane is
* enabled, but in practice it seems to have problems
* when going from primary only to sprite only and vice
* versa.
*/
hsw_disable_ips(intel_crtc);
}
 
static int
ilk_update_colorkey(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane;
u32 dvscntr;
int ret = 0;
 
intel_plane = to_intel_plane(plane);
 
I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
 
dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
if (key->flags & I915_SET_COLORKEY_DESTINATION)
dvscntr |= DVS_DEST_KEY;
else if (key->flags & I915_SET_COLORKEY_SOURCE)
dvscntr |= DVS_SOURCE_KEY;
I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
 
POSTING_READ(DVSKEYMSK(intel_plane->pipe));
 
return ret;
}
 
static void
ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane;
u32 dvscntr;
 
intel_plane = to_intel_plane(plane);
 
key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
key->flags = 0;
 
dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
 
if (dvscntr & DVS_DEST_KEY)
key->flags = I915_SET_COLORKEY_DESTINATION;
else if (dvscntr & DVS_SOURCE_KEY)
key->flags = I915_SET_COLORKEY_SOURCE;
else
key->flags = I915_SET_COLORKEY_NONE;
}
 
static bool colorkey_enabled(struct intel_plane *intel_plane)
{
struct drm_intel_sprite_colorkey key;
 
intel_plane->get_colorkey(&intel_plane->base, &key);
 
return key.flags != I915_SET_COLORKEY_NONE;
}
 
static int
intel_check_sprite_plane(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc);
struct drm_device *dev = plane->dev;
struct drm_crtc *crtc = state->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_framebuffer *fb = state->base.fb;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
struct drm_rect *src = &state->src;
struct drm_rect *dst = &state->dst;
struct drm_rect *orig_src = &state->orig_src;
const struct drm_rect *clip = &state->clip;
int hscale, vscale;
int max_scale, min_scale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
bool can_scale;
int pixel_size;
 
if (!fb) {
state->visible = false;
return 0;
}
 
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe) {
DRM_DEBUG_KMS("Wrong plane <-> crtc mapping\n");
1123,15 → 787,23
return -EINVAL;
}
 
/* Sprite planes can be linear or x-tiled surfaces */
switch (obj->tiling_mode) {
case I915_TILING_NONE:
case I915_TILING_X:
break;
default:
DRM_DEBUG_KMS("Unsupported tiling mode\n");
return -EINVAL;
/* setup can_scale, min_scale, max_scale */
if (INTEL_INFO(dev)->gen >= 9) {
/* use scaler when colorkey is not required */
if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
can_scale = 1;
min_scale = 1;
max_scale = skl_max_scale(intel_crtc, crtc_state);
} else {
can_scale = 0;
min_scale = DRM_PLANE_HELPER_NO_SCALING;
max_scale = DRM_PLANE_HELPER_NO_SCALING;
}
} else {
can_scale = intel_plane->can_scale;
max_scale = intel_plane->max_downscale << 16;
min_scale = intel_plane->can_scale ? 1 : (1 << 16);
}
 
/*
* FIXME the following code does a bunch of fuzzy adjustments to the
1138,11 → 810,8
* coordinates and sizes. We probably need some way to decide whether
* more strict checking should be done instead.
*/
max_scale = intel_plane->max_downscale << 16;
min_scale = intel_plane->can_scale ? 1 : (1 << 16);
 
drm_rect_rotate(src, fb->width << 16, fb->height << 16,
intel_plane->rotation);
state->base.rotation);
 
hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
BUG_ON(hscale < 0);
1183,13 → 852,13
drm_rect_height(dst) * vscale - drm_rect_height(src));
 
drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
intel_plane->rotation);
state->base.rotation);
 
/* sanity check to make sure the src viewport wasn't enlarged */
WARN_ON(src->x1 < (int) orig_src->x1 ||
src->y1 < (int) orig_src->y1 ||
src->x2 > (int) orig_src->x2 ||
src->y2 > (int) orig_src->y2);
WARN_ON(src->x1 < (int) state->base.src_x ||
src->y1 < (int) state->base.src_y ||
src->x2 > (int) state->base.src_x + state->base.src_w ||
src->y2 > (int) state->base.src_y + state->base.src_h);
 
/*
* Hardware doesn't handle subpixel coordinates.
1210,7 → 879,7
* Must keep src and dst the
* same if we can't scale.
*/
if (!intel_plane->can_scale)
if (!can_scale)
crtc_w &= ~1;
 
if (crtc_w == 0)
1222,7 → 891,7
if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
unsigned int width_bytes;
 
WARN_ON(!intel_plane->can_scale);
WARN_ON(!can_scale);
 
/* FIXME interlacing min height is 6 */
 
1232,11 → 901,12
if (src_w < 3 || src_h < 3)
state->visible = false;
 
pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
width_bytes = ((src_x * pixel_size) & 63) +
src_w * pixel_size;
 
if (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096) {
if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096)) {
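/* Pre-SKL sprite hw: at most a 2kx2k source rect and a 4096-byte
 * fetch width/stride. */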
DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
return -EINVAL;
}
1243,10 → 913,10
}
 
if (state->visible) {
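/*
 * Keep the clipped source rectangle in 16.16 fixed point; the commit
 * hook shifts it back down when programming the hardware.
 */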
src->x1 = src_x;
src->x2 = src_x + src_w;
src->y1 = src_y;
src->y2 = src_y + src_h;
src->x1 = src_x << 16;
src->x2 = (src_x + src_w) << 16;
src->y1 = src_y << 16;
src->y2 = (src_y + src_h) << 16;
}
 
dst->x1 = crtc_x;
1257,341 → 927,89
return 0;
}
 
static int
intel_prepare_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_device *dev = plane->dev;
struct drm_crtc *crtc = state->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *fb = state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_plane->obj;
int ret;
 
if (old_obj != obj) {
mutex_lock(&dev->struct_mutex);
 
/* Note that this will apply the VT-d workaround for scanouts,
* which is more restrictive than required for sprites. (The
* primary plane requires 256KiB alignment with 64 PTE padding,
* the sprite planes only require 128KiB alignment and 32 PTE
* padding.
*/
ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
if (ret == 0)
i915_gem_track_fb(old_obj, obj,
INTEL_FRONTBUFFER_SPRITE(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret)
return ret;
}
 
return 0;
}
 
static void
intel_commit_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_device *dev = plane->dev;
struct drm_crtc *crtc = state->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_crtc *crtc = state->base.crtc;
struct intel_plane *intel_plane = to_intel_plane(plane);
enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *fb = state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_plane->obj;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
struct drm_rect *dst = &state->dst;
const struct drm_rect *clip = &state->clip;
bool primary_enabled;
struct drm_framebuffer *fb = state->base.fb;
 
/*
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
*/
primary_enabled = !drm_rect_equals(dst, clip) || colorkey_enabled(intel_plane);
WARN_ON(!primary_enabled && !state->visible && intel_crtc->active);
crtc = crtc ? crtc : plane->crtc;
 
intel_plane->crtc_x = state->orig_dst.x1;
intel_plane->crtc_y = state->orig_dst.y1;
intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
intel_plane->src_x = state->orig_src.x1;
intel_plane->src_y = state->orig_src.y1;
intel_plane->src_w = drm_rect_width(&state->orig_src);
intel_plane->src_h = drm_rect_height(&state->orig_src);
intel_plane->obj = obj;
if (!crtc->state->active)
return;
 
if (intel_crtc->active) {
bool primary_was_enabled = intel_crtc->primary_enabled;
 
intel_crtc->primary_enabled = primary_enabled;
 
// if (primary_was_enabled != primary_enabled)
// intel_crtc_wait_for_pending_flips(crtc);
 
if (primary_was_enabled && !primary_enabled)
intel_pre_disable_primary(crtc);
 
if (state->visible) {
crtc_x = state->dst.x1;
crtc_y = state->dst.y1;
crtc_w = drm_rect_width(&state->dst);
crtc_h = drm_rect_height(&state->dst);
src_x = state->src.x1;
src_y = state->src.y1;
src_w = drm_rect_width(&state->src);
src_h = drm_rect_height(&state->src);
intel_plane->update_plane(plane, crtc, fb, obj,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
intel_plane->update_plane(plane, crtc, fb,
state->dst.x1, state->dst.y1,
drm_rect_width(&state->dst),
drm_rect_height(&state->dst),
state->src.x1 >> 16,
state->src.y1 >> 16,
drm_rect_width(&state->src) >> 16,
drm_rect_height(&state->src) >> 16);
} else {
intel_plane->disable_plane(plane, crtc);
}
 
 
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
 
if (!primary_was_enabled && primary_enabled)
intel_post_enable_primary(crtc);
}
 
/* Unpin old obj after new one is active to avoid ugliness */
if (old_obj && old_obj != obj) {
 
/*
* It's fairly common to simply update the position of
* an existing object. In that case, we don't need to
* wait for vblank to avoid ugliness, we only need to
* do the pin & ref bookkeeping.
*/
if (intel_crtc->active)
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(old_obj);
mutex_unlock(&dev->struct_mutex);
}
}
 
static int
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct intel_plane_state state;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int ret;
 
state.crtc = crtc;
state.fb = fb;
 
/* sample coordinates in 16.16 fixed point */
state.src.x1 = src_x;
state.src.x2 = src_x + src_w;
state.src.y1 = src_y;
state.src.y2 = src_y + src_h;
 
/* integer pixels */
state.dst.x1 = crtc_x;
state.dst.x2 = crtc_x + crtc_w;
state.dst.y1 = crtc_y;
state.dst.y2 = crtc_y + crtc_h;
 
state.clip.x1 = 0;
state.clip.y1 = 0;
state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
state.orig_src = state.src;
state.orig_dst = state.dst;
 
ret = intel_check_sprite_plane(plane, &state);
if (ret)
return ret;
 
ret = intel_prepare_sprite_plane(plane, &state);
if (ret)
return ret;
 
intel_commit_sprite_plane(plane, &state);
return 0;
}
 
static int
intel_disable_plane(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc;
enum pipe pipe;
 
if (!plane->fb)
return 0;
 
if (WARN_ON(!plane->crtc))
return -EINVAL;
 
intel_crtc = to_intel_crtc(plane->crtc);
pipe = intel_crtc->pipe;
 
if (intel_crtc->active) {
bool primary_was_enabled = intel_crtc->primary_enabled;
 
intel_crtc->primary_enabled = true;
 
intel_plane->disable_plane(plane, plane->crtc);
 
if (!primary_was_enabled && intel_crtc->primary_enabled)
intel_post_enable_primary(plane->crtc);
}
 
if (intel_plane->obj) {
if (intel_crtc->active)
intel_wait_for_vblank(dev, intel_plane->pipe);
 
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(intel_plane->obj);
i915_gem_track_fb(intel_plane->obj, NULL,
INTEL_FRONTBUFFER_SPRITE(pipe));
mutex_unlock(&dev->struct_mutex);
 
intel_plane->obj = NULL;
}
 
return 0;
}
 
static void intel_destroy_plane(struct drm_plane *plane)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
intel_disable_plane(plane);
drm_plane_cleanup(plane);
kfree(intel_plane);
}
 
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *set = data;
struct drm_plane *plane;
struct intel_plane *intel_plane;
struct drm_plane_state *plane_state;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
 
drm_modeset_lock_all(dev);
if (IS_VALLEYVIEW(dev) &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
 
plane = drm_plane_find(dev, set->plane_id);
if (!plane) {
ret = -ENOENT;
goto out_unlock;
}
if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
return -ENOENT;
 
intel_plane = to_intel_plane(plane);
ret = intel_plane->update_colorkey(plane, set);
drm_modeset_acquire_init(&ctx, 0);
 
out_unlock:
drm_modeset_unlock_all(dev);
return ret;
state = drm_atomic_state_alloc(plane->dev);
if (!state) {
ret = -ENOMEM;
goto out;
}
state->acquire_ctx = &ctx;
 
int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *get = data;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
drm_modeset_lock_all(dev);
 
plane = drm_plane_find(dev, get->plane_id);
if (!plane) {
ret = -ENOENT;
goto out_unlock;
while (1) {
plane_state = drm_atomic_get_plane_state(state, plane);
ret = PTR_ERR_OR_ZERO(plane_state);
if (!ret) {
to_intel_plane_state(plane_state)->ckey = *set;
ret = drm_atomic_commit(state);
}
 
intel_plane = to_intel_plane(plane);
intel_plane->get_colorkey(plane, get);
if (ret != -EDEADLK)
break;
 
out_unlock:
drm_modeset_unlock_all(dev);
return ret;
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
}
 
int intel_plane_set_property(struct drm_plane *plane,
struct drm_property *prop,
uint64_t val)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
uint64_t old_val;
int ret = -ENOENT;
 
if (prop == dev->mode_config.rotation_property) {
/* exactly one rotation angle please */
if (hweight32(val & 0xf) != 1)
return -EINVAL;
 
if (intel_plane->rotation == val)
return 0;
 
old_val = intel_plane->rotation;
intel_plane->rotation = val;
ret = intel_plane_restore(plane);
if (ret)
intel_plane->rotation = old_val;
}
drm_atomic_state_free(state);
 
out:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
}
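/*
 * Editor's sketch of the deadlock/backoff retry dance used above, reduced to
 * its skeleton (identifiers as in this file, error handling elided):
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *	state = drm_atomic_state_alloc(dev);
 *	state->acquire_ctx = &ctx;
 *	while (1) {
 *		ret = drm_atomic_commit(state);	// or any helper taking locks
 *		if (ret != -EDEADLK)
 *			break;
 *		drm_atomic_state_clear(state);	// cached state is now stale
 *		drm_modeset_backoff(&ctx);	// drop locks, wait, reacquire
 *	}
 *	drm_atomic_state_free(state);
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */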
 
int intel_plane_restore(struct drm_plane *plane)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
 
if (!plane->crtc || !plane->fb)
return 0;
 
return plane->funcs->update_plane(plane, plane->crtc, plane->fb,
intel_plane->crtc_x, intel_plane->crtc_y,
intel_plane->crtc_w, intel_plane->crtc_h,
intel_plane->src_x, intel_plane->src_y,
intel_plane->src_w, intel_plane->src_h);
}
 
void intel_plane_disable(struct drm_plane *plane)
{
if (!plane->crtc || !plane->fb)
return;
 
intel_disable_plane(plane);
}
 
static const struct drm_plane_funcs intel_plane_funcs = {
.update_plane = intel_update_plane,
.disable_plane = intel_disable_plane,
.destroy = intel_destroy_plane,
.set_property = intel_plane_set_property,
};
 
static uint32_t ilk_plane_formats[] = {
static const uint32_t ilk_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
1599,7 → 1017,7
DRM_FORMAT_VYUY,
};
 
static uint32_t snb_plane_formats[] = {
static const uint32_t snb_plane_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
1608,7 → 1026,7
DRM_FORMAT_VYUY,
};
 
static uint32_t vlv_plane_formats[] = {
static const uint32_t vlv_plane_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB8888,
1638,6 → 1056,7
intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
{
struct intel_plane *intel_plane;
struct intel_plane_state *state;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
int num_plane_formats;
1650,6 → 1069,13
if (!intel_plane)
return -ENOMEM;
 
state = intel_create_plane_state(&intel_plane->base);
if (!state) {
kfree(intel_plane);
return -ENOMEM;
}
intel_plane->base.state = &state->base;
 
switch (INTEL_INFO(dev)->gen) {
case 5:
case 6:
1657,8 → 1083,6
intel_plane->max_downscale = 16;
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
intel_plane->update_colorkey = ilk_update_colorkey;
intel_plane->get_colorkey = ilk_get_colorkey;
 
if (IS_GEN6(dev)) {
plane_formats = snb_plane_formats;
1682,8 → 1106,6
if (IS_VALLEYVIEW(dev)) {
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
intel_plane->update_colorkey = vlv_update_colorkey;
intel_plane->get_colorkey = vlv_get_colorkey;
 
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
1690,8 → 1112,6
} else {
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
intel_plane->update_colorkey = ivb_update_colorkey;
intel_plane->get_colorkey = ivb_get_colorkey;
 
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
1698,16 → 1118,10
}
break;
case 9:
/*
* FIXME: Skylake planes can be scaled (with some restrictions),
* but this is for another time.
*/
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
intel_plane->can_scale = true;
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
intel_plane->update_colorkey = skl_update_colorkey;
intel_plane->get_colorkey = skl_get_colorkey;
state->scaler_id = -1;
 
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
1719,7 → 1133,9
 
intel_plane->pipe = pipe;
intel_plane->plane = plane;
intel_plane->rotation = BIT(DRM_ROTATE_0);
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
intel_plane->check_plane = intel_check_sprite_plane;
intel_plane->commit_plane = intel_commit_sprite_plane;
possible_crtcs = (1 << pipe);
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
1730,16 → 1146,9
goto out;
}
 
if (!dev->mode_config.rotation_property)
dev->mode_config.rotation_property =
drm_mode_create_rotation_property(dev,
BIT(DRM_ROTATE_0) |
BIT(DRM_ROTATE_180));
intel_create_rotation_property(dev, intel_plane);
 
if (dev->mode_config.rotation_property)
drm_object_attach_property(&intel_plane->base.base,
dev->mode_config.rotation_property,
intel_plane->rotation);
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
 
out:
return ret;
/drivers/video/drm/i915/intel_uncore.c
23,9 → 23,12
 
#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"
 
#define FORCEWAKE_ACK_TIMEOUT_MS 2
#include <linux/pm_runtime.h>
 
#define FORCEWAKE_ACK_TIMEOUT_MS 50
 
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
 
40,6 → 43,25
 
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
 
static const char * const forcewake_domain_names[] = {
"render",
"blitter",
"media",
};
 
const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
 
if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
return forcewake_domain_names[id];
 
WARN_ON(id);
 
return "unknown";
}
 
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
47,481 → 69,275
"Device suspended\n");
}
 
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
/* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
DRM_ERROR("GT thread status wait timed out\n");
WARN_ON(d->reg_set == 0);
__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}
 
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
__raw_i915_write32(dev_priv, FORCEWAKE, 0);
/* something from same cacheline, but !FORCEWAKE */
__raw_posting_read(dev_priv, ECOBUS);
// __raw_i915_write32(dev_priv, FORCEWAKE, 0);
// /* something from same cacheline, but !FORCEWAKE */
// __raw_posting_read(dev_priv, ECOBUS);
}
 
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
int fw_engine)
static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
__raw_i915_write32(dev_priv, FORCEWAKE, 1);
/* something from same cacheline, but !FORCEWAKE */
__raw_posting_read(dev_priv, ECOBUS);
 
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
/* WaRsForcewakeWaitTC0:snb */
__gen6_gt_wait_for_thread_c0(dev_priv);
DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
intel_uncore_forcewake_domain_to_str(d->id));
}
 
static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_MT */
__raw_posting_read(dev_priv, ECOBUS);
__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}
 
static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
int fw_engine)
static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
u32 forcewake_ack;
 
if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
forcewake_ack = FORCEWAKE_ACK_HSW;
else
forcewake_ack = FORCEWAKE_MT_ACK;
 
if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
__raw_i915_write32(dev_priv, FORCEWAKE_MT,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_MT */
__raw_posting_read(dev_priv, ECOBUS);
 
if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
/* WaRsForcewakeWaitTC0:ivb,hsw */
__gen6_gt_wait_for_thread_c0(dev_priv);
DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
intel_uncore_forcewake_domain_to_str(d->id));
}
 
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
u32 gtfifodbg;
 
gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}
 
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
int fw_engine)
static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
__raw_i915_write32(dev_priv, FORCEWAKE, 0);
/* something from same cacheline, but !FORCEWAKE */
__raw_posting_read(dev_priv, ECOBUS);
gen6_gt_check_fifodbg(dev_priv);
/* something from same cacheline, but not from the set register */
if (d->reg_post)
__raw_posting_read(d->i915, d->reg_post);
}
 
static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
int fw_engine)
static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
__raw_i915_write32(dev_priv, FORCEWAKE_MT,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_MT */
__raw_posting_read(dev_priv, ECOBUS);
struct intel_uncore_forcewake_domain *d;
enum forcewake_domain_id id;
 
if (IS_GEN7(dev_priv->dev))
gen6_gt_check_fifodbg(dev_priv);
for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
fw_domain_wait_ack_clear(d);
fw_domain_get(d);
fw_domain_wait_ack(d);
}
}
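/*
 * Editor's note: the per-domain handshake above is deliberately ordered --
 * wait for the previous ack to drain, write the wake request, then wait for
 * the new ack -- so a still-clearing release is never mistaken for the ack
 * of this request.
 */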
 
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
int ret = 0;
struct intel_uncore_forcewake_domain *d;
enum forcewake_domain_id id;
 
/* On VLV, FIFO will be shared by both SW and HW.
* So, we need to read the FREE_ENTRIES every time. */
if (IS_VALLEYVIEW(dev_priv->dev))
dev_priv->uncore.fifo_count =
__raw_i915_read32(dev_priv, GTFIFOCTL) &
GT_FIFO_FREE_ENTRIES_MASK;
 
if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
udelay(10);
fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
fw_domain_put(d);
fw_domain_posting_read(d);
}
if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
++ret;
dev_priv->uncore.fifo_count = fifo;
}
dev_priv->uncore.fifo_count--;
 
return ret;
}
 
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_DISABLE(0xffff));
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_VLV */
__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}
struct intel_uncore_forcewake_domain *d;
enum forcewake_domain_id id;
 
static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
int fw_engine)
{
/* Check for Render Engine */
if (FORCEWAKE_RENDER & fw_engine) {
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_VLV) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
 
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_VLV) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for Render to ack.\n");
/* No need to do this for all domains, the first one found will do */
for_each_fw_domain(d, dev_priv, id) {
fw_domain_posting_read(d);
break;
}
 
/* Check for Media Engine */
if (FORCEWAKE_MEDIA & fw_engine) {
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_MEDIA_VLV) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
 
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_MEDIA_VLV) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for media to ack.\n");
}
}
 
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
int fw_engine)
static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
enum forcewake_domain_id id;
 
/* Check for Render Engine */
if (FORCEWAKE_RENDER & fw_engine)
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
if (dev_priv->uncore.fw_domains == 0)
return;
 
for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
fw_domain_reset(d);
 
/* Check for Media Engine */
if (FORCEWAKE_MEDIA & fw_engine)
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
 
/* something from same cacheline, but !FORCEWAKE_VLV */
__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
if (!IS_CHERRYVIEW(dev_priv->dev))
gen6_gt_check_fifodbg(dev_priv);
fw_domains_posting_read(dev_priv);
}
 
static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
if (fw_engine & FORCEWAKE_RENDER &&
dev_priv->uncore.fw_rendercount++ != 0)
fw_engine &= ~FORCEWAKE_RENDER;
if (fw_engine & FORCEWAKE_MEDIA &&
dev_priv->uncore.fw_mediacount++ != 0)
fw_engine &= ~FORCEWAKE_MEDIA;
 
if (fw_engine)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);
 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
/* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
DRM_ERROR("GT thread status wait timed out\n");
}
 
static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
unsigned long irqflags;
fw_domains_get(dev_priv, fw_domains);
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
if (fw_engine & FORCEWAKE_RENDER) {
WARN_ON(!dev_priv->uncore.fw_rendercount);
if (--dev_priv->uncore.fw_rendercount != 0)
fw_engine &= ~FORCEWAKE_RENDER;
/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
__gen6_gt_wait_for_thread_c0(dev_priv);
}
 
if (fw_engine & FORCEWAKE_MEDIA) {
WARN_ON(!dev_priv->uncore.fw_mediacount);
if (--dev_priv->uncore.fw_mediacount != 0)
fw_engine &= ~FORCEWAKE_MEDIA;
}
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
u32 gtfifodbg;
 
if (fw_engine)
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);
 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}
 
static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
_MASKED_BIT_DISABLE(0xffff));
 
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
_MASKED_BIT_DISABLE(0xffff));
 
__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
_MASKED_BIT_DISABLE(0xffff));
fw_domains_put(dev_priv, fw_domains);
gen6_gt_check_fifodbg(dev_priv);
}
 
static void
__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
/* Check for Render Engine */
if (FORCEWAKE_RENDER & fw_engine) {
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_RENDER_GEN9) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
 
__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_RENDER_GEN9) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for Render to ack.\n");
return count & GT_FIFO_FREE_ENTRIES_MASK;
}
 
/* Check for Media Engine */
if (FORCEWAKE_MEDIA & fw_engine) {
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_MEDIA_GEN9) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
int ret = 0;
 
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
/* On VLV, FIFO will be shared by both SW and HW.
* So, we need to read the FREE_ENTRIES every time. */
if (IS_VALLEYVIEW(dev_priv->dev))
dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
 
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_MEDIA_GEN9) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for Media to ack.\n");
}
if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
u32 fifo = fifo_free_entries(dev_priv);
 
/* Check for Blitter Engine */
if (FORCEWAKE_BLITTER & fw_engine) {
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_BLITTER_GEN9) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n");
 
__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_BLITTER_GEN9) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
udelay(10);
fifo = fifo_free_entries(dev_priv);
}
if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
++ret;
dev_priv->uncore.fifo_count = fifo;
}
dev_priv->uncore.fifo_count--;
 
static void
__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
/* Check for Render Engine */
if (FORCEWAKE_RENDER & fw_engine)
__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
 
/* Check for Media Engine */
if (FORCEWAKE_MEDIA & fw_engine)
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
 
/* Check for Blitter Engine */
if (FORCEWAKE_BLITTER & fw_engine)
__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
return ret;
}
 
static void
gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
static void intel_uncore_fw_release_timer(unsigned long arg)
{
struct intel_uncore_forcewake_domain *domain = (void *)arg;
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
assert_device_not_suspended(domain->i915);
 
if (FORCEWAKE_RENDER & fw_engine) {
if (dev_priv->uncore.fw_rendercount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_RENDER);
}
spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
if (WARN_ON(domain->wake_count == 0))
domain->wake_count++;
 
if (FORCEWAKE_MEDIA & fw_engine) {
if (dev_priv->uncore.fw_mediacount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_MEDIA);
}
if (--domain->wake_count == 0)
domain->i915->uncore.funcs.force_wake_put(domain->i915,
1 << domain->id);
 
if (FORCEWAKE_BLITTER & fw_engine) {
if (dev_priv->uncore.fw_blittercount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_BLITTER);
spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}
 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
 
static void
gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
int retry_count = 100;
enum forcewake_domain_id id;
enum forcewake_domains fw = 0, active_domains;
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly. Wait until all pending
* timers have run before holding.
*/
while (1) {
active_domains = 0;
 
if (FORCEWAKE_RENDER & fw_engine) {
WARN_ON(dev_priv->uncore.fw_rendercount == 0);
if (--dev_priv->uncore.fw_rendercount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_RENDER);
}
for_each_fw_domain(domain, dev_priv, id) {
if (del_timer_sync(&domain->timer) == 0)
continue;
 
if (FORCEWAKE_MEDIA & fw_engine) {
WARN_ON(dev_priv->uncore.fw_mediacount == 0);
if (--dev_priv->uncore.fw_mediacount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_MEDIA);
intel_uncore_fw_release_timer((unsigned long)domain);
}
 
if (FORCEWAKE_BLITTER & fw_engine) {
WARN_ON(dev_priv->uncore.fw_blittercount == 0);
if (--dev_priv->uncore.fw_blittercount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_BLITTER);
}
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
for_each_fw_domain(domain, dev_priv, id) {
// if (timer_pending(&domain->timer))
// active_domains |= (1 << id);
}
 
static void gen6_force_wake_timer(unsigned long arg)
{
struct drm_i915_private *dev_priv = (void *)arg;
unsigned long irqflags;
if (active_domains == 0)
break;
 
assert_device_not_suspended(dev_priv);
if (--retry_count == 0) {
DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
break;
}
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
WARN_ON(!dev_priv->uncore.forcewake_count);
 
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
intel_runtime_pm_put(dev_priv);
change_task();
}
 
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
WARN_ON(active_domains);
 
if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
gen6_force_wake_timer((unsigned long)dev_priv);
for_each_fw_domain(domain, dev_priv, id)
if (domain->wake_count)
fw |= 1 << id;
 
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly
*/
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (fw)
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
 
if (IS_VALLEYVIEW(dev))
vlv_force_wake_reset(dev_priv);
else if (IS_GEN6(dev) || IS_GEN7(dev))
__gen6_gt_force_wake_reset(dev_priv);
fw_domains_reset(dev_priv, FORCEWAKE_ALL);
 
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
__gen7_gt_force_wake_mt_reset(dev_priv);
 
if (IS_GEN9(dev))
__gen9_gt_force_wake_mt_reset(dev_priv);
 
if (restore) { /* If reset with a user forcewake, try to restore */
unsigned fw = 0;
 
if (IS_VALLEYVIEW(dev)) {
if (dev_priv->uncore.fw_rendercount)
fw |= FORCEWAKE_RENDER;
 
if (dev_priv->uncore.fw_mediacount)
fw |= FORCEWAKE_MEDIA;
} else if (IS_GEN9(dev)) {
if (dev_priv->uncore.fw_rendercount)
fw |= FORCEWAKE_RENDER;
 
if (dev_priv->uncore.fw_mediacount)
fw |= FORCEWAKE_MEDIA;
 
if (dev_priv->uncore.fw_blittercount)
fw |= FORCEWAKE_BLITTER;
} else {
if (dev_priv->uncore.forcewake_count)
fw = FORCEWAKE_ALL;
}
 
if (fw)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
 
if (IS_GEN6(dev) || IS_GEN7(dev))
dev_priv->uncore.fifo_count =
__raw_i915_read32(dev_priv, GTFIFOCTL) &
GT_FIFO_FREE_ENTRIES_MASK;
fifo_free_entries(dev_priv);
}
 
if (!restore)
assert_forcewakes_inactive(dev_priv);
 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
 
static void __intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake)
static void intel_uncore_ellc_detect(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (HAS_FPGA_DBG_UNCLAIMED(dev))
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 
if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
(__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
INTEL_INFO(dev)->gen >= 9) &&
(__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
/* The docs do not explain exactly how the calculation can be
* made. It is somewhat guessable, but for now, it's always
* 128MB.
530,12 → 346,29
dev_priv->ellc_size = 128;
DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
}
}
 
static void __intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (HAS_FPGA_DBG_UNCLAIMED(dev))
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 
/* clear out old GT FIFO errors */
if (IS_GEN6(dev) || IS_GEN7(dev))
__raw_i915_write32(dev_priv, GTFIFODBG,
__raw_i915_read32(dev_priv, GTFIFODBG));
 
/* WaDisableShadowRegForCpd:chv */
if (IS_CHERRYVIEW(dev)) {
__raw_i915_write32(dev_priv, GTFIFOCTL,
__raw_i915_read32(dev_priv, GTFIFOCTL) |
GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
GT_FIFO_CTL_RC6_POLICY_STALL);
}
 
intel_uncore_forcewake_reset(dev, restore_forcewake);
}
 
551,13 → 384,41
intel_disable_gt_powersave(dev);
}
 
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
* be called at the beginning of the sequence followed by a call to
* gen6_gt_force_wake_put() at the end of the sequence.
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
enum forcewake_domain_id id;
 
if (!dev_priv->uncore.funcs.force_wake_get)
return;
 
fw_domains &= dev_priv->uncore.fw_domains;
 
for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
if (domain->wake_count++)
fw_domains &= ~(1 << id);
}
 
if (fw_domains)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
 
/**
* intel_uncore_forcewake_get - grab forcewake domain references
* @dev_priv: i915 device instance
* @fw_domains: forcewake domains to get reference on
*
* This function can be used to get the GT's forcewake domain references.
* Normal register access will handle the forcewake domains automatically.
* However, if some sequence requires the GT to not power down a particular
* forcewake domain, this function should be called at the beginning of the
* sequence, and the reference should subsequently be dropped by a symmetric
* call to intel_uncore_forcewake_put(). Usually the caller wants all domains
* to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
unsigned long irqflags;
 
564,72 → 425,109
if (!dev_priv->uncore.funcs.force_wake_get)
return;
 
intel_runtime_pm_get(dev_priv);
WARN_ON(dev_priv->pm.suspended);
 
/* Redirect to Gen9 specific routine */
if (IS_GEN9(dev_priv->dev))
return gen9_force_wake_get(dev_priv, fw_engine);
 
/* Redirect to VLV specific routine */
if (IS_VALLEYVIEW(dev_priv->dev))
return vlv_force_wake_get(dev_priv, fw_engine);
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
__intel_uncore_forcewake_get(dev_priv, fw_domains);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
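/*
 * Editor's usage sketch (illustration only, not part of this revision):
 * keep the render well awake across a sequence of dependent accesses.
 */
static void example_forcewake_sequence(struct drm_i915_private *dev_priv)
{
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
	/* several register accesses that must not observe the GT
	 * powering down in between */
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
}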
 
/*
* see gen6_gt_force_wake_get()
/**
* intel_uncore_forcewake_get__locked - grab forcewake domain references
* @dev_priv: i915 device instance
* @fw_domains: forcewake domains to get reference on
*
* See intel_uncore_forcewake_get(). This variant places the onus
* on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
*/
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
unsigned long irqflags;
bool delayed = false;
assert_spin_locked(&dev_priv->uncore.lock);
 
if (!dev_priv->uncore.funcs.force_wake_put)
if (!dev_priv->uncore.funcs.force_wake_get)
return;
 
/* Redirect to Gen9 specific routine */
if (IS_GEN9(dev_priv->dev)) {
gen9_force_wake_put(dev_priv, fw_engine);
goto out;
__intel_uncore_forcewake_get(dev_priv, fw_domains);
}
 
/* Redirect to VLV specific routine */
if (IS_VALLEYVIEW(dev_priv->dev)) {
vlv_force_wake_put(dev_priv, fw_engine);
goto out;
static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
enum forcewake_domain_id id;
 
if (!dev_priv->uncore.funcs.force_wake_put)
return;
 
fw_domains &= dev_priv->uncore.fw_domains;
 
for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
if (WARN_ON(domain->wake_count == 0))
continue;
 
if (--domain->wake_count)
continue;
 
domain->wake_count++;
fw_domain_arm_timer(domain);
}
}
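/*
 * Editor's note (illustration): the final put above does not drop the
 * hardware wake immediately -- the reference is kept and handed to the
 * release timer via fw_domain_arm_timer(), so bursts of accesses share one
 * wake/ack handshake instead of toggling it per register.
 */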
 
/**
* intel_uncore_forcewake_put - release a forcewake domain reference
* @dev_priv: i915 device instance
* @fw_domains: forcewake domains to put references
*
* This function drops the device-level forcewakes for specified
* domains obtained by intel_uncore_forcewake_get().
*/
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
unsigned long irqflags;
 
if (!dev_priv->uncore.funcs.force_wake_put)
return;
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
WARN_ON(!dev_priv->uncore.forcewake_count);
 
if (--dev_priv->uncore.forcewake_count == 0) {
dev_priv->uncore.forcewake_count++;
delayed = true;
// mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
// GetTimerTicks() + 1);
__intel_uncore_forcewake_put(dev_priv, fw_domains);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
out:
if (!delayed)
intel_runtime_pm_put(dev_priv);
/**
* intel_uncore_forcewake_put__locked - release forcewake domain references
* @dev_priv: i915 device instance
* @fw_domains: forcewake domains to put references
*
* See intel_uncore_forcewake_put(). This variant places the onus
* on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
*/
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
assert_spin_locked(&dev_priv->uncore.lock);
 
if (!dev_priv->uncore.funcs.force_wake_put)
return;
 
__intel_uncore_forcewake_put(dev_priv, fw_domains);
}
 
void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
struct intel_uncore_forcewake_domain *domain;
enum forcewake_domain_id id;
 
if (!dev_priv->uncore.funcs.force_wake_get)
return;
 
WARN_ON(dev_priv->uncore.forcewake_count > 0);
for_each_fw_domain(domain, dev_priv, id)
WARN_ON(domain->wake_count);
}
 
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
#define NEEDS_FORCE_WAKE(reg) \
((reg) < 0x40000 && (reg) != FORCEWAKE)
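/* Editor's note (assumption): offsets at or above 0x40000 sit outside the
 * GT range, so only the lower registers need the forcewake dance, and
 * FORCEWAKE itself is exempt to avoid recursing into the handshake. */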
 
#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
647,9 → 545,9
 
#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x2000, 0x4000) || \
REG_RANGE((reg), 0x5000, 0x8000) || \
REG_RANGE((reg), 0x5200, 0x8000) || \
REG_RANGE((reg), 0x8300, 0x8500) || \
REG_RANGE((reg), 0xB000, 0xC000) || \
REG_RANGE((reg), 0xB000, 0xB480) || \
REG_RANGE((reg), 0xE000, 0xE800))
 
#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
658,7 → 556,7
REG_RANGE((reg), 0x12000, 0x14000) || \
REG_RANGE((reg), 0x1A000, 0x1C000) || \
REG_RANGE((reg), 0x1E800, 0x1EA00) || \
REG_RANGE((reg), 0x30000, 0x40000))
REG_RANGE((reg), 0x30000, 0x38000))
 
#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x4000, 0x5000) || \
665,10 → 563,7
REG_RANGE((reg), 0x8000, 0x8300) || \
REG_RANGE((reg), 0x8500, 0x8600) || \
REG_RANGE((reg), 0x9000, 0xB000) || \
REG_RANGE((reg), 0xC000, 0xC800) || \
REG_RANGE((reg), 0xF000, 0x10000) || \
REG_RANGE((reg), 0x14000, 0x14400) || \
REG_RANGE((reg), 0x22000, 0x24000))
REG_RANGE((reg), 0xF000, 0x10000))
 
#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
REG_RANGE((reg), 0xB00, 0x2000)
725,6 → 620,7
WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
when, op, reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
i915.mmio_debug--; /* Only report the first N failures */
}
}
 
731,142 → 627,172
static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
if (i915.mmio_debug)
static bool mmio_debug_once = true;
 
if (i915.mmio_debug || !mmio_debug_once)
return;
 
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
DRM_DEBUG("Unclaimed register detected, "
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
i915.mmio_debug = mmio_debug_once--;
}
}
 
#define REG_READ_HEADER(x) \
unsigned long irqflags; \
#define GEN2_READ_HEADER(x) \
u##x val = 0; \
assert_device_not_suspended(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
assert_device_not_suspended(dev_priv);
 
#define REG_READ_FOOTER \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
#define GEN2_READ_FOOTER \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
 
#define __gen4_read(x) \
#define __gen2_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
GEN2_READ_HEADER(x); \
val = __raw_i915_read##x(dev_priv, reg); \
REG_READ_FOOTER; \
GEN2_READ_FOOTER; \
}
 
#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
GEN2_READ_HEADER(x); \
ilk_dummy_write(dev_priv); \
val = __raw_i915_read##x(dev_priv, reg); \
REG_READ_FOOTER; \
GEN2_READ_FOOTER; \
}
 
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)
 
#undef __gen5_read
#undef __gen2_read
 
#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER
 
#define GEN6_READ_HEADER(x) \
unsigned long irqflags; \
u##x val = 0; \
assert_device_not_suspended(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
#define GEN6_READ_FOOTER \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
 
static inline void __force_wake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
enum forcewake_domain_id id;
 
if (WARN_ON(!fw_domains))
return;
 
/* Ideally GCC would constant-fold and eliminate this loop */
for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
if (domain->wake_count) {
fw_domains &= ~(1 << id);
continue;
}
 
domain->wake_count++;
fw_domain_arm_timer(domain);
}
 
if (fw_domains)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
 
#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
GEN6_READ_HEADER(x); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
 
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
GEN6_READ_HEADER(x); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (dev_priv->uncore.forcewake_count == 0 && \
NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
FORCEWAKE_ALL); \
if (NEEDS_FORCE_WAKE(reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
val = __raw_i915_read##x(dev_priv, reg); \
dev_priv->uncore.funcs.force_wake_put(dev_priv, \
FORCEWAKE_ALL); \
} else { \
val = __raw_i915_read##x(dev_priv, reg); \
} \
hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
REG_READ_FOOTER; \
GEN6_READ_FOOTER; \
}
 
#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
unsigned fwengine = 0; \
REG_READ_HEADER(x); \
if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine = FORCEWAKE_RENDER; \
} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine = FORCEWAKE_MEDIA; \
} \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
GEN6_READ_HEADER(x); \
if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
val = __raw_i915_read##x(dev_priv, reg); \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
REG_READ_FOOTER; \
GEN6_READ_FOOTER; \
}
 
#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
unsigned fwengine = 0; \
REG_READ_HEADER(x); \
if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine = FORCEWAKE_RENDER; \
} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine = FORCEWAKE_MEDIA; \
} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine |= FORCEWAKE_RENDER; \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine |= FORCEWAKE_MEDIA; \
} \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
GEN6_READ_HEADER(x); \
if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
__force_wake_get(dev_priv, \
FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
val = __raw_i915_read##x(dev_priv, reg); \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
REG_READ_FOOTER; \
GEN6_READ_FOOTER; \
}
 
#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
#define SKL_NEEDS_FORCE_WAKE(reg) \
((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
 
#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (!SKL_NEEDS_FORCE_WAKE(reg)) \
fw_engine = 0; \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
fw_engine = FORCEWAKE_RENDER; \
else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
fw_engine = FORCEWAKE_MEDIA; \
else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
else \
fw_engine = FORCEWAKE_BLITTER; \
if (fw_engine) \
__force_wake_get(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
} else { \
unsigned fwengine = 0; \
if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine = FORCEWAKE_RENDER; \
} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine = FORCEWAKE_MEDIA; \
} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine |= FORCEWAKE_RENDER; \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine |= FORCEWAKE_MEDIA; \
} else { \
if (dev_priv->uncore.fw_blittercount == 0) \
fwengine = FORCEWAKE_BLITTER; \
} \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
val = __raw_i915_read##x(dev_priv, reg); \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
} \
REG_READ_FOOTER; \
hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
GEN6_READ_FOOTER; \
}
 
__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
883,56 → 809,68
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)
 
#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER
#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
 
#define REG_WRITE_HEADER \
unsigned long irqflags; \
#define GEN2_WRITE_HEADER \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_device_not_suspended(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
#define REG_WRITE_FOOTER \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
#define GEN2_WRITE_FOOTER
 
#define __gen4_write(x) \
#define __gen2_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
REG_WRITE_HEADER; \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
GEN2_WRITE_HEADER; \
__raw_i915_write##x(dev_priv, reg, val); \
REG_WRITE_FOOTER; \
GEN2_WRITE_FOOTER; \
}
 
#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
REG_WRITE_HEADER; \
GEN2_WRITE_HEADER; \
ilk_dummy_write(dev_priv); \
__raw_i915_write##x(dev_priv, reg, val); \
REG_WRITE_FOOTER; \
GEN2_WRITE_FOOTER; \
}
 
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)
 
#undef __gen5_write
#undef __gen2_write
 
#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER
 
#define GEN6_WRITE_HEADER \
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_device_not_suspended(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
 
#define GEN6_WRITE_FOOTER \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
 
#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
u32 __fifo_ret = 0; \
REG_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
GEN6_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE(reg)) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
__raw_i915_write##x(dev_priv, reg, val); \
939,7 → 877,7
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
REG_WRITE_FOOTER; \
GEN6_WRITE_FOOTER; \
}
 
#define __hsw_write(x) \
946,8 → 884,8
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
u32 __fifo_ret = 0; \
REG_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
GEN6_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE(reg)) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
957,9 → 895,17
} \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
REG_WRITE_FOOTER; \
GEN6_WRITE_FOOTER; \
}
 
#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
off_t reg, u##x val, bool trace) { \
GEN6_WRITE_HEADER; \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
 
static const u32 gen8_shadowed_regs[] = {
FORCEWAKE_MT,
GEN6_RPNSWREQ,
984,50 → 930,31
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
REG_WRITE_HEADER; \
GEN6_WRITE_HEADER; \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
FORCEWAKE_ALL); \
if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
__raw_i915_write##x(dev_priv, reg, val); \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_put(dev_priv, \
FORCEWAKE_ALL); \
} else { \
__raw_i915_write##x(dev_priv, reg, val); \
} \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
REG_WRITE_FOOTER; \
GEN6_WRITE_FOOTER; \
}
 
#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
unsigned fwengine = 0; \
bool shadowed = is_gen8_shadowed(dev_priv, reg); \
REG_WRITE_HEADER; \
GEN6_WRITE_HEADER; \
if (!shadowed) { \
if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine = FORCEWAKE_RENDER; \
} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine = FORCEWAKE_MEDIA; \
} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine |= FORCEWAKE_RENDER; \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine |= FORCEWAKE_MEDIA; \
if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
} \
} \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
__raw_i915_write##x(dev_priv, reg, val); \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
REG_WRITE_FOOTER; \
GEN6_WRITE_FOOTER; \
}
 
static const u32 gen9_shadowed_regs[] = {
1057,36 → 984,26
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
bool trace) { \
REG_WRITE_HEADER; \
if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
is_gen9_shadowed(dev_priv, reg)) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (!SKL_NEEDS_FORCE_WAKE(reg) || \
is_gen9_shadowed(dev_priv, reg)) \
fw_engine = 0; \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
fw_engine = FORCEWAKE_RENDER; \
else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
fw_engine = FORCEWAKE_MEDIA; \
else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
else \
fw_engine = FORCEWAKE_BLITTER; \
if (fw_engine) \
__force_wake_get(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
} else { \
unsigned fwengine = 0; \
if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine = FORCEWAKE_RENDER; \
} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine = FORCEWAKE_MEDIA; \
} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine |= FORCEWAKE_RENDER; \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine |= FORCEWAKE_MEDIA; \
} else { \
if (dev_priv->uncore.fw_blittercount == 0) \
fwengine = FORCEWAKE_BLITTER; \
} \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
fwengine); \
__raw_i915_write##x(dev_priv, reg, val); \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_put(dev_priv, \
fwengine); \
} \
REG_WRITE_FOOTER; \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
GEN6_WRITE_FOOTER; \
}
 
__gen9_write(8)
1109,14 → 1026,10
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)
 
#undef __gen9_write
#undef __chv_write
1123,10 → 1036,9
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER
#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
 
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
1144,24 → 1056,87
dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
 
void intel_uncore_init(struct drm_device *dev)
 
static void fw_domain_init(struct drm_i915_private *dev_priv,
enum forcewake_domain_id domain_id,
u32 reg_set, u32 reg_ack)
{
struct intel_uncore_forcewake_domain *d;
 
if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
return;
 
d = &dev_priv->uncore.fw_domain[domain_id];
 
WARN_ON(d->wake_count);
 
d->wake_count = 0;
d->reg_set = reg_set;
d->reg_ack = reg_ack;
 
if (IS_GEN6(dev_priv)) {
d->val_reset = 0;
d->val_set = FORCEWAKE_KERNEL;
d->val_clear = 0;
} else {
/* WaRsClearFWBitsAtReset:bdw,skl */
d->val_reset = _MASKED_BIT_DISABLE(0xffff);
d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
}
 
if (IS_VALLEYVIEW(dev_priv))
d->reg_post = FORCEWAKE_ACK_VLV;
else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
d->reg_post = ECOBUS;
else
d->reg_post = 0;
 
d->i915 = dev_priv;
d->id = domain_id;
 
setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);
 
dev_priv->uncore.fw_domains |= (1 << domain_id);
 
fw_domain_reset(d);
}
 
static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
setup_timer(&dev_priv->uncore.force_wake_timer,
gen6_force_wake_timer, (unsigned long)dev_priv);
if (INTEL_INFO(dev_priv->dev)->gen <= 5)
return;
 
__intel_uncore_early_sanitize(dev, false);
 
if (IS_GEN9(dev)) {
dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
FORCEWAKE_ACK_RENDER_GEN9);
fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
FORCEWAKE_BLITTER_GEN9,
FORCEWAKE_ACK_BLITTER_GEN9);
fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
if (!IS_CHERRYVIEW(dev))
dev_priv->uncore.funcs.force_wake_put =
fw_domains_put_with_fifo;
else
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
 
1174,36 → 1149,61
* (correctly) interpreted by the test below as MT
* forcewake being disabled.
*/
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
dev_priv->uncore.funcs.force_wake_put =
fw_domains_put_with_fifo;
 
/* We need to init first for ECOBUS access and then
* determine later if we want to reinit, in case MT access is
* not working. At this stage we don't know which flavour this
* IVB is, so it is better to also reset the gen6 fw registers
* before the ECOBUS check.
*/
 
__raw_i915_write32(dev_priv, FORCEWAKE, 0);
__raw_posting_read(dev_priv, ECOBUS);
 
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_MT_ACK);
 
mutex_lock(&dev->struct_mutex);
__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
 
if (ecobus & FORCEWAKE_MT_ENABLE) {
dev_priv->uncore.funcs.force_wake_get =
__gen7_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put =
__gen7_gt_force_wake_mt_put;
} else {
if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_get;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
} else if (IS_GEN6(dev)) {
dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_get;
fw_domains_get_with_thread_status;
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_put;
fw_domains_put_with_fifo;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
 
/* All future platforms are expected to require complex power gating */
WARN_ON(dev_priv->uncore.fw_domains == 0);
}
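
The Ivybridge branch above probes rather than trusts the hardware: it installs the MT callbacks, takes forcewake once, reads ECOBUS to see whether the BIOS actually enabled MT forcewake, and re-registers the legacy interface if not. A minimal sketch of that probe-and-fall-back shape, with hypothetical stand-ins for the hardware accessors:

#include <stdbool.h>
#include <stdio.h>

struct fw_funcs {
    void (*get)(void);
    void (*put)(void);
};

static void mt_get(void)     { puts("mt get"); }
static void mt_put(void)     { puts("mt put"); }
static void legacy_get(void) { puts("legacy get"); }
static void legacy_put(void) { puts("legacy put"); }

/* Pretend ECOBUS read; true when MT forcewake is wired up by the BIOS. */
static bool ecobus_reports_mt(void) { return false; }

static void probe(struct fw_funcs *f)
{
    /* Optimistically install the MT callbacks... */
    f->get = mt_get;
    f->put = mt_put;

    /* ...then take forcewake once and ask the hardware if it worked. */
    f->get();
    bool mt_ok = ecobus_reports_mt();
    f->put();

    if (!mt_ok) {   /* fall back to the legacy single-domain interface */
        f->get = legacy_get;
        f->put = legacy_put;
    }
}

int main(void)
{
    struct fw_funcs f;

    probe(&f);
    f.get();   /* now routed to whichever flavour probing selected */
    f.put();
    return 0;
}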
 
void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
i915_check_vgpu(dev);
 
intel_uncore_ellc_detect(dev);
intel_uncore_fw_domains_init(dev);
__intel_uncore_early_sanitize(dev, false);
 
switch (INTEL_INFO(dev)->gen) {
default:
WARN_ON(1);
return;
case 9:
ASSIGN_WRITE_MMIO_VFUNCS(gen9);
ASSIGN_READ_MMIO_VFUNCS(gen9);
1239,11 → 1239,16
case 4:
case 3:
case 2:
ASSIGN_WRITE_MMIO_VFUNCS(gen4);
ASSIGN_READ_MMIO_VFUNCS(gen4);
ASSIGN_WRITE_MMIO_VFUNCS(gen2);
ASSIGN_READ_MMIO_VFUNCS(gen2);
break;
}
 
if (intel_vgpu_active(dev)) {
ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
ASSIGN_READ_MMIO_VFUNCS(vgpu);
}
 
i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
1273,10 → 1278,12
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reg_read *reg = data;
struct register_whitelist const *entry = whitelist;
unsigned size;
u64 offset;
int i, ret = 0;
 
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (entry->offset == reg->offset &&
if (entry->offset == (reg->offset & -entry->size) &&
(1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
break;
}
1284,26 → 1291,39
if (i == ARRAY_SIZE(whitelist))
return -EINVAL;
 
switch (entry->size) {
/* We use the low bits to encode extra flags as the register should
* be naturally aligned (and those that are not so aligned merely
* limit the available flags for that register).
*/
offset = entry->offset;
size = entry->size;
size |= reg->offset ^ offset;
 
intel_runtime_pm_get(dev_priv);
 
switch (size) {
case 8 | 1:
reg->val = I915_READ64_2x32(offset, offset+4);
break;
case 8:
reg->val = I915_READ64(reg->offset);
reg->val = I915_READ64(offset);
break;
case 4:
reg->val = I915_READ(reg->offset);
reg->val = I915_READ(offset);
break;
case 2:
reg->val = I915_READ16(reg->offset);
reg->val = I915_READ16(offset);
break;
case 1:
reg->val = I915_READ8(reg->offset);
reg->val = I915_READ8(offset);
break;
default:
WARN_ON(1);
ret = -EINVAL;
goto out;
}
 
out:
intel_runtime_pm_put(dev_priv);
return ret;
}
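
A worked example of the flag-in-low-bits trick used above: a naturally aligned register leaves its low address bits free, so userspace can set bit 0 to request the 2x32 read of a 64-bit register, `& -size` strips the flags for the whitelist match, and XOR recovers them for the size switch. Values below are illustrative.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t entry_offset = 0x2358;   /* aligned whitelist entry */
    unsigned entry_size = 8;          /* 64-bit register */

    /* Userspace passes offset | 1 to ask for two 32-bit reads. */
    uint64_t user_offset = entry_offset | 1;

    /* -size is an all-ones mask above the size's alignment bits,
     * so this strips the flag bits before comparing. */
    assert((user_offset & -(uint64_t)entry_size) == entry_offset);

    /* XOR recovers the flags and folds them into the size switch. */
    unsigned size = entry_size | (unsigned)(user_offset ^ entry_offset);
    assert(size == (8 | 1));   /* selects the I915_READ64_2x32 case */
    return 0;
}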
 
1405,21 → 1425,21
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
I915_WRITE(ILK_GDSR,
ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
ret = wait_for((I915_READ(ILK_GDSR) &
ILK_GRDOM_RESET_ENABLE) == 0, 500);
if (ret)
return ret;
 
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
I915_WRITE(ILK_GDSR,
ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
ret = wait_for((I915_READ(ILK_GDSR) &
ILK_GRDOM_RESET_ENABLE) == 0, 500);
if (ret)
return ret;
 
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);
I915_WRITE(ILK_GDSR, 0);
 
return 0;
}
1445,22 → 1465,91
return ret;
}
 
int intel_gpu_reset(struct drm_device *dev)
static int wait_for_register(struct drm_i915_private *dev_priv,
const u32 reg,
const u32 mask,
const u32 value,
const unsigned long timeout_ms)
{
if (INTEL_INFO(dev)->gen >= 6)
return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}
 
static int gen8_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
int i;
 
for_each_ring(engine, dev_priv, i) {
I915_WRITE(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
 
if (wait_for_register(dev_priv,
RING_RESET_CTL(engine->mmio_base),
RESET_CTL_READY_TO_RESET,
RESET_CTL_READY_TO_RESET,
700)) {
DRM_ERROR("%s: reset request timeout\n", engine->name);
goto not_ready;
}
}
 
return gen6_do_reset(dev);
 
not_ready:
for_each_ring(engine, dev_priv, i)
I915_WRITE(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
 
return -EIO;
}
 
static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
{
if (!i915.reset)
return NULL;
 
if (INTEL_INFO(dev)->gen >= 8)
return gen8_do_reset;
else if (INTEL_INFO(dev)->gen >= 6)
return gen6_do_reset;
else if (IS_GEN5(dev))
return ironlake_do_reset(dev);
return ironlake_do_reset;
else if (IS_G4X(dev))
return g4x_do_reset(dev);
return g4x_do_reset;
else if (IS_G33(dev))
return g33_do_reset(dev);
return g33_do_reset;
else if (INTEL_INFO(dev)->gen >= 3)
return i915_do_reset(dev);
return i915_do_reset;
else
return NULL;
}
 
int intel_gpu_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
int (*reset)(struct drm_device *);
int ret;
 
reset = intel_get_gpu_reset(dev);
if (reset == NULL)
return -ENODEV;
 
/* If the power well sleeps during the reset, the reset
* request may be dropped and never complete (causing -EIO).
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
ret = reset(dev);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
return ret;
}
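
The declarator in intel_get_gpu_reset() above — a function returning a pointer to a function — is easier to read through a typedef; the sketch below shows the equivalent shape and mirrors the forcewake bracket around the chosen reset callback. Types and names are hypothetical stand-ins.

#include <stddef.h>
#include <stdio.h>

struct device;

typedef int (*reset_fn)(struct device *);

static int gen6_reset(struct device *d) { (void)d; return 0; }

/* Equivalent, readable form of the selector. */
static reset_fn get_gpu_reset(int gen)
{
    return gen >= 6 ? gen6_reset : NULL;
}

int main(void)
{
    reset_fn reset = get_gpu_reset(8);

    if (reset == NULL) {
        puts("no reset method (-ENODEV)");
        return 1;
    }
    /* Mirrors intel_gpu_reset(): hold forcewake across the call so the
     * reset request is not dropped while the power well sleeps. */
    puts("forcewake get");
    int ret = reset(NULL);
    puts("forcewake put");
    return ret;
}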
 
bool intel_has_gpu_reset(struct drm_device *dev)
{
return intel_get_gpu_reset(dev) != NULL;
}
 
void intel_uncore_check_errors(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/drivers/video/drm/i915/kms_display.c
17,6 → 17,7
 
display_t *os_display;
struct drm_i915_gem_object *main_fb_obj;
struct drm_framebuffer *main_framebuffer;
 
u32 cmd_buffer;
u32 cmd_offset;
60,6 → 61,8
int stride;
int ret;
 
ENTER();
 
drm_modeset_lock_all(dev);
 
list_for_each_entry(tmpmode, &connector->modes, head)
103,10 → 106,12
hdisplay = mode->hdisplay;
vdisplay = mode->vdisplay;
 
if (crtc->invert_dimensions)
swap(hdisplay, vdisplay);
// if (crtc->invert_dimensions)
// swap(hdisplay, vdisplay);
 
fb = crtc->primary->fb;
if(fb == NULL)
fb = main_framebuffer;
 
fb->width = reqmode->width;
fb->height = reqmode->height;
138,9 → 143,11
crtc->enabled = true;
os_display->crtc = crtc;
 
// i915_gem_object_unpin_fence(main_fb_obj);
i915_gem_object_put_fence(main_fb_obj);
 
printf("fb:%p %dx%d pitch %d format %x\n",
fb, fb->width, fb->height, fb->pitches[0], fb->pixel_format);
 
set.crtc = crtc;
set.x = 0;
set.y = 0;
151,8 → 158,6
 
ret = drm_mode_set_config_internal(&set);
 
drm_modeset_unlock_all(dev);
 
if ( !ret )
{
os_display->width = fb->width;
168,6 → 173,10
DRM_ERROR("failed to set mode %d_%d on crtc %p\n",
fb->width, fb->height, crtc);
 
drm_modeset_unlock_all(dev);
 
LEAVE();
 
return ret;
}
 
223,8 → 232,8
 
if( encoder == NULL)
{
DRM_DEBUG_KMS("CONNECTOR %x ID: %d no active encoders\n",
connector, connector->base.id);
DRM_DEBUG_KMS("CONNECTOR %s ID: %d no active encoders\n",
connector->name, connector->base.id);
continue;
};
}
238,9 → 247,18
*boot_connector = connector;
*boot_crtc = crtc;
 
DRM_DEBUG_KMS("CONNECTOR %p ID:%d status:%d ENCODER %p ID: %d CRTC %p ID:%d\n",
connector, connector->base.id, connector->status,
DRM_DEBUG_KMS("CONNECTOR %s ID:%d status:%d ENCODER %p ID: %d CRTC %p ID:%d\n",
connector->name, connector->base.id, connector->status,
encoder, encoder->base.id, crtc, crtc->base.id );
char con_edid[128];
 
memcpy(con_edid, connector->edid_blob_ptr->data, 128);
printf("Manufacturer: %s Model %x Serial Number %u\n",
manufacturer_name(con_edid + 0x08),
(unsigned short)(con_edid[0x0A] + (con_edid[0x0B] << 8)),
(unsigned int)(con_edid[0x0C] + (con_edid[0x0D] << 8)
+ (con_edid[0x0E] << 16) + (con_edid[0x0F] << 24)));
 
return 0;
}
else
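
The EDID fields printed above follow the standard layout: bytes 8-9 pack three 5-bit letters ('A' = 1) big-endian for the manufacturer, bytes 10-11 are the little-endian product code, and bytes 12-15 the serial number. A small sketch of what manufacturer_name() presumably decodes:

#include <stdint.h>
#include <stdio.h>

static void edid_vendor(const uint8_t *edid, char name[4])
{
    uint16_t v = (uint16_t)(edid[8] << 8) | edid[9];

    name[0] = 'A' - 1 + ((v >> 10) & 0x1f);
    name[1] = 'A' - 1 + ((v >> 5) & 0x1f);
    name[2] = 'A' - 1 + (v & 0x1f);
    name[3] = '\0';
}

int main(void)
{
    /* 0x22f0 packs "HWP" (Hewlett-Packard), a common example ID. */
    uint8_t edid[16] = { [8] = 0x22, [9] = 0xf0 };
    char name[4];

    edid_vendor(edid, name);
    printf("Manufacturer: %s\n", name);   /* -> HWP */
    return 0;
}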
285,6 → 303,8
u32 ifl;
int ret;
 
ENTER();
 
mutex_lock(&dev->struct_mutex);
mutex_lock(&dev->mode_config.mutex);
 
303,16 → 323,12
ret = idr_alloc(&dev->object_name_idr, &main_fb_obj->base, 1, 0, GFP_NOWAIT);
 
main_fb_obj->base.name = ret;
 
/* Allocate a reference for the name table. */
drm_gem_object_reference(&main_fb_obj->base);
 
main_fb_obj->base.handle_count++;
DRM_DEBUG_KMS("%s allocate fb name %d\n", __FUNCTION__, main_fb_obj->base.name );
}
 
idr_preload_end();
mutex_unlock(&dev->object_name_lock);
drm_gem_object_unreference(&main_fb_obj->base);
 
os_display = GetDisplay();
os_display->ddev = dev;
463,7 → 479,7
 
if (dev_priv->info.cursor_needs_physical)
{
bits = (uint32_t*)KernelAlloc(KMS_CURSOR_WIDTH*KMS_CURSOR_HEIGHT*4);
bits = (uint32_t*)KernelAlloc(KMS_CURSOR_WIDTH*KMS_CURSOR_HEIGHT*8);
if (unlikely(bits == NULL))
return ENOMEM;
cursor->cobj = (struct drm_i915_gem_object *)GetPgAddr(bits);
474,7 → 490,7
if (unlikely(obj == NULL))
return -ENOMEM;
 
ret = i915_gem_obj_ggtt_pin(obj, 0,PIN_MAPPABLE | PIN_NONBLOCK);
ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal, 128*1024, PIN_GLOBAL);
if (ret) {
drm_gem_object_unreference(&obj->base);
return ret;
531,6 → 547,8
void __stdcall move_cursor_kms(cursor_t *cursor, int x, int y)
{
struct drm_crtc *crtc = os_display->crtc;
struct drm_plane_state *cursor_state = crtc->cursor->state;
 
x-= cursor->hot_x;
y-= cursor->hot_y;
 
537,14 → 555,13
crtc->cursor_x = x;
crtc->cursor_y = y;
 
cursor_state->crtc_x = x;
cursor_state->crtc_y = y;
 
intel_crtc_update_cursor(crtc, 1);
 
// if (crtc->funcs->cursor_move)
// crtc->funcs->cursor_move(crtc, x, y);
 
};
 
 
cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
{
struct drm_i915_private *dev_priv = os_display->ddev->dev_private;
556,7 → 573,7
old = os_display->cursor;
os_display->cursor = cursor;
 
intel_crtc->cursor_bo = cursor->cobj;
// intel_crtc->cursor_bo = cursor->cobj;
 
if (!dev_priv->info.cursor_needs_physical)
intel_crtc->cursor_addr = i915_gem_obj_ggtt_offset(cursor->cobj);
563,8 → 580,9
else
intel_crtc->cursor_addr = (addr_t)cursor->cobj;
 
intel_crtc->cursor_width = 64;
intel_crtc->cursor_height = 64;
intel_crtc->base.cursor->state->crtc_w = 64;
intel_crtc->base.cursor->state->crtc_h = 64;
intel_crtc->base.cursor->state->rotation = 0;
 
move_cursor_kms(cursor, crtc->cursor_x, crtc->cursor_y);
return old;
574,7 → 592,6
{
struct drm_i915_private *dev_priv = os_display->ddev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(os_display->crtc);
 
struct drm_i915_gem_object *obj = get_fb_obj();
 
fb->name = obj->base.name;
804,10 → 821,11
int ret = 0;
slot = *((u8*)CURRENT_TASK);
 
if( mask_seqno[slot] == os_display->mask_seqno)
if( mask->forced == 0 && mask_seqno[slot] == os_display->mask_seqno)
return 0;
 
memset(mask->bo_map,0,mask->width * mask->height);
if(mask->forced)
memset((void*)mask->bo_map,0,mask->width * mask->height);
 
GetWindowRect(&win);
win.right+= 1;
851,8 → 869,8
return -EINVAL;
}
 
#if 1
if(warn_count < 1000)
#if 0
if(warn_count < 100)
{
printf("left %d top %d right %d bottom %d\n",
ml, mt, mr, mb);
1001,13 → 1019,6
 
#define NSEC_PER_SEC 1000000000L
 
void getrawmonotonic(struct timespec *ts)
1018,30 → 1029,8
ts->tv_nsec = (tmp - ts->tv_sec*100)*10000000;
}
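
A quick check of the tick conversion above, assuming the elided lines set tv_sec = ticks / 100 as the remainder arithmetic implies (i.e. a 100 Hz timer, 10 ms per tick):

#include <assert.h>

struct timespec_ex { long tv_sec; long tv_nsec; };

static void ticks_to_timespec(unsigned long ticks, struct timespec_ex *ts)
{
    ts->tv_sec = ticks / 100;                             /* 100 ticks/s */
    ts->tv_nsec = (ticks - ts->tv_sec * 100) * 10000000;  /* 10 ms/tick */
}

int main(void)
{
    struct timespec_ex ts;

    ticks_to_timespec(12345, &ts);
    assert(ts.tv_sec == 123 && ts.tv_nsec == 450000000);
    return 0;
}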
 
void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
while (nsec >= NSEC_PER_SEC) {
/*
* The following asm() prevents the compiler from
* optimising this loop into a modulo operation. See
* also __iter_div_u64_rem() in include/linux/time.h
*/
asm("" : "+rm"(nsec));
nsec -= NSEC_PER_SEC;
++sec;
}
while (nsec < 0) {
asm("" : "+rm"(nsec));
nsec += NSEC_PER_SEC;
--sec;
}
ts->tv_sec = sec;
ts->tv_nsec = nsec;
}
 
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
unsigned long flags;
 
// wait->flags &= ~WQ_FLAG_EXCLUSIVE;
/drivers/video/drm/i915/main.c
1,3 → 1,5
#include <syscall.h>
 
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
6,10 → 8,11
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <syscall.h>
 
#include "bitmap.h"
 
#define DRV_NAME "i915 v4.4"
 
#define I915_DEV_CLOSE 0
#define I915_DEV_INIT 1
#define I915_DEV_READY 2
28,6 → 31,19
uint8_t revision;
};
 
struct cmdtable
{
char *key;
int size;
int *val;
};
 
#define CMDENTRY(key, val) {(key), (sizeof(key)-1), &val}
void parse_cmdline(char *cmdline, struct cmdtable *table, char *log, videomode_t *mode);
 
 
int oops_in_progress;
int i915_fbsize = 16;
struct drm_device *main_device;
struct drm_file *drm_file_handlers[256];
videomode_t usermode;
38,15 → 54,6
int _stdcall display_handler(ioctl_t *io);
int init_agp(void);
 
int srv_blit_bitmap(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h);
 
int blit_textured(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h);
 
int blit_tex(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h);
 
void get_pci_info(struct pci_device *dev);
int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv);
55,11 → 62,11
struct drm_file *file);
 
struct cmdtable cmdtable[]= {
// CMDENTRY("-pm=", i915_powersave),
// CMDENTRY("-rc6=", i915_enable_rc6),
// CMDENTRY("-fbc=", i915_enable_fbc),
// CMDENTRY("-ppgt=", i915_enable_ppgtt),
// CMDENTRY("-pc8=", i915_enable_pc8),
CMDENTRY("-FB=", i915_fbsize),
/* CMDENTRY("-pm=", i915.powersave), */
CMDENTRY("-rc6=", i915.enable_rc6),
CMDENTRY("-fbc=", i915.enable_fbc),
CMDENTRY("-ppgt=", i915.enable_ppgtt),
{NULL, 0}
};
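
A minimal sketch of how a table like cmdtable can be consumed: match each key as a prefix of a command-line token and store the integer that follows it. parse_cmdline itself lives elsewhere in the tree; this only illustrates the lookup idea, with local stand-in variables.

#include <assert.h>
#include <stdlib.h>
#include <string.h>

struct cmdtable { char *key; int size; int *val; };
#define CMDENTRY(key, val) {(key), (sizeof(key)-1), &val}

static int enable_rc6, enable_fbc;

static struct cmdtable table[] = {
    CMDENTRY("-rc6=", enable_rc6),
    CMDENTRY("-fbc=", enable_fbc),
    {NULL, 0, NULL}
};

static void parse_token(const char *tok)
{
    for (struct cmdtable *t = table; t->key; t++)
        if (strncmp(tok, t->key, t->size) == 0) {
            *t->val = atoi(tok + t->size);   /* value follows the key */
            return;
        }
}

int main(void)
{
    parse_token("-rc6=6");
    parse_token("-fbc=1");
    assert(enable_rc6 == 6 && enable_fbc == 1);
    return 0;
}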
 
110,10 → 117,15
 
while(driver_wq_state == I915_DEV_INIT)
{
jiffies = GetTimerTicks();
jiffies = GetClockNs() / 10000000;
delay(1);
};
 
if( driver_wq_state == I915_DEV_CLOSE)
{
asm volatile ("int $0x40"::"a"(-1));
};
 
dev_priv = main_device->dev_private;
cwq = dev_priv->wq;
 
123,7 → 135,7
 
while(driver_wq_state != I915_DEV_CLOSE)
{
jiffies = GetTimerTicks();
jiffies = GetClockNs() / 10000000;
 
key = get_key();
 
186,9 → 198,11
if( GetService("DISPLAY") != 0 )
return 0;
 
printf("\ni915 v3.19-rc2 build %s %s\nusage: i915 [options]\n"
printf("\n%s build %s %s\nusage: i915 [options]\n"
"-FB=<0-9> Set framebuffer size in megabytes (default: 16)\n",
"-pm=<0,1> Enable powersavings, fbc, downclocking, etc. (default: 1 - true)\n",
__DATE__, __TIME__);
DRV_NAME, __DATE__, __TIME__);
 
printf("-rc6=<-1,0-7> Enable power-saving render C-state 6.\n"
" Different stages can be selected via bitmask values\n"
" (0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6).\n"
197,10 → 211,11
printf("-fbc=<-1,0,1> Enable frame buffer compression for power savings\n"
" (default: -1 (use per-chip default))\n");
printf("-ppgt=<0,1> Enable PPGTT (default: true)\n");
printf("-pc8=<0,1> Enable support for low power package C states (PC8+) (default: 0 - false)\n");
 
printf("-l<path> path to log file\n");
printf("-m<WxHxHz> set videomode\n");
 
printf("cmdline %s\n", cmdline);
if( cmdline && *cmdline )
parse_cmdline(cmdline, cmdtable, log, &usermode);
 
209,6 → 224,10
printf("Can't open %s\nExit\n", log);
return 0;
}
else
{
dbgprintf("\nLOG: %s build %s %s\n",DRV_NAME,__DATE__, __TIME__);
}
 
cpu_detect1();
// dbgprintf("\ncache line size %d\n", x86_clflush_size);
237,6 → 256,7
{
driver_wq_state = I915_DEV_CLOSE;
dbgprintf("Epic Fail :(\n");
delay(100);
return 0;
};
 
265,16 → 285,7
#define SRV_SET_MODE 2
#define SRV_GET_CAPS 3
 
#define SRV_CREATE_SURFACE 10
#define SRV_DESTROY_SURFACE 11
#define SRV_LOCK_SURFACE 12
#define SRV_UNLOCK_SURFACE 13
#define SRV_RESIZE_SURFACE 14
#define SRV_BLIT_BITMAP 15
#define SRV_BLIT_TEXTURE 16
#define SRV_BLIT_VIDEO 17
 
 
#define SRV_GET_PCI_INFO 20
#define SRV_I915_GET_PARAM 21
#define SRV_I915_GEM_CREATE 22
379,14 → 390,6
retval = drm_gem_open_ioctl(main_device, inp, file);
break;
 
case SRV_I915_GEM_PIN:
retval = i915_gem_pin_ioctl(main_device, inp, file);
break;
 
case SRV_I915_GEM_UNPIN:
retval = i915_gem_unpin_ioctl(main_device, inp, file);
break;
 
case SRV_I915_GEM_GET_CACHING:
retval = i915_gem_get_caching_ioctl(main_device, inp, file);
break;
886,3 → 889,31
return __res;
}
 
#include <linux/math64.h>
 
u64 long_div(u64 dividend, u64 divisor)
{
#if 1
u32 high = divisor >> 32;
u64 quot;
 
if (high == 0) {
quot = div_u64(dividend, divisor);
} else {
int n = 1 + fls(high);
quot = div_u64(dividend >> n, divisor >> n);
 
if (quot != 0)
quot--;
if ((dividend - quot * divisor) >= divisor)
quot++;
}
 
return quot;
#endif
// return dividend / divisor;
};
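
A self-check of the shift-and-correct division above: dropping the same number n of low bits from dividend and divisor gives a quotient estimate that is off by at most one, which the two fix-ups repair. This standalone version uses plain 64-bit division where the original calls div_u64.

#include <assert.h>
#include <stdint.h>

static int fls32(uint32_t x) { int n = 0; while (x) { n++; x >>= 1; } return n; }

static uint64_t long_div_sketch(uint64_t dividend, uint64_t divisor)
{
    uint32_t high = divisor >> 32;
    uint64_t quot;

    if (high == 0) {
        quot = dividend / divisor;   /* divisor fits the fast 64/32 path */
    } else {
        int n = 1 + fls32(high);     /* shift so divisor fits 32 bits */
        quot = (dividend >> n) / (divisor >> n);
        if (quot != 0)
            quot--;                  /* estimate may be one too high */
        if (dividend - quot * divisor >= divisor)
            quot++;                  /* ...or now one too low */
    }
    return quot;
}

int main(void)
{
    assert(long_div_sketch(100, 7) == 14);
    /* (2^64 - 1) / (2^32 + 1) == 2^32 - 1 exactly */
    assert(long_div_sketch(0xFFFFFFFFFFFFFFFFull, 0x100000001ull) ==
           0xFFFFFFFFull);
    return 0;
}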
 
/drivers/video/drm/i915/utils.c
5,8 → 5,9
#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/hdmi.h>
#include <linux/seq_file.h>
#include <linux/fence.h>
 
 
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
struct file *filep;
315,62 → 316,72
* example output buffer:
* 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
*/
void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
int groupsize, char *linebuf, size_t linebuflen,
bool ascii)
int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
char *linebuf, size_t linebuflen, bool ascii)
{
const u8 *ptr = buf;
int ngroups;
u8 ch;
int j, lx = 0;
int ascii_column;
int ret;
 
if (rowsize != 16 && rowsize != 32)
rowsize = 16;
 
if (!len)
goto nil;
if (len > rowsize) /* limit to one line at a time */
len = rowsize;
if (!is_power_of_2(groupsize) || groupsize > 8)
groupsize = 1;
if ((len % groupsize) != 0) /* no mixed size output */
groupsize = 1;
 
switch (groupsize) {
case 8: {
ngroups = len / groupsize;
ascii_column = rowsize * 2 + rowsize / groupsize + 1;
 
if (!linebuflen)
goto overflow1;
 
if (!len)
goto nil;
 
if (groupsize == 8) {
const u64 *ptr8 = buf;
int ngroups = len / groupsize;
 
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
for (j = 0; j < ngroups; j++) {
ret = snprintf(linebuf + lx, linebuflen - lx,
"%s%16.16llx", j ? " " : "",
(unsigned long long)*(ptr8 + j));
ascii_column = 17 * ngroups + 2;
break;
if (ret >= linebuflen - lx)
goto overflow1;
lx += ret;
}
 
case 4: {
} else if (groupsize == 4) {
const u32 *ptr4 = buf;
int ngroups = len / groupsize;
 
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s%8.8x", j ? " " : "", *(ptr4 + j));
ascii_column = 9 * ngroups + 2;
break;
for (j = 0; j < ngroups; j++) {
ret = snprintf(linebuf + lx, linebuflen - lx,
"%s%8.8x", j ? " " : "",
*(ptr4 + j));
if (ret >= linebuflen - lx)
goto overflow1;
lx += ret;
}
 
case 2: {
} else if (groupsize == 2) {
const u16 *ptr2 = buf;
int ngroups = len / groupsize;
 
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s%4.4x", j ? " " : "", *(ptr2 + j));
ascii_column = 5 * ngroups + 2;
break;
for (j = 0; j < ngroups; j++) {
ret = snprintf(linebuf + lx, linebuflen - lx,
"%s%4.4x", j ? " " : "",
*(ptr2 + j));
if (ret >= linebuflen - lx)
goto overflow1;
lx += ret;
}
 
default:
for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
} else {
for (j = 0; j < len; j++) {
if (linebuflen < lx + 3)
goto overflow2;
ch = ptr[j];
linebuf[lx++] = hex_asc_hi(ch);
linebuf[lx++] = hex_asc_lo(ch);
378,23 → 389,29
}
if (j)
lx--;
 
ascii_column = 3 * rowsize + 2;
break;
}
if (!ascii)
goto nil;
 
while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
while (lx < ascii_column) {
if (linebuflen < lx + 2)
goto overflow2;
linebuf[lx++] = ' ';
for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) {
}
for (j = 0; j < len; j++) {
if (linebuflen < lx + 2)
goto overflow2;
ch = ptr[j];
linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
}
nil:
linebuf[lx] = '\0';
return lx;
overflow2:
linebuf[lx++] = '\0';
overflow1:
return ascii ? ascii_column + len : (groupsize * 2 + 1) * ngroups - 1;
}
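
A usage sketch for the rewritten hex_dump_to_buffer(): one 16-byte row, single-byte groups, with the ASCII column, matching the example output quoted in the comment above the function. The prototype is restated here as an assumption; the snippet links against the definition above.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
                       char *linebuf, size_t linebuflen, bool ascii);

int main(void)
{
    unsigned char data[16];
    char line[80];

    for (int i = 0; i < 16; i++)
        data[i] = 0x40 + i;   /* '@', 'A', 'B', ... */

    int n = hex_dump_to_buffer(data, sizeof(data), 16, 1,
                               line, sizeof(line), true);
    /* -> "40 41 42 ... 4e 4f  @ABCDEFGHIJKLMNO", n = bytes written */
    printf("%d: %s\n", n, line);
    return 0;
}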
 
/**
* print_hex_dump - print a text hex dump to syslog for a binary blob of data
* @level: kernel log level (e.g. KERN_DEBUG)
774,5 → 791,59
__call_rcu(head, func, &rcu_sched_ctrlblk);
}
 
int seq_puts(struct seq_file *m, const char *s)
{
return 0;
};
 
__printf(2, 3) int seq_printf(struct seq_file *m, const char *f, ...)
{
return 0;
}
 
 
signed long
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
{
signed long ret;
 
if (WARN_ON(timeout < 0))
return -EINVAL;
 
// trace_fence_wait_start(fence);
ret = fence->ops->wait(fence, intr, timeout);
// trace_fence_wait_end(fence);
return ret;
}
 
void fence_release(struct kref *kref)
{
struct fence *fence =
container_of(kref, struct fence, refcount);
 
// trace_fence_destroy(fence);
 
BUG_ON(!list_empty(&fence->cb_list));
 
if (fence->ops->release)
fence->ops->release(fence);
else
fence_free(fence);
}
 
void fence_free(struct fence *fence)
{
kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);
 
 
ktime_t ktime_get(void)
{
ktime_t t;
 
t.tv64 = GetClockNs();
 
return t;
}